My first idea was to wrap RK's demo directly as a .so library. I added yolov8_pose.cc and yolov8_pose.h next to the yolov8_pose C code to act as the wrapper. Code first:
yolov8_pose.cc
#include "yolov8_pose.h"#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>#include "yolov8-pose.h"
#include "image_utils.h"
#include "file_utils.h"
#include "image_drawing.h"
#include "postprocess.h"
int skeleton[38] ={16, 14, 14, 12, 17, 15, 15, 13, 12, 13, 6, 12, 7, 13, 6, 7, 6, 8, 7, 9, 8, 10, 9, 11, 2, 3, 1, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7}; int ret;
rknn_app_context_t rknn_app_ctx;
image_buffer_t src_image;int yolov8_init() {memset(&rknn_app_ctx, 0, sizeof(rknn_app_context_t));init_post_process();ret = init_yolov8_pose_model("/data/user/0/com.rockchip.gpadc.yolodemo/cache/yolov8_pose.rknn", &rknn_app_ctx);if (ret != 0) {printf("yolov8_init fail! ret=%d\n", ret);}return ret;
}char* run_yolov8_pose(unsigned char* pixeldata,int cound) {memset(&src_image, 0, sizeof(image_buffer_t));//ret = read_image("/data/model/test.jpg", &src_image);// 默认图像为3通道int w = 1280, h = 720, c = 3;// printf("load image wxhxc=%dx%dx%d path=%s\n", w, h, c, path);int msize = w * h * c;// 设置图像数据(&src_image)->virt_addr = pixeldata;(&src_image)->width = w;(&src_image)->height = h;(&src_image)->format = IMAGE_FORMAT_RGB888;// if (ret != 0)// {// printf("read image fail! ret=%d image_path=%s\n", ret, "/data/model/test.jpg");// }object_detect_result_list od_results;ret = inference_yolov8_pose_model(&rknn_app_ctx, &src_image, &od_results);if (ret != 0){printf("inference_yolov8_pose_model fail! ret=%d\n", ret);}char buffers[1024]; // 用于存储格式化后的字符串char *finalBuffer = NULL; // 用于存储最终的字符串// 分配足够的内存来存储所有字符串finalBuffer = (char *)malloc(4084);if (finalBuffer == NULL) {perror("内存分配失败");}// 初始化最终缓冲区finalBuffer[0] = '\0';// 格式化字符串snprintf(reinterpret_cast<char *const>(buffers), sizeof(buffers), "%3d\n", "");// 画框和概率char text[256];for (int i = 0; i < od_results.count; i++){//做一个限制人数的功能if(od_results.count <= cound){object_detect_result *det_result = &(od_results.results[i]);// 将信息格式化到临时缓冲区//snprintf(buffers, 1024, "%s (left:%d top:%d right:%d bottom:%d) 概率:%.3f length:%d\n",// coco_cls_to_name(det_result->cls_id),// det_result->box.left, det_result->box.top,// det_result->box.right, det_result->box.bottom,// det_result->prop,od_results.count);snprintf(buffers, 1024, "%s (left:%d top:%d right:%d bottom:%d) 概率:%.3f length:%d\n","person-pose",det_result->box.left, det_result->box.top,det_result->box.right, det_result->box.bottom,det_result->prop,od_results.count);// 将格式化后的字符串附加到 finalBufferstrcat(finalBuffer, buffers);for (int j = 0; j < 38/2; ++j){int x1 = (int)(det_result->keypoints[skeleton[2*j]-1][0]);int y1 = (int)(det_result->keypoints[skeleton[2*j]-1][1]);int x2 = (int)(det_result->keypoints[skeleton[2*j+1]-1][0]);int y2 = (int)(det_result->keypoints[skeleton[2*j+1]-1][1]);// 将线段坐标添加到 finalBuffersnprintf(buffers, 1024, "skeleton:(%d, %d,%d, %d)\n", x1, y1, x2, y2);strcat(finalBuffer, buffers);}for (int j = 0; j < 17; ++j){int cx = (int)(det_result->keypoints[j][0]);int cy = (int)(det_result->keypoints[j][1]);// 将关键点坐标添加到 finalBuffersnprintf(buffers, 1024, "key:(%d, %d)\n",cx, cy);strcat(finalBuffer, buffers);}}}// write_image("/data/model/out.jpg", &src_image);//}return finalBuffer;
}void yolov8_cleanup() {deinit_post_process();ret = release_yolov8_pose_model(&rknn_app_ctx);if (ret != 0){printf("release_yolov5_model fail! ret=%d\n", ret);}if (src_image.virt_addr != NULL){free(src_image.virt_addr);}}
yolov8_pose.h
#ifndef YOLOV8_POSE_H
#define YOLOV8_POSE_H

#ifdef __cplusplus
extern "C" {
#endif

// Initialize the model
int yolov8_init();

// Run inference; returns a malloc'ed result string that the caller must free
char* run_yolov8_pose(unsigned char* pixeldata, int cound);

// Clean up
void yolov8_cleanup();

#ifdef __cplusplus
}
#endif

#endif // YOLOV8_POSE_H
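For reference, here is a minimal sketch of how this wrapper could be exercised from a plain C++ test program before wiring it into Android. It is not part of the demo: the raw RGB888 dump at /data/model/frame.rgb is an assumption, and it must be linked against the libyolov8_pose.so built below.

// Hypothetical standalone test of the wrapper (not part of the demo).
// Assumes a raw 1280x720 RGB888 frame dumped to /data/model/frame.rgb.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

#include "yolov8_pose.h"

int main() {
    if (yolov8_init() != 0) {
        return -1;
    }

    const int w = 1280, h = 720, c = 3;
    const size_t frame_size = (size_t)w * h * c;
    unsigned char *frame = (unsigned char *)malloc(frame_size);

    FILE *fp = fopen("/data/model/frame.rgb", "rb");  // assumed test frame
    if (frame && fp && fread(frame, 1, frame_size, fp) == frame_size) {
        // Report at most 5 persons, the same limit the JNI side uses later
        char *result = run_yolov8_pose(frame, 5);
        if (result) {
            printf("%s", result);
            free(result);  // run_yolov8_pose() returns a malloc'ed buffer
        }
    }

    if (fp) fclose(fp);
    free(frame);
    yolov8_cleanup();
    return 0;
}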
1. Code walkthrough
- Studying the code carefully, I found that RK's demo reads a 640*640 image, feeds it to the model, and outputs a 640*640 image annotated with the skeleton and other information.
- My intended logic is different: feed in live camera frames and get back the skeleton, bounding box and related information as data.
- I default to 1280*720 frames here, and the rknn model path is hardcoded; change it to suit your setup.
- With the functional code wrapped, we need to package it into a library, so the build configuration has to change.
- I did not delete anything from the original CMakeLists.txt; I only added the library packaging on top of it, keeping all the existing interfaces. Source below:
cmake_minimum_required(VERSION 3.10)

project(rknn_yolov8_pose_demo)

set(rknpu_yolov8-pose_file rknpu2/yolov8-pose.cc)

# Add the third-party and utility libraries
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../../../3rdparty/ 3rdparty.out)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../../../utils/ utils.out)

set(CMAKE_INSTALL_RPATH "$ORIGIN/lib")

file(GLOB SRCS ${CMAKE_CURRENT_SOURCE_DIR}/*.cc)

# Define the yolov8_pose shared library
add_library(yolov8_pose SHARED
    yolov8_pose.cc
    postprocess.cc
    ${rknpu_yolov8-pose_file}
)

# Add include directories to the yolov8_pose library
target_include_directories(yolov8_pose PRIVATE
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${LIBRKNNRT_INCLUDES}
    ${CMAKE_CURRENT_SOURCE_DIR}/../../../3rdparty
    ${CMAKE_CURRENT_SOURCE_DIR}/../../../utils/
)

# Define a variable holding the common libraries
set(COMMON_LIBS
    imageutils
    fileutils
    imagedrawing
    ${LIBRKNNRT}
    dl
)

# Link the yolov8_pose shared library against them
target_link_libraries(yolov8_pose ${COMMON_LIBS})

# Add the executable
add_executable(${PROJECT_NAME}
    main.cc
    postprocess.cc
    ${rknpu_yolov8-pose_file}
)

# Link the executable against the shared library and the common libraries
target_link_libraries(${PROJECT_NAME}
    yolov8_pose
    ${COMMON_LIBS} # also link the common libraries
)

if (CMAKE_SYSTEM_NAME STREQUAL "Android")
    target_link_libraries(${PROJECT_NAME} log)
endif()

if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
    set(THREADS_PREFER_PTHREAD_FLAG ON)
    find_package(Threads REQUIRED)
    target_link_libraries(${PROJECT_NAME} Threads::Threads)
endif()

target_include_directories(${PROJECT_NAME} PRIVATE
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${LIBRKNNRT_INCLUDES}
)

# Install targets
install(TARGETS ${PROJECT_NAME} DESTINATION .)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/../model/bus.jpg DESTINATION ./model)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/../model/yolov8_pose_labels_list.txt DESTINATION ./model)
file(GLOB RKNN_FILES "${CMAKE_CURRENT_SOURCE_DIR}/../model/*.rknn")
install(FILES ${RKNN_FILES} DESTINATION model)

# Install the shared library
install(TARGETS yolov8_pose DESTINATION lib)
Build it, and you will find the compiled library under the install directory.
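Before moving on to Android, it can be worth a quick sanity check that the packaged libyolov8_pose.so really exports the three C symbols. Here is a minimal sketch of mine using dlopen/dlsym; the library path is an assumption and this check is not part of the demo:

// Hypothetical check that libyolov8_pose.so exports the wrapper symbols.
#include <cstdio>
#include <dlfcn.h>

int main() {
    // The path is an assumption; point it at your install/lib directory
    void *handle = dlopen("./lib/libyolov8_pose.so", RTLD_NOW);
    if (!handle) {
        printf("dlopen failed: %s\n", dlerror());
        return -1;
    }

    const char *symbols[] = {"yolov8_init", "run_yolov8_pose", "yolov8_cleanup"};
    for (const char *name : symbols) {
        void *sym = dlsym(handle, name);
        printf("%s -> %s\n", name, sym ? "found" : "missing");
    }

    dlclose(handle);
    return 0;
}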
2. Using it in Android Studio, directly with NDK development
Code first:
native-lib.cc
#include <jni.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <sys/syscall.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "yolov8_pose.h"

/******************************************************yolov8***************************************************/
// Other required includes
#include <android/log.h>

//#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "YOLOv8PoseJNI", __VA_ARGS__))
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, "YOLOv8PoseJNI", ##__VA_ARGS__);

// Model initialization
extern "C" JNIEXPORT void JNICALL
Java_com_rockchip_gpadc_demo_yolo_InferenceWrapper_native_1inityolov8(JNIEnv *env, jobject obj) {
    int ret = yolov8_init();
    LOGI("Java_com_rockchip_gpadc_demo_yolo_InferenceWrapper_native_1inityolov8 yolov8_init: %d", ret);
}

// Inference
extern "C" JNIEXPORT void JNICALL
Java_com_rockchip_gpadc_demo_yolo_InferenceWrapper_native_1inference(JNIEnv *env, jobject obj,
                                                                     jobject call_back,
                                                                     jbyteArray imageData,
                                                                     jint width, jint height) {
    // Create a JNI global reference to the callback
    jobject global_callback = env->NewGlobalRef(call_back);

    // Look up the callback method ID
    jclass cls = env->GetObjectClass(call_back);
    jmethodID gCallBackMid = env->GetMethodID(cls, "onCall", "(Ljava/lang/String;)V");

    // Get a pointer to the image byte array
    jbyte *byteArray = env->GetByteArrayElements(imageData, NULL);
    if (byteArray == nullptr) {
        env->DeleteGlobalRef(global_callback);
        return; // failed to access the array elements
    }

    // Limit to the first 5 persons; anything beyond that is ignored, because
    // too much data may not refresh fast enough
    char *result = run_yolov8_pose(reinterpret_cast<unsigned char *>(byteArray), 5);

    if (result != nullptr) {
        // Convert the char* to a Java string and pass it back via the callback
        jstring resultString = env->NewStringUTF(result);
        env->CallVoidMethod(global_callback, gCallBackMid, resultString);
        env->DeleteLocalRef(resultString); // drop the local string reference
        free(result);                      // run_yolov8_pose() returns a malloc'ed buffer
    }

    // Release the byte array elements and the global reference
    env->ReleaseByteArrayElements(imageData, byteArray, 0);
    env->DeleteGlobalRef(global_callback);
}

// Release resources
extern "C" JNIEXPORT void JNICALL
Java_com_rockchip_gpadc_demo_yolo_InferenceWrapper_native_1releaseModel(JNIEnv *env, jobject obj) {
    yolov8_cleanup();
}
CMakeLists.txt
# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html

# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.4.1)

# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# You can define multiple libraries, and CMake builds them for you.
# Gradle automatically packages shared libraries with your APK.

# include rga.
#include_directories(src/main/cpp/rga)

add_library( # Sets the name of the library.
        rknn4j

        # Sets the library as a shared library.
        SHARED

        # Provides a relative path to your source file(s).
        src/main/cpp/native-lib.cc
        src/main/cpp/yolov8_pose.h)

# Searches for a specified prebuilt library and stores the path as a
# variable. Because CMake includes system libraries in the search path by
# default, you only need to specify the name of the public NDK library
# you want to add. CMake verifies that the library exists before
# completing its build.
find_library( # Sets the name of the path variable.
        log-lib

        # Specifies the name of the NDK library that
        # you want CMake to locate.
        log)

# Specifies libraries CMake should link to your target library. You
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
        rknn4j

        # Links the target library to the log library
        # included in the NDK.
        ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/librknnrt.so
        ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/librga.so
        ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/libyolov8_pose.so
        ${log-lib})
In native-lib.cc I use a real-time callback to hand the inference results back to the app as they are produced. The app side calls:
package com.rockchip.gpadc.demo.yolo;

import com.rockchip.gpadc.demo.callstr;

/**
 * Created by randall on 18-4-18.
 */
public class InferenceWrapper {
    private final String TAG = "rkyolo.InferenceWrapper";

    static {
        System.loadLibrary("rknn4j");
    }

    public InferenceWrapper() {
    }

    public void setmessage(byte[] bytes, final callstr mcallstr) {
        // Log.e("TAG", "setmessage bytes===" + bytes.length);
        native_inference(new INativeListener() {
            @Override
            public void onCall(String str) {
                // Log.e("TAG", "model callback data======" + str);
                mcallstr.onCall(str);
            }
        }, bytes, 1280, 720);
    }

    public int initModel() {
        native_inityolov8();
        return 0;
    }

    public void deinit() {
        native_releaseModel();
    }

    // Java callback interface INativeListener.java
    interface INativeListener {
        void onCall(String str);
    }

    /**************************************************yolov8******************************************/
    private native void native_inityolov8();

    private native void native_inference(INativeListener mINativeListener, byte[] imageData, int width, int height);

    private native void native_releaseModel();
}
I added a callback interface here; just invoke it wherever your app needs the results.
One more note: RKNN inference expects RGB data, but many cameras output YUV (NV12 or NV21), so the frame has to be converted before it is passed in. There is plenty of conversion code online; a hedged sketch follows below.
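As an illustration only (this is not taken from the demo), here is a minimal CPU sketch of an NV21-to-RGB888 conversion that could run in the JNI layer before calling run_yolov8_pose; the function name and coefficients are my own, and in practice Rockchip's RGA library does this conversion much faster.

// Illustrative NV21 (Y plane + interleaved V/U plane) -> RGB888 conversion.
#include <algorithm>
#include <cstddef>
#include <cstdint>

static inline uint8_t clamp_u8(int v) {
    return (uint8_t)std::min(255, std::max(0, v));
}

// dst must hold width * height * 3 bytes of RGB888 output.
void nv21_to_rgb888(const uint8_t *src, uint8_t *dst, int width, int height) {
    const uint8_t *y_plane  = src;
    const uint8_t *vu_plane = src + (size_t)width * height;

    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            int Y = y_plane[(size_t)y * width + x];
            // Each 2x2 block of pixels shares one V/U pair
            size_t vu_index = (size_t)(y / 2) * width + (x / 2) * 2;
            int V = vu_plane[vu_index] - 128;
            int U = vu_plane[vu_index + 1] - 128;

            // Fixed-point approximation of the usual YUV-to-RGB formulas
            int r = Y + ((91881 * V) >> 16);
            int g = Y - ((22554 * U + 46802 * V) >> 16);
            int b = Y + ((116130 * U) >> 16);

            uint8_t *p = dst + ((size_t)y * width + x) * 3;
            p[0] = clamp_u8(r);
            p[1] = clamp_u8(g);
            p[2] = clamp_u8(b);
        }
    }
}

In the JNI path, the NV21 frame would be converted into a temporary RGB buffer like this and that buffer passed to run_yolov8_pose instead of the raw camera bytes.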
- For confidentiality reasons I cannot post the code that opens the camera and parses its frames here.
- The steps are actually simple: in onCreate, initialize first by calling

  public int initModel() {
      native_inityolov8();
      return 0;
  }

- Then convert the frame format, pass it into the JNI layer, and you get results back in real time:

  native_inference(new INativeListener() {
      @Override
      public void onCall(String str) {
          Log.e("TAG", "model callback data======" + str);
          // parse the result here
      }
  }, bytes, 1280, 720);

- How you parse it is up to you. The format above is one item per line; you can change the finalBuffer contents in yolov8_pose.cc, since that string is the complete callback payload. A hedged parsing sketch follows after this list.
- That is about it. If anything is unclear, feel free to contact me; I check messages during normal working hours.
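As referenced in the list above, here is a minimal sketch of mine for parsing the one-item-per-line result string built in yolov8_pose.cc (the "skeleton:(x1, y1,x2, y2)" and "key:(cx, cy)" lines). In the app this parsing would normally live in the Java callback; it is written in C++ here only to illustrate the line format, and the struct names are my own.

// Illustrative parser for the result string produced by run_yolov8_pose().
#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

struct Segment  { int x1, y1, x2, y2; };
struct Keypoint { int x, y; };

void parse_pose_result(const char *result,
                       std::vector<Segment> &segments,
                       std::vector<Keypoint> &keypoints) {
    std::istringstream stream(result);
    std::string line;
    while (std::getline(stream, line)) {
        Segment s{};
        Keypoint k{};
        if (std::sscanf(line.c_str(), "skeleton:(%d, %d,%d, %d)",
                        &s.x1, &s.y1, &s.x2, &s.y2) == 4) {
            segments.push_back(s);     // one skeleton segment per line
        } else if (std::sscanf(line.c_str(), "key:(%d, %d)", &k.x, &k.y) == 2) {
            keypoints.push_back(k);    // one keypoint per line
        }
        // Lines starting with "person-pose" carry the box and probability;
        // they can be parsed the same way if needed.
    }
}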