OpenMMlab导出swin-transformer模型并使用onnxruntime和tensorrt推理

news/2024/12/22 0:12:19/

导出onnx文件

通过mmpretrain 导出swin-transformer的onnx文件非常容易，注意需设置 opset_version=12。这里是一个坑：刚开始设置的opset_version=11，后续转换trt engine的时候会有问题。

import torch
from mmpretrain import get_model, inference_model

# Load the pretrained Swin-Transformer tiny classifier on CPU.
model = get_model(
    'swin-tiny_16xb64_in1k',
    pretrained='swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth',
    device='cpu',
)

# Dummy input with the model's expected (N, C, H, W) = (1, 3, 224, 224).
# NOTE: renamed from `input` to stop shadowing the builtin.
dummy_input = torch.zeros(1, 3, 224, 224)

# Sanity check: one forward pass, print the predicted class index.
out = model(dummy_input)
print(torch.argmax(out, dim=1))

# Export to ONNX. opset_version must be 12: opset 11 produces a graph that
# later fails to convert to a TensorRT engine (see the article text above).
torch.onnx.export(model, dummy_input, "swin_transformer.onnx", opset_version=12)

如果安装了mmdeploy,也可以通过下面的脚本进行模型转换:

from mmdeploy.apis import torch2onnx
from mmdeploy.backend.sdk.export_info import export2SDK

# Conversion settings: sample image, output locations, deploy/model configs.
img = 'demo.JPEG'
work_dir = './work_dir/onnx/swin_transformer'
save_file = './end2end.onnx'
deploy_cfg = 'mmdeploy/configs/mmpretrain/classification_onnxruntime_dynamic.py'
model_cfg = 'mmpretrain/configs/swin_transformer/swin-tiny_16xb64_in1k.py'
model_checkpoint = './checkpoints/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth'
device = 'cpu'

# Step 1: convert the PyTorch checkpoint to an ONNX model.
torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg,
           model_checkpoint, device)

# Step 2: dump the pipeline info for SDK use (dump-info).
export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint,
           device=device)

onnxruntime推理

python推理:

import cv2
import numpy as np
import onnxruntime

use_letterbox = True
input_shape = (224, 224)


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    """Resize and pad image while meeting stride-multiple constraints."""
    shape = im.shape[:2]  # current shape [height, width]
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right,
                            cv2.BORDER_CONSTANT, value=color)  # add border
    return im


if __name__ == '__main__':
    img = cv2.imread('goldfish.jpg')
    if use_letterbox:
        or_img = letterbox(img, input_shape)
    else:
        or_img = cv2.resize(img, input_shape)

    # BGR -> RGB and HWC -> CHW
    img = or_img[:, :, ::-1].transpose(2, 0, 1)
    img = img.astype(dtype=np.float32)
    # Per-channel ImageNet normalization (mean / std).
    img[0, :] = (img[0, :] - 123.675) / 58.395
    img[1, :] = (img[1, :] - 116.28) / 57.12
    img[2, :] = (img[2, :] - 103.53) / 57.375
    img = np.expand_dims(img, axis=0)

    onnx_session = onnxruntime.InferenceSession(
        "swin_transformer.onnx",
        providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])

    # Feed the preprocessed tensor to every declared graph input.
    input_name = [node.name for node in onnx_session.get_inputs()]
    output_name = [node.name for node in onnx_session.get_outputs()]
    input_feed = {name: img for name in input_name}

    pred = onnx_session.run(None, input_feed)[0]
    print(pred)
    print(np.argmax(pred))

C++推理:

#include <iostream>
#include <vector>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include <onnxruntime_cxx_api.h>

// Classify goldfish.jpg with a Swin-Transformer ONNX model through the
// onnxruntime C++ API and print the argmax class index.
int main(int argc, char* argv[])
{
    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "cls");
    Ort::SessionOptions session_options;
    session_options.SetIntraOpNumThreads(1);
    session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

    // Optional CUDA execution provider (left disabled; CPU by default):
    //OrtCUDAProviderOptions cuda_option;
    //cuda_option.device_id = 0;
    //cuda_option.arena_extend_strategy = 0;
    //cuda_option.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchExhaustive;
    //cuda_option.gpu_mem_limit = SIZE_MAX;
    //cuda_option.do_copy_in_default_stream = 1;
    //session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
    //session_options.AppendExecutionProvider_CUDA(cuda_option);

    const wchar_t* model_path = L"swin_transformer.onnx";
    Ort::Session session(env, model_path, session_options);
    Ort::AllocatorWithDefaultOptions allocator;

    size_t num_input_nodes = session.GetInputCount();
    std::vector<const char*> input_node_names = { "input" };
    std::vector<const char*> output_node_names = { "output" };

    cv::Mat image = cv::imread("goldfish.jpg", 1);
    cv::resize(image, image, cv::Size(224, 224));

    const int height = image.rows;
    const int width = image.cols;
    const size_t input_tensor_size = 1 * 3 * height * width;
    std::vector<float> input_tensor_values(input_tensor_size);

    // Convert BGR HWC uint8 -> RGB CHW float with ImageNet mean/std.
    // BUGFIX: the original iterated i over image.cols but used i as the ROW
    // index in image.ptr<uchar>(i), transposing the image (it only "worked"
    // because the input is square). Iterate rows (h) outer, cols (w) inner.
    for (int h = 0; h < height; h++)
    {
        const uchar* row = image.ptr<uchar>(h);
        for (int w = 0; w < width; w++)
        {
            input_tensor_values[0 * height * width + h * width + w] = (row[w * 3 + 2] - 123.675) / 58.395; // R
            input_tensor_values[1 * height * width + h * width + w] = (row[w * 3 + 1] - 116.28) / 57.12;   // G
            input_tensor_values[2 * height * width + h * width + w] = (row[w * 3 + 0] - 103.53) / 57.375;  // B
        }
    }

    // NCHW input shape (N, C, H, W) — was {1, 3, cols, rows} in the original.
    std::vector<int64_t> input_node_dims = { 1, 3, height, width };
    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
        memory_info, input_tensor_values.data(), input_tensor_size,
        input_node_dims.data(), input_node_dims.size());

    std::vector<Ort::Value> ort_inputs;
    ort_inputs.push_back(std::move(input_tensor));
    std::vector<Ort::Value> output_tensors = session.Run(
        Ort::RunOptions{ nullptr },
        input_node_names.data(), ort_inputs.data(), input_node_names.size(),
        output_node_names.data(), output_node_names.size());

    const float* rawOutput = output_tensors[0].GetTensorData<float>();
    size_t count = output_tensors[0].GetTensorTypeAndShapeInfo().GetElementCount();
    std::vector<float> output(rawOutput, rawOutput + count);

    int predict_label = std::max_element(output.begin(), output.end()) - output.begin();
    std::cout << predict_label << std::endl;
    return 0;
}

如果安装了mmdeploy,也可以这样进行推理:
python推理:

import cv2
from mmdeploy_runtime import Classifier

# Run the exported SDK model directory with the mmdeploy runtime on CPU.
image = cv2.imread('mmpretrain/demo/demo.JPEG')
classifier = Classifier(model_path='work_dir/onnx/swin_transformer',
                        device_name='cpu')

# Each result is a (label_id, score) pair.
for label_id, score in classifier(image):
    print(label_id, score)

C++推理可以参考:https://github.com/open-mmlab/mmdeploy/blob/main/demo/csrc/c/image_classification.cpp

导出engine文件

这里通过trtexec转换onnx文件,LZ的版本是TensorRT-8.2.1.8。
需要先使用onnxsim简化模型,这里是第二个坑,否则会报错。

import onnx
from onnxsim import simplify

# Load the exported model and run onnx-simplifier over it; TensorRT's parser
# fails on the unsimplified graph (second pitfall noted in the article).
original_model = onnx.load("swin_transformer.onnx")
simplified_model, ok = simplify(original_model)
assert ok, "Simplified ONNX model could not be validated"
onnx.save(simplified_model, "swin_transformer_sim.onnx")

再去TensorRT的bin目录下运行

./trtexec.exe --onnx=swin_transformer_sim.onnx --saveEngine=swin_transformer.engine --workspace=20480

第三个坑是如果不加上--workspace参数可能会因内存不足报错,LZ的机器有32G内存索性就设了20G的工作空间,可以根据自己的内存大小酌情设置该参数。
至此,不出意外可以成功导出engine文件。
mmdeploy导出LZ没有尝试成功,不知道是环境配置问题还是在windows系统下库的bug。

tensorrt推理

python推理:(待补充)
C++推理:

// tensorRT include
#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <NvOnnxParser.h> // ONNX parser header
// cuda include
#include <cuda_runtime.h>
#include <opencv2/opencv.hpp>
// system include
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <algorithm>
#include <assert.h>

// Map a TensorRT severity enum to a printable string.
inline const char* severity_string(nvinfer1::ILogger::Severity t) {
    switch (t) {
    case nvinfer1::ILogger::Severity::kINTERNAL_ERROR: return "internal_error";
    case nvinfer1::ILogger::Severity::kERROR:   return "error";
    case nvinfer1::ILogger::Severity::kWARNING: return "warning";
    case nvinfer1::ILogger::Severity::kINFO:    return "info";
    case nvinfer1::ILogger::Severity::kVERBOSE: return "verbose";
    default: return "unknow";
    }
}

// Logger required by the TensorRT runtime; colors warnings (yellow) and
// errors (red) on ANSI terminals.
class TRTLogger : public nvinfer1::ILogger {
public:
    virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override {
        if (severity <= Severity::kINFO) {
            if (severity == Severity::kWARNING)
                printf("\033[33m%s: %s\033[0m\n", severity_string(severity), msg);
            else if (severity <= Severity::kERROR)
                printf("\033[31m%s: %s\033[0m\n", severity_string(severity), msg);
            else
                printf("%s: %s\n", severity_string(severity), msg);
        }
    }
} logger;

// Read an entire binary file into memory; returns an empty vector on failure.
std::vector<unsigned char> load_file(const std::string& file) {
    std::ifstream in(file, std::ios::in | std::ios::binary);
    if (!in.is_open())
        return {};
    in.seekg(0, std::ios::end);
    size_t length = in.tellg();
    std::vector<uint8_t> data;
    if (length > 0) {
        in.seekg(0, std::ios::beg);
        data.resize(length);
        in.read((char*)&data[0], length);
    }
    in.close();
    return data;
}

void inference() {
    // ------------------ 1. Load and deserialize the engine ------------------
    TRTLogger logger;
    auto engine_data = load_file("swin_transformer.engine");
    // A runtime instance is required before inference; like the builder it
    // needs a logger.
    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
    // Deserialize the bytes read into engine_data to obtain the engine.
    nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(engine_data.data(), engine_data.size());
    if (engine == nullptr) {
        printf("Deserialize cuda engine failed.\n");
        runtime->destroy();
        return;
    }

    nvinfer1::IExecutionContext* execution_context = engine->createExecutionContext();
    cudaStream_t stream = nullptr;
    // Create a CUDA stream so this batch's inference is independent.
    cudaStreamCreate(&stream);

    // ------------- 2. Preprocess the input and copy it to the GPU -------------
    cv::Mat image = cv::imread("goldfish.jpg", 1);
    cv::resize(image, image, cv::Size(224, 224));
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);

    int input_numel = 1 * 3 * image.rows * image.cols;
    float* input_data_host = nullptr;
    cudaMallocHost(&input_data_host, input_numel * sizeof(float));

    int image_area = image.cols * image.rows;
    unsigned char* pimage = image.data; // interleaved R,G,B after cvtColor
    float* phost_r = input_data_host + image_area * 0;
    float* phost_g = input_data_host + image_area * 1;
    float* phost_b = input_data_host + image_area * 2;
    // BUGFIX: the original wrote pimage[0] (R after cvtColor, mean 123.675)
    // into channel 2 and pimage[2] (B) into channel 0, handing the network
    // BGR-ordered data while the Python preprocessing produces RGB. Since the
    // image is already RGB here, channel 0 must receive pimage[0].
    for (int i = 0; i < image_area; ++i, pimage += 3) {
        *phost_r++ = (pimage[0] - 123.675) / 58.395;
        *phost_g++ = (pimage[1] - 116.28) / 57.12;
        *phost_b++ = (pimage[2] - 103.53) / 57.375;
    }

    float* input_data_device = nullptr;
    float output_data_host[1000];
    float* output_data_device = nullptr;
    cudaMalloc(&input_data_device, input_numel * sizeof(float));
    cudaMalloc(&output_data_device, sizeof(output_data_host));
    cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream);

    // Bindings: device pointers for the engine's input and output tensors.
    float* bindings[] = { input_data_device, output_data_device };

    // ----------- 3. Run inference and copy the result back to CPU -----------
    bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
    cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    int predict_label = std::max_element(output_data_host, output_data_host + 1000) - output_data_host;
    std::cout << "predict_label: " << predict_label << std::endl;

    // ------------------------ 4. Release resources ------------------------
    // BUGFIX: the original leaked the pinned host buffer and both device
    // buffers; free them before tearing down the stream/context/engine.
    cudaFreeHost(input_data_host);
    cudaFree(input_data_device);
    cudaFree(output_data_device);
    cudaStreamDestroy(stream);
    execution_context->destroy();
    engine->destroy();
    runtime->destroy();
}

int main() {
    inference();
    return 0;
}

http://www.ppmy.cn/news/1167656.html

相关文章

Nginx平滑升级重定向rewrite

文章目录 Nginx平滑升级&重定向rewrite nginx平滑升级流程 环境查看 旧版的配置信息 下载新版nginx源码包和功能模块包 编译配置新版本 平滑升级验证 重定向rewrite 配置重定向转发 访问测试 Nginx平滑升级&重定向rewrite nginx平滑升级 流程 平滑升级: (升级版本、增加新功…

使用 OpenSSL 扩展来实现公钥和私钥加密

首先，你需要生成一对公钥和私钥。可以使用 OpenSSL 工具来生成： 1、生成私钥 openssl genpkey -algorithm RSA -out private_key.pem 2、从私钥生成公钥： openssl rsa -pubout -in private_key.pem -out public_key.pem 现在你有了一个私钥…

Leetcode—2652.倍数求和【简单】

2023每日刷题（四） Leetcode—2652.倍数求和 实现代码 int sumOfMultiples(int n){ int ans = 0; for(int i = 1; i <= n; i++) { if((i % 3 == 0) || (i % 5 == 0) || (i % 7 == 0)) { ans += i; } } return ans; } 测试结果 之后我会持续更新，如果喜欢我的文…

Jenkins自动化部署SpringBoot项目的实现

本文主要介绍了Jenkins自动化部署SpringBoot项目的实现，文中通过示例代码介绍的非常详细，具有一定的参考价值，感兴趣的小伙伴们可以参考一下 1、Jenkins介绍 1.1、概念 Jenkins是一个开源软件项目，是基于Java开发的一种持续集成…

Android 13.0 开机动画支持mp4格式视频作为开机动画播放

1.概述 在13.0的系统产品开发中,在系统开机动画这块一般情况下都是播放开机图片,然后绘制多张开机图片形成开机动画模式,而产品需求要求支持开机mp4格式的短视频来作为开机动画播放视频来介绍产品情况,就需要用开机视频来替代开机动画来实现功能 2.开机动画支持mp4格式视频…

vue的v-for中循环修改变量(this.xxx)的给子组件传值覆盖重复的问题

遇到问题 使用v-for，其中需要根据不同的item修改某个变量(this.xxx)，然后向子组件中传值，但是发现传到子组件中的值却全是重复一样的： 我们循环qsList，其中&lt;qs-form&gt;是我自定义的一个组件，想向该子组…

python读写.pptx文件

1、读取PPT import pptx ppt = pptx.Presentation(r'C:\Users\user\Documents\2.pptx') # ppt.save(r'C:\Users\user\Documents\1.pptx') # slide = ppt.slides.add_slide(ppt.slide_layouts[1]) # 读取所有幻灯片上的文字 for slide in ppt.slides: for shape in slide.shapes: if shape…

sobel边缘检测算法

Sobel边缘检测算法是一种常用的图像处理技术，用于检测图像中的边缘和轮廓。该算法基于离散卷积操作，通过对图像进行滤波来寻找图像中灰度值的变化。Sobel算子是一种常用的卷积核，用于检测图像中的垂直边缘和水平边缘。以下是Sobel边缘检测算法…