49. R-YOLOv4 Rotated Object Detection, with ncnn and MNN Deployment


Basic idea: I need a simple rotated object detection algorithm for a business task, one that also outputs the rectified angle information.

Step 1: Clone the author's source code

ubuntu@ubuntu:~$ git clone https://github.com/kunnnnethan/R-YOLOv4.git

Download the weights and test the author's trash-detection model; note that the detection input size is 416.

Original test image:


Result image:


Step 2: Write a pytorch2onnx.py script

1) Modify the model's return value in /home/ubuntu/R-YOLOv4/model/yolo.py:

return torch.cat([y1, y2, y3], 1), (loss1 + loss2 + loss3)

to

return torch.cat([y1, y2, y3], 1)#, (loss1 + loss2 + loss3)

2) Then use the following script to export the model to ONNX (dropping the loss term leaves a single tensor output, which the tracer/exporter needs):

import argparse
import os
import warnings

import cv2
import numpy as np
import onnxruntime
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from onnxruntime.datasets import get_example

from lib.post_process import post_process
from lib.plot import plot_boxes
from model.yolo import Yolo

warnings.filterwarnings('ignore')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def resize(image, size):
    image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0)
    return image


def pad_to_square(img, pad_value):
    c, h, w = img.shape
    dim_diff = np.abs(h - w)
    # (upper / left) padding and (lower / right) padding
    pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
    # Determine padding
    pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)
    # Add padding
    img = F.pad(img, pad, "constant", value=pad_value)
    return img, pad


def pad_to_square_sub(img, pad_value):
    h, w, c = img.shape
    dim_diff = np.abs(h - w)
    # (upper / left) padding and (lower / right) padding
    pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
    # Determine padding
    pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)
    # Add padding
    img = cv2.copyMakeBorder(img, pad[2], pad[3], pad[0], pad[1], cv2.BORDER_CONSTANT)
    return img, pad


def pre_process(args):
    img = cv2.imread(args.image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img, _ = pad_to_square_sub(img, 0)
    image_resized = cv2.resize(img, (args.img_size, args.img_size))
    image_np = image_resized / 255.0  # normalize to 0~1
    image_exp = np.expand_dims(image_np, axis=0)
    image_transposed = image_exp.transpose((0, 3, 1, 2))
    image_transposed = torch.from_numpy(image_transposed)
    return image_transposed.float()


def getInputCv(args):
    # Extract image as PyTorch tensor
    img = cv2.imread(args.image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = transforms.ToTensor()(img)
    # Pad to square resolution
    img, _ = pad_to_square(img, 0)
    # Resize
    img = resize(img, args.img_size)
    return img.unsqueeze(0)


def save_results(args, imgs, boxes, dir):
    if not os.path.exists(dir):
        os.makedirs(dir)
    for i, (img_path, box) in enumerate(zip(imgs, boxes)):
        plot_boxes(img_path, box, args.class_names, args.img_size, dir)


def infertorch(args, dummy_input):
    dummy_input = dummy_input.to(device)
    pretrained_dict = torch.load(args.model_path, map_location=device)
    model = Yolo(n_classes=args.class_num)
    model = model.to(device)
    model.load_state_dict(pretrained_dict)
    model.eval()
    output = model(dummy_input)  # batch=1 -> [1, n, n], batch=3 -> [3, n, n]
    box = post_process(output, args.conf_thres, args.nms_thres)
    print("box=", box)
    save_results(args, [args.image_path], box, args.pt_jpg)
    return model, dummy_input


def torch2onnx(args, model, input):
    input_names = ['input']    # name of the model input
    output_names = ['output']  # name of the model output
    print("====", input.shape)
    torch_out = torch.onnx.export(model, input, args.onnx_model_path,
                                  verbose=False, input_names=input_names,
                                  output_names=output_names, opset_version=11)

    # test onnx model
    example_model = get_example(args.onnx_model_path)
    session = onnxruntime.InferenceSession(example_model)
    # get the name of the first input of the model
    input_name = session.get_inputs()[0].name
    # print('onnx Input Name:', input_name)
    result = session.run([], {input_name: input.data.cpu().numpy()})
    result = torch.from_numpy(result[0])
    box = post_process(result, args.conf_thres, args.nms_thres)
    print("box=", box)
    print("onnx->> model conversion succeeded!")
    save_results(args, [args.image_path], box, args.onnx_jpg)


def torch2libtorch(args, model, dummy_input):
    ts = torch.jit.trace(model, dummy_input)
    ts.save(args.jt_model_path)
    model = torch.jit.load(args.jt_model_path)
    model = model.to(device)
    model.eval()
    output = model(dummy_input)
    box = post_process(output, args.conf_thres, args.nms_thres)
    print("box=", box)
    print("jt->> model conversion succeeded!")


def main(args):
    # dummy_input = getInputCv(args)  # the author's torchvision-based input
    dummy_input = pre_process(args)   # my OpenCV re-implementation of the same preprocessing
    model, dummy_input = infertorch(args, dummy_input)
    torch2onnx(args, model, dummy_input)
    torch2libtorch(args, model, dummy_input)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="PyTorch model to onnx and mnn ncnn")
    parser.add_argument('--image_path', type=str, default=r"/home/ubuntu/R-YOLOv4/data/trash/trash/detect/56.jpg",
                        help="path of the test image")
    parser.add_argument('--model_path', type=str,
                        default=r"/home/ubuntu/R-YOLOv4/weights/trash/ryolov4.pth",
                        help="path of the trained .pth weights")
    parser.add_argument('--save_model_path', type=str, default=r"/home/ubuntu/R-YOLOv4/weights/trash/ryolov4",
                        help="prefix for saved models")
    parser.add_argument('--onnx_model_path', type=str,
                        default=r"/home/ubuntu/R-YOLOv4/weights/trash/ryolov4.onnx",
                        help="path of the exported ONNX model")
    parser.add_argument('--sim_output', type=str,
                        default=r"/home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.onnx",
                        help="path of the simplified ONNX model")
    parser.add_argument('--jt_model_path', type=str,
                        default=r"/home/ubuntu/R-YOLOv4/weights/trash/ryolov4_jc.pt",
                        help="path of the TorchScript model")
    parser.add_argument('--img_size', type=int, default=416,
                        help="the image size of model input")
    parser.add_argument('--class_num', type=int, default=2,
                        help="number of classes")
    parser.add_argument('--conf_thres', type=float, default=0.7,
                        help="confidence threshold")
    parser.add_argument('--nms_thres', type=float, default=0.2,
                        help="NMS IoU threshold")
    parser.add_argument('--class_names', nargs='+', type=str, default=['drink carton', 'aluminum can'],
                        help="class names")
    parser.add_argument('--pt_jpg', type=str, default="./pt",
                        help="output dir for PyTorch results")
    parser.add_argument('--onnx_jpg', type=str, default="./onnx",
                        help="output dir for ONNX results")

    args = parser.parse_args()
    main(args)

The exported models produce results consistent with the PyTorch inference:

/usr/bin/python3.8 /home/ubuntu/R-YOLOv4/py2onnx.py
box= [tensor([[144.3747, 253.6161,  53.8617, 112.6256,   0.9937,   0.9415,   0.9997,   0.0000],
        [160.5037, 170.0593,  53.2183, 105.7061,  -1.5103,   0.9586,   0.9994,   1.0000]])]
==== torch.Size([1, 3, 416, 416])
box= [tensor([[144.3747, 253.6162,  53.8612, 112.6272,   0.9937,   0.9415,   0.9997,   0.0000],
        [160.5033, 170.0592,  53.2184, 105.7061,  -1.5103,   0.9586,   0.9994,   1.0000]])]
onnx->> model conversion succeeded!
box= [tensor([[144.3747, 253.6161,  53.8617, 112.6256,   0.9937,   0.9415,   0.9997,   0.0000],
        [160.5037, 170.0593,  53.2183, 105.7061,  -1.5103,   0.9586,   0.9994,   1.0000]])]
jt->> model conversion succeeded!

Process finished with exit code 0
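
The script exports ryolov4.onnx, while the MNN and ncnn converters below consume ryolov4_sim.onnx. A minimal simplification sketch using onnx-simplifier (assuming the onnxsim package is installed; the paths match the --onnx_model_path / --sim_output defaults above):

import onnx
from onnxsim import simplify

# load the raw export, fold constants and eliminate redundant ops
model = onnx.load("/home/ubuntu/R-YOLOv4/weights/trash/ryolov4.onnx")
model_sim, check = simplify(model)
assert check, "simplified ONNX model failed validation"
onnx.save(model_sim, "/home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.onnx")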

Step 3: Convert to an MNN model and test with code

ubuntu@ubuntu:~/MNN/build$ ./MNNConvert -f ONNX --modelFile /home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.onnx --MNNModel /home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.mnn --bizCode MNN
Start to Convert Other Model Format To MNN Model...
[15:48:05] /home/ubuntu/MNN/tools/converter/source/onnx/onnxConverter.cpp:40: ONNX Model ir version: 6
Start to Optimize the MNN Net...
inputTensors : [ input, ]
outputTensors: [ output, ]
Converted Success!
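
Before writing the C++ demo, the converted model can be sanity-checked against the ONNX output. A hedged sketch, assuming the MNN Python wheel is installed (the session API below follows MNN's documented Python interface):

import MNN
import numpy as np

x = np.random.rand(1, 3, 416, 416).astype(np.float32)

interpreter = MNN.Interpreter("/home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.mnn")
session = interpreter.createSession()
inp = interpreter.getSessionInput(session)
# feed an NCHW float tensor (Caffe dimension order)
inp.copyFrom(MNN.Tensor((1, 3, 416, 416), MNN.Halide_Type_Float, x, MNN.Tensor_DimensionType_Caffe))
interpreter.runSession(session)
out = interpreter.getSessionOutput(session)
print(np.array(out.getData()).shape)  # compare against onnxruntime's 'output'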

CMakeLists.txt

cmake_minimum_required(VERSION 3.16)
project(untitled10)
set(CMAKE_CXX_FLAGS "-std=c++11")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp ")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
set(CMAKE_CXX_STANDARD 11)
include_directories(${CMAKE_SOURCE_DIR})
include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_SOURCE_DIR}/include/MNN)
find_package(OpenCV REQUIRED)
#message(STATUS ${OpenCV_INCLUDE_DIRS})
# add the OpenCV header search paths
include_directories(${OpenCV_INCLUDE_DIRS})
# link the OpenCV libraries

add_library(libmnn SHARED IMPORTED)
#set_target_properties(libncnn PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libncnn.a)

set_target_properties(libmnn PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libMNN.so)
add_executable(untitled10 main.cpp polyiou.cpp)
target_link_libraries(untitled10 ${OpenCV_LIBS} libmnn )

Test code. I reworked the post-processing based on my own understanding; the commercial version is not released. For polyiou.cpp / polyiou.h, see my earlier post: 42. Rotated object detection with k3det from mmrotate, with MNN and ncnn deployment (sxj731533730's blog).
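
The rotated NMS needs an IoU between arbitrary quadrilaterals. As a readable reference for what polyiou computes, here is a hedged Python sketch using OpenCV's rotatedRectangleIntersection in place of the polygon-clipping code (illustrative only, not the shipped implementation):

import cv2

def skew_iou(rect1, rect2):
    # rects are ((cx, cy), (w, h), angle_in_degrees), the cv2 rotated-rect format
    area1 = rect1[1][0] * rect1[1][1]
    area2 = rect2[1][0] * rect2[1][1]
    ret, inter_pts = cv2.rotatedRectangleIntersection(rect1, rect2)
    if inter_pts is None:
        return 0.0  # no overlap
    inter = cv2.contourArea(cv2.convexHull(inter_pts))
    return inter / (area1 + area2 - inter)

print(skew_iou(((100, 100), (50, 30), 0.0), ((110, 100), (50, 30), 15.0)))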


#include <iostream>
#include <algorithm>
#include <vector>
#include <set>
#include <numeric>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <MNN/Interpreter.hpp>
#include <MNN/ImageProcess.hpp>
#include "polyiou.h"

#if NCNN_VULKAN
#include "gpu.h"
#endif // NCNN_VULKAN


struct Ploy {
    float p1_0;
    float p1_1;
    float p2_0;
    float p2_1;
    float p3_0;
    float p3_1;
    float p4_0;
    float p4_1;

    Ploy() : p1_0(0), p1_1(0), p2_0(0), p2_1(0), p3_0(0), p3_1(0), p4_0(0), p4_1(0) {}

    Ploy(float p1_0, float p1_1, float p2_0, float p2_1, float p3_0, float p3_1, float p4_0, float p4_1)
            : p1_0(p1_0), p1_1(p1_1), p2_0(p2_0), p2_1(p2_1), p3_0(p3_0), p3_1(p3_1), p4_0(p4_0), p4_1(p4_1) {}
};

struct BBox {
    int idx;
    float conf;
    Ploy ploy;

    BBox() : idx(0), conf(0), ploy(0, 0, 0, 0, 0, 0, 0, 0) {}

    BBox(int idx, float conf, Ploy ploy) : idx(idx), conf(conf), ploy(ploy) {}
};

// pad the shorter side so the image becomes square (same as the Python pad_to_square)
void pad_to_square(cv::Mat img, cv::Mat &dst) {
    int h = img.rows;
    int w = img.cols;
    int dim_diff = std::abs(h - w);
    int pad1 = dim_diff / 2;
    int pad2 = dim_diff - dim_diff / 2;
    int top = 0, bottom = 0, left = 0, right = 0;
    if (h <= w) {
        top = pad1;
        bottom = pad2;
    } else {
        left = pad1;
        right = pad2;
    }
    cv::copyMakeBorder(img, dst, top, bottom, left, right, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
}

// sort indices in descending order of the values in v
void sort_indexes(const std::vector<float> &v, std::vector<int> &keep_idx) {
    std::iota(keep_idx.begin(), keep_idx.end(), 0);
    std::sort(keep_idx.begin(), keep_idx.end(), [&v](int i1, int i2) { return v[i1] > v[i2]; });
}

// convert (x, y, w, h, angle) to the 4 corner points of the rotated box
void rbox2polygon(std::vector<float> item, cv::Mat &dst) {
    float x = item[0];
    float y = item[1];
    float w = item[2];
    float h = item[3];
    float angle = item[4] / M_PI * 180;
    cv::RotatedRect box(cv::Point2f(x + w / 2, y + h / 2), cv::Size2f(w, h), angle);
    cv::boxPoints(box, dst); // corner order: bottom-left, top-left, top-right, bottom-right
}

// map a box from the padded 416x416 input back to the original image
void rescale_boxes(std::vector<float> original, std::vector<Ploy> &result_rotate_detect, int current_dim,
                   int original_shape[]) {
    int orig_h = original_shape[0];
    int orig_w = original_shape[1];

    float pad_x = std::max(orig_h - orig_w, 0) * ((float) current_dim / std::max(original_shape[0], original_shape[1]));
    float pad_y = std::max(orig_w - orig_h, 0) * ((float) current_dim / std::max(original_shape[0], original_shape[1]));

    float unpad_h = current_dim - pad_y;
    float unpad_w = current_dim - pad_x;

    float x1 = original[0];
    float y1 = original[1];
    float x2 = original[0] + original[2];
    float y2 = original[1] + original[3];
    x1 = ((x1 - pad_x / 2) / unpad_w) * orig_w;
    y1 = ((y1 - pad_y / 2) / unpad_h) * orig_h;
    x2 = ((x2 - pad_x / 2) / unpad_w) * orig_w;
    y2 = ((y2 - pad_y / 2) / unpad_h) * orig_h;

    int w = (x2 - x1);
    int h = (y2 - y1);
    cv::Mat pre_dst;
    float angle = original[4] / M_PI * 180;
    cv::RotatedRect box(cv::Point2f(x1, y1), cv::Size2f(w, h), angle);
    cv::boxPoints(box, pre_dst); // corner order: bottom-left, top-left, top-right, bottom-right

    float a0 = pre_dst.at<float>(0, 0);
    float b0 = pre_dst.at<float>(0, 1);
    float a1 = pre_dst.at<float>(1, 0);
    float b1 = pre_dst.at<float>(1, 1);
    float a2 = pre_dst.at<float>(2, 0);
    float b2 = pre_dst.at<float>(2, 1);
    float a3 = pre_dst.at<float>(3, 0);
    float b3 = pre_dst.at<float>(3, 1);
    result_rotate_detect.emplace_back(Ploy(a0, b0, a1, b1, a2, b2, a3, b3));
}

// collect corner polygons and confidences for all detections of one class
void skewiou(std::vector<std::vector<float>> detect, std::vector<Ploy> &rotate_detect, std::vector<float> &vec_conf) {
    for (int i = 0; i < detect.size(); i++) {
        cv::Mat pre_dst;
        rbox2polygon(detect[i], pre_dst);

        float x0 = pre_dst.at<float>(0, 0);
        float y0 = pre_dst.at<float>(0, 1);
        float x1 = pre_dst.at<float>(1, 0);
        float y1 = pre_dst.at<float>(1, 1);
        float x2 = pre_dst.at<float>(2, 0);
        float y2 = pre_dst.at<float>(2, 1);
        float x3 = pre_dst.at<float>(3, 0);
        float y3 = pre_dst.at<float>(3, 1);
        float conf = detect[i][5];
        vec_conf.emplace_back(conf);
        rotate_detect.emplace_back(Ploy(x0, y0, x1, y1, x2, y2, x3, y3));
    }
}

// IoU of two rotated polygons via polyiou
float iou(Ploy &r1, Ploy &r2) {
    std::vector<double> p = {r1.p1_0, r1.p1_1, r1.p2_0, r1.p2_1, r1.p3_0, r1.p3_1, r1.p4_0, r1.p4_1};
    std::vector<double> q = {r2.p1_0, r2.p1_1, r2.p2_0, r2.p2_1, r2.p3_0, r2.p3_1, r2.p4_0, r2.p4_1};
    return iou_poly(p, q);
}


// NMS for a single class on rotated polygons
void single_class_non_max_suppression(std::vector<Ploy> ploys, std::vector<float> confs, std::vector<Ploy> &ans,
                                      std::vector<int> &keep_idx, float conf_thresh, float iou_thresh) {
    if (ploys.size() == 0) {
        return;
    }
    std::vector<BBox> bboxes;
    BBox bbox;

    for (int i = 0; i < (int) ploys.size(); ++i) {
        bboxes.push_back(BBox(i, confs[i], ploys[i]));
    }
    // sort bboxes by conf in descending order
    sort(bboxes.begin(), bboxes.end(), [&](const BBox &a, const BBox &b) {
        return a.conf > b.conf;
    });
    while (!bboxes.empty()) {
        bbox = bboxes[0];
        if (bbox.conf < conf_thresh) {
            break;
        }
        keep_idx.emplace_back(bbox.idx);
        bboxes.erase(bboxes.begin());
        // suppress remaining bboxes whose IoU with the highest-conf bbox is too large
        int size = bboxes.size();
        for (int i = 0; i < size; ++i) {
            float iou_ans = iou(bbox.ploy, bboxes[i].ploy);
            if (iou_ans > iou_thresh) {
                bboxes.erase(bboxes.begin() + i);
                size = bboxes.size();
                i = i - 1;
            }
        }
    }

    for (const int number : keep_idx) {
        ans.push_back(ploys[number]);
    }
}


int main(int argc, char **argv) {
    int target_image_size = 416;
    float conf_thres = 0.7;
    const char *classes[] = {"drink carton", "aluminum can"};
    cv::Mat img = cv::imread("../56.jpg");

    int original_shape[] = {img.rows, img.cols};
    cv::Mat dst;
    pad_to_square(img, dst);
    cv::Mat res_img;
    cv::resize(dst, res_img, cv::Size(target_image_size, target_image_size), 0, 0, cv::INTER_NEAREST);

    // MNN inference
    auto mnnNet = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile("../ryolov4_sim.mnn"));
    MNN::ScheduleConfig netConfig;
    netConfig.type = MNN_FORWARD_CPU;
    netConfig.numThread = 4;

    auto session = mnnNet->createSession(netConfig);
    auto input = mnnNet->getSessionInput(session, nullptr);

    mnnNet->resizeTensor(input, {1, 3, (int) target_image_size, (int) target_image_size});
    mnnNet->resizeSession(session);

    // same preprocessing as the Python side: BGR->RGB, scale to [0, 1]
    const float mean_vals[3] = {0, 0, 0};
    const float norm_255[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};

    std::shared_ptr<MNN::CV::ImageProcess> pretreat(
            MNN::CV::ImageProcess::create(MNN::CV::BGR, MNN::CV::RGB, mean_vals, 3, norm_255, 3));
    pretreat->convert(res_img.data, (int) target_image_size, (int) target_image_size, res_img.step[0], input);

    mnnNet->runSession(session);

    auto mnnRotate = mnnNet->getSessionOutput(session, "output");

    MNN::Tensor rotateHost(mnnRotate, mnnRotate->getDimensionType());
    mnnRotate->copyToHostTensor(&rotateHost);

    std::vector<float> vec_result;
    std::vector<float> vec_image_pred;
    int shape_h = rotateHost.height();
    int shape_c = rotateHost.channel();
    int shape_w = rotateHost.width();
    int shape_s = rotateHost.size();
    printf("---c= %d w= %d h= %d s= %d ----\n", shape_c, shape_w, shape_h, shape_s);

    // each detection occupies shape_h floats: x, y, w, h, angle, conf, class scores...
    for (int i = 0; i < rotateHost.elementSize(); i += shape_h) {
        float value = rotateHost.host<float>()[i + 5]; // objectness confidence
        if (value >= conf_thres) {
            for (int j = 0; j < shape_h; j++) {
                vec_image_pred.emplace_back(rotateHost.host<float>()[i + j]);
            }
        }
    }
    // score = objectness * best class score
    std::vector<float> vec_score;
    for (int i = 0; i < vec_image_pred.size(); i += shape_h) {
        float maxValue = *max_element(vec_image_pred.begin() + i + 6, vec_image_pred.begin() + i + shape_h);
        vec_score.emplace_back(vec_image_pred[i + 5] * maxValue);
    }
    std::vector<int> keep_idx;
    keep_idx.resize(vec_score.size());
    sort_indexes(vec_score, keep_idx);

    std::vector<float> class_conf;
    std::vector<int> class_preds;
    std::vector<float> detections;
    std::vector<int> label_unique;
    for (int i = 0; i < keep_idx.size(); i++) {
        int index = keep_idx[i];
        float maxValue = *max_element(vec_image_pred.begin() + index * shape_h + 6,
                                      vec_image_pred.begin() + index * shape_h + shape_h);
        class_conf.emplace_back(maxValue);

        int maxPosition = max_element(vec_image_pred.begin() + index * shape_h + 6,
                                      vec_image_pred.begin() + index * shape_h + shape_h) -
                          (vec_image_pred.begin() + index * shape_h + 6);

        class_preds.emplace_back(maxPosition);
        // keep x, y, w, h, angle, conf, then append best class score and class id
        for (int j = 0; j < shape_h - 2; j++) {
            detections.emplace_back(vec_image_pred[index * shape_h + j]);
        }
        detections.emplace_back(maxValue);
        detections.emplace_back(maxPosition);
        label_unique.emplace_back(maxPosition);
    }

    // deduplicate the detected class ids
    std::set<int> s(label_unique.begin(), label_unique.end());
    label_unique.assign(s.begin(), s.end());

    // per-class rotated NMS, then draw the best box of each class
    for (int i = 0; i < label_unique.size(); i++) {
        std::vector<std::vector<float>> detect;
        std::vector<Ploy> vec_rotate_detect;
        std::vector<Ploy> vec_ans, result_rotate_detect;
        std::vector<float> vec_conf;
        std::vector<int> keep_idx;
        for (int j = 0; j < detections.size(); j += shape_h) {
            std::vector<float> item;
            if (int(detections[j + shape_h - 1]) == label_unique[i]) {
                for (int k = 0; k < shape_h; k++) {
                    item.emplace_back(detections[j + k]);
                }
                detect.emplace_back(item);
            }
        }
        skewiou(detect, vec_rotate_detect, vec_conf);

        single_class_non_max_suppression(vec_rotate_detect, vec_conf, vec_ans, keep_idx, 0.5, 0.4);
        if (keep_idx.size()) {
            rescale_boxes(detect[keep_idx[0]], result_rotate_detect, target_image_size, original_shape);

            std::vector<std::vector<cv::Point>> contours;
            std::vector<cv::Point> contours_item;

            contours_item.push_back(cv::Point(result_rotate_detect[0].p1_0, result_rotate_detect[0].p1_1));
            contours_item.push_back(cv::Point(result_rotate_detect[0].p2_0, result_rotate_detect[0].p2_1));
            contours_item.push_back(cv::Point(result_rotate_detect[0].p3_0, result_rotate_detect[0].p3_1));
            contours_item.push_back(cv::Point(result_rotate_detect[0].p4_0, result_rotate_detect[0].p4_1));
            int xc = result_rotate_detect[0].p1_0;
            int yc = result_rotate_detect[0].p1_1;
            contours.push_back(contours_item);
            cv::drawContours(img, contours, -1, cv::Scalar(0, 255, 255), 2);
            char text[256];
            // label with the class of the current group, not a hard-coded index
            sprintf(text, "%s %.2f%%", classes[label_unique[i]], vec_conf[keep_idx[0]]);
            int baseLine = 0;
            float font_scale = 0.25;
            cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, font_scale, 1, &baseLine);
            int text_width = label_size.width;
            int text_height = label_size.height;
            cv::rectangle(img, cv::Point(int(xc), int(yc) - text_height - 2),
                          cv::Point(int(xc) + text_width, int(yc) + 3),
                          cv::Scalar(0, 128, 0), -1);

            cv::putText(img, text, cv::Point(int(xc), int(yc)),
                        cv::FONT_HERSHEY_SIMPLEX, font_scale, cv::Scalar(255, 0, 0));
        }
    }
    cv::resize(img, img, cv::Size(640, 480));
    cv::imshow("image", img);
    cv::waitKey(0);

    mnnNet->releaseModel();
    mnnNet->releaseSession(session);
    return 0;
}
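
For reference, the rbox2polygon helper above matches this Python equivalent, which makes the radian-to-degree conversion and the cv2.boxPoints corner order easy to verify interactively (a sketch; the sample values are taken from the box printout earlier):

import cv2
import numpy as np

def rbox2polygon(x, y, w, h, angle_rad):
    # center is (x + w/2, y + h/2); boxPoints returns the corners in order
    # bottom-left, top-left, top-right, bottom-right
    box = ((x + w / 2, y + h / 2), (w, h), angle_rad / np.pi * 180)
    return cv2.boxPoints(box)

print(rbox2polygon(144.3747, 253.6161, 53.8617, 112.6256, 0.9937))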

Test result (the original image is large, so it is shown scaled down):


Step 4: Convert to an ncnn model and test. Conversion with pnnx also fails (problems with high-dimensional data), so for this example you really have to learn how to modify the model for ncnn!

ubuntu@ubuntu:~/ncnn/build/install/bin$ ./onnx2ncnn /home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.onnx /home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.param /home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.bin
Expand not supported yet!
Expand not supported yet!
Unsupported slice axes !
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
Unsupported slice axes !
Unsupported slice axes !
Unsupported slice axes !
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
Unsupported slice axes !
Unsupported slice axes !
Unsupported slice axes !
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
ScatterND not supported yet!
Unsupported slice axes !
Unsupported slice axes !
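
These Expand/Slice/ScatterND ops come from the box-decoding math that the exporter traced out of model/yolo.py. A small sketch (assuming the onnx package) to count the offending node types in the exported graph, which helps locate the decode subgraph that has to be cut out or rewritten:

import onnx
from collections import Counter

model = onnx.load("/home/ubuntu/R-YOLOv4/weights/trash/ryolov4_sim.onnx")
# tally node types; the Expand/Slice/ScatterND entries mark the decode subgraph
print(Counter(node.op_type for node in model.graph.node))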

Modifying the model for ncnn

To be added.
