45. NCNN: Parsing and Using ONNX Models (YOLOv5)


Basic idea: convert several model formats into ncnn models so that they can be called through ncnn.

Note: this walkthrough uses the yolov5 code at release tag 2!

First download the ncnn source code (ncnn is Tencent Youtu Lab's first open-source project: a high-performance neural-network forward-inference framework heavily optimized for mobile) and compile it to generate the conversion executables for each module:

ubuntu@ubuntu:~$ git clone https://github.com/Tencent/ncnn
ubuntu@ubuntu:~$ cd ncnn/
ubuntu@ubuntu:~/ncnn$ sh package.sh
ubuntu@ubuntu:~/ncnn$ ./build.sh
ubuntu@ubuntu:~/ncnn$ mkdir -p build
ubuntu@ubuntu:~/ncnn$ cd build
ubuntu@ubuntu:~/ncnn/build$ cmake ..
ubuntu@ubuntu:~/ncnn/build$ make -j8
ubuntu@ubuntu:~/ncnn/build$ sudo make install

ONNX → NCNN

Download the yolov5s.pt model:

ubuntu@ubuntu:~$ git clone https://github.com/ultralytics/yolov5 
ubuntu@ubuntu:~$ cd yolov5
ubuntu@ubuntu:~/yolov5$ pip install -r requirements.txt
ubuntu@ubuntu:~/yolov5$ pip install "onnx>=1.7.0"
ubuntu@ubuntu:~/yolov5$ pip install coremltools==4.0
ubuntu@ubuntu:~/yolov5$ pip install onnx-simplifier

Export the model to ONNX, simplify it, and then run the conversion executable built from ncnn:

ubuntu@ubuntu:~/yolov5$ python3 models/export.py --weights yolov5s.pt --img 640 --batch 1 
ubuntu@ubuntu:~/yolov5$ python3 -m onnxsim yolov5s.onnx yolov5s-smi.onnx
Simplifying...
Checking 0/3...
Checking 1/3...
Checking 2/3...
Ok!


ubuntu@ubuntu:~/ncnn/build/tools/onnx$ ./onnx2ncnn /home/ubuntu/yolov5/yolov5s-smi.onnx /home/ubuntu/yolov5/models/yolov5s-smi.param /home/ubuntu/yolov5/models/yolov5s-smi.bin


OR

ubuntu@ubuntu:~/yolov5$ python3 models/export.py --weights yolov5l.pt --img 640 --batch 1
ubuntu@ubuntu:~/yolov5$ python3 -m onnxsim yolov5l.onnx yolov5l-smi.onnx
Simplifying...
Checking 0/3...
Checking 1/3...
Checking 2/3...
Ok!

ubuntu@ubuntu:~/ncnn/build/tools/onnx$ ./onnx2ncnn /home/ubuntu/yolov5/yolov5l-smi.onnx /home/ubuntu/yolov5/models/yolov5l-smi.param /home/ubuntu/yolov5/models/yolov5l-smi.bin

Reference: param and model file structure · Tencent/ncnn Wiki · GitHub

Reference (Zhihu): 详细记录u版YOLOv5目标检测ncnn实现 - 知乎


Corresponding edits to yolov5l-smi.param:

[figure: original yolov5l-smi.param header]

Delete the ten lines under the Input layer (these implement the Focus slice, which onnx2ncnn typically flags with "Unsupported slice step" warnings during conversion) and insert one custom YoloV5Focus layer in their place, connecting its output to blob 263, which the layer below already consumes. The layer count on the second line of the param then becomes 309 − 10 + 1 (the newly built layer) = 300. A sketch of the edited header follows the figure.

(The figure below contains an error: its second line should read 300 350.)

[figure: edited param header]
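As a minimal sketch of the edit (blob and layer names taken from the figures above; your exported numbers may differ), the head of the edited param looks like:

7767517
300 350
Input            images   0 1 images
YoloV5Focus      focus    1 1 images 263

Everything below the inserted layer stays untouched, since the following layer already reads blob 263 as its input.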

Also modify the network's output layers:

[figure: original output layers in the param]

changing them to:

[figure: modified output layers]

That is, open the generated param file and edit it to match the right-hand side of the figure.

Appendix: the edit history for a model I trained myself.

Original yolo5s-smi.param:

[figure: original yolo5s-smi.param]

After editing:

[figure: edited yolo5s-smi.param]

The parameters at the tail of the param file:

[figure: original tail of the param file]

After editing:

[figure: edited tail of the param file]
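As a sketch of that tail edit (following the Zhihu write-up referenced above; the Reshape_xxx / aaa / bbb names here are placeholders, and exact grid sizes depend on your export), the fixed sizes in the three output Reshape layers are replaced with -1 so the network accepts any input resolution:

Reshape  Reshape_xxx  1 1 aaa bbb 0=6400 1=85   →   Reshape  Reshape_xxx  1 1 aaa bbb 0=-1 1=85

and likewise for 0=1600 (stride 16) and 0=400 (stride 32).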

Note: you can use Netron to view the model's inputs and outputs, or inspect them with a few lines of Python code (see below).

ubuntu@ubuntu:~/ncnn/build/tools/darknet$ pip install -i https://pypi.tuna.tsinghua.edu.cn/simple netron
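For example, a small script using the onnx package (a sketch; point the file name at your own export) prints every graph input and output with its shape:

import onnx

model = onnx.load("yolov5s-smi.onnx")

def dims(value_info):
    # each dim is either a fixed size (dim_value) or a symbolic name (dim_param)
    return [d.dim_value or d.dim_param for d in value_info.type.tensor_type.shape.dim]

for inp in model.graph.input:
    print("input :", inp.name, dims(inp))
for out in model.graph.output:
    print("output:", out.name, dims(out))

With Netron installed as above, running "netron yolov5s-smi.onnx" shows the same information in the browser.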

[figures: model inputs and outputs viewed in Netron]

The parsing code below was written with reference to yolov5's own detection code.

Having confirmed the model's input and output blobs, we can start testing:

ubuntu@ubuntu:~/ncnn/build-host-gcc-linux/install$ ls
include lib

Copy these two directories (include and lib) into the project:

CMakeLists.txt

cmake_minimum_required(VERSION 3.17)
project(prj_yolo5)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3 -fopenmp")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fopenmp")
set(CMAKE_CXX_STANDARD 11)
include_directories(${CMAKE_SOURCE_DIR}/include)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
# import the prebuilt ncnn static library
add_library(libncnn STATIC IMPORTED)
set_target_properties(libncnn PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libncnn.a)


add_executable(prj_yolo5 main.cpp)

target_link_libraries(prj_yolo5 ${OpenCV_LIBS} libncnn)

The code:

#include "ncnn/benchmark.h"
#include "ncnn/cpu.h"
#include "ncnn/datareader.h"
#include "ncnn/net.h"
#include "ncnn/gpu.h"
#include "ncnn/layer.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
using namespace ncnn;
static ncnn::Net yolov5;
class YoloV5Focus : public ncnn::Layer
{
public:
YoloV5Focus()
{
one_blob_only = true;
}

virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;

int outw = w / 2;
int outh = h / 2;
int outc = channels * 4;

top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
if (top_blob.empty())
return -100;

#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outc; p++)
{
const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
float* outptr = top_blob.channel(p);

for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
*outptr = *ptr;

outptr += 1;
ptr += 2;
}

ptr += w;
}
}

return 0;
}
};

DEFINE_LAYER_CREATOR(YoloV5Focus)



struct Object
{
float x;
float y;
float w;
float h;
int label;
float prob;
};
static inline float sigmoid(float x)
{
return static_cast<float>(1.f / (1.f + exp(-x)));
}

static void generate_proposals(const ncnn::Mat& anchors, int stride, const ncnn::Mat& in_pad, const ncnn::Mat& feat_blob, float prob_threshold, std::vector<Object>& objects)
{
const int num_grid = feat_blob.h;

int num_grid_x;
int num_grid_y;
if (in_pad.w > in_pad.h)
{
num_grid_x = in_pad.w / stride;
num_grid_y = num_grid / num_grid_x;
}
else
{
num_grid_y = in_pad.h / stride;
num_grid_x = num_grid / num_grid_y;
}

const int num_class = feat_blob.w - 5;

const int num_anchors = anchors.w / 2;

for (int q = 0; q < num_anchors; q++)
{
const float anchor_w = anchors[q * 2];
const float anchor_h = anchors[q * 2 + 1];

const ncnn::Mat feat = feat_blob.channel(q);

for (int i = 0; i < num_grid_y; i++)
{
for (int j = 0; j < num_grid_x; j++)
{
const float* featptr = feat.row(i * num_grid_x + j);

// find class index with max class score
int class_index = 0;
float class_score = -FLT_MAX;
for (int k = 0; k < num_class; k++)
{
float score = featptr[5 + k];
if (score > class_score)
{
class_index = k;
class_score = score;
}
}

float box_score = featptr[4];

float confidence = sigmoid(box_score) * sigmoid(class_score);

if (confidence >= prob_threshold)
{
// yolov5/models/yolo.py Detect forward
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh

float dx = sigmoid(featptr[0]);
float dy = sigmoid(featptr[1]);
float dw = sigmoid(featptr[2]);
float dh = sigmoid(featptr[3]);

float pb_cx = (dx * 2.f - 0.5f + j) * stride;
float pb_cy = (dy * 2.f - 0.5f + i) * stride;

float pb_w = pow(dw * 2.f, 2) * anchor_w;
float pb_h = pow(dh * 2.f, 2) * anchor_h;

float x0 = pb_cx - pb_w * 0.5f;
float y0 = pb_cy - pb_h * 0.5f;
float x1 = pb_cx + pb_w * 0.5f;
float y1 = pb_cy + pb_h * 0.5f;

Object obj;
obj.x = x0;
obj.y = y0;
obj.w = x1 - x0;
obj.h = y1 - y0;
obj.label = class_index;
obj.prob = confidence;

objects.push_back(obj);
}
}
}
}
}
static inline float intersection_area(const Object& a, const Object& b)
{
if (a.x > b.x + b.w || a.x + a.w < b.x || a.y > b.y + b.h || a.y + a.h < b.y)
{
// no intersection
return 0.f;
}

float inter_width = std::min(a.x + a.w, b.x + b.w) - std::max(a.x, b.x);
float inter_height = std::min(a.y + a.h, b.y + b.h) - std::max(a.y, b.y);

return inter_width * inter_height;
}

static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
int i = left;
int j = right;
float p = faceobjects[(left + right) / 2].prob;

while (i <= j)
{
while (faceobjects[i].prob > p)
i++;

while (faceobjects[j].prob < p)
j--;

if (i <= j)
{
// swap
std::swap(faceobjects[i], faceobjects[j]);

i++;
j--;
}
}

#pragma omp parallel sections
{
#pragma omp section
{
if (left < j) qsort_descent_inplace(faceobjects, left, j);
}
#pragma omp section
{
if (i < right) qsort_descent_inplace(faceobjects, i, right);
}
}
}

static void qsort_descent_inplace(std::vector<Object>& faceobjects)
{
if (faceobjects.empty())
return;

qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1);
}

static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
{
picked.clear();

const int n = faceobjects.size();

std::vector<float> areas(n);
for (int i = 0; i < n; i++)
{
areas[i] = faceobjects[i].w * faceobjects[i].h;
}

for (int i = 0; i < n; i++)
{
const Object& a = faceobjects[i];

int keep = 1;
for (int j = 0; j < (int)picked.size(); j++)
{
const Object& b = faceobjects[picked[j]];

// intersection over union
float inter_area = intersection_area(a, b);
float union_area = areas[i] + areas[picked[j]] - inter_area;
// float IoU = inter_area / union_area
if (inter_area / union_area > nms_threshold)
keep = 0;
}

if (keep)
picked.push_back(i);
}
}


//note: the detector and size parameters are currently unused; the global yolov5 net and the fixed target_size below are used instead
int demo(cv::Mat& image, ncnn::Net &detector, int detector_size_width, int detector_size_height)
{

static const char* class_names[] = { "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair drier", "toothbrush"
};

const int target_size = 640;
int width=image.cols;
int height=image.rows;
// letterbox pad to multiple of 32
int w = width;
int h = height;
float scale = 1.f;
if (w > h)
{
scale = (float)target_size / w;
w = target_size;
h = h * scale;
}
else
{
scale = (float)target_size / h;
h = target_size;
w = w * scale;
}

ncnn::Mat in = ncnn::Mat::from_pixels_resize(image.data, ncnn::Mat::PIXEL_BGR2RGB,\
image.cols, image.rows, w, h);

// pad to target_size rectangle
// yolov5/utils/datasets.py letterbox
int wpad = (w + 31) / 32 * 32 - w;
int hpad = (h + 31) / 32 * 32 - h;
ncnn::Mat in_pad;
ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 114.f);

// yolov5
std::vector<Object> objects;
{
const float prob_threshold = 0.25f;
const float nms_threshold = 0.45f;

const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
in_pad.substract_mean_normalize(0, norm_vals);

ncnn::Extractor ex = yolov5.create_extractor();


ex.input("images", in_pad);

std::vector<Object> proposals;

// anchor setting from yolov5/models/yolov5s.yaml

// stride 8
{
ncnn::Mat out;
ex.extract("output", out);

ncnn::Mat anchors(6);
anchors[0] = 10.f;
anchors[1] = 13.f;
anchors[2] = 16.f;
anchors[3] = 30.f;
anchors[4] = 33.f;
anchors[5] = 23.f;

std::vector<Object> objects8;
generate_proposals(anchors, 8, in_pad, out, prob_threshold, objects8);

proposals.insert(proposals.end(), objects8.begin(), objects8.end());
}

// stride 16
{
ncnn::Mat out;
ex.extract("771", out);

ncnn::Mat anchors(6);
anchors[0] = 30.f;
anchors[1] = 61.f;
anchors[2] = 62.f;
anchors[3] = 45.f;
anchors[4] = 59.f;
anchors[5] = 119.f;

std::vector<Object> objects16;
generate_proposals(anchors, 16, in_pad, out, prob_threshold, objects16);

proposals.insert(proposals.end(), objects16.begin(), objects16.end());
}

// stride 32
{
ncnn::Mat out;
ex.extract("791", out);

ncnn::Mat anchors(6);
anchors[0] = 116.f;
anchors[1] = 90.f;
anchors[2] = 156.f;
anchors[3] = 198.f;
anchors[4] = 373.f;
anchors[5] = 326.f;

std::vector<Object> objects32;
generate_proposals(anchors, 32, in_pad, out, prob_threshold, objects32);

proposals.insert(proposals.end(), objects32.begin(), objects32.end());
}

// sort all proposals by score from highest to lowest
qsort_descent_inplace(proposals);

// apply nms with nms_threshold
std::vector<int> picked;
nms_sorted_bboxes(proposals, picked, nms_threshold);

int count = picked.size();

objects.resize(count);
for (int i = 0; i < count; i++)
{
objects[i] = proposals[picked[i]];

// adjust offset to original unpadded
float x0 = (objects[i].x - (wpad / 2)) / scale;
float y0 = (objects[i].y - (hpad / 2)) / scale;
float x1 = (objects[i].x + objects[i].w - (wpad / 2)) / scale;
float y1 = (objects[i].y + objects[i].h - (hpad / 2)) / scale;

// clip
x0 = std::max(std::min(x0, (float)(width - 1)), 0.f);
y0 = std::max(std::min(y0, (float)(height - 1)), 0.f);
x1 = std::max(std::min(x1, (float)(width - 1)), 0.f);
y1 = std::max(std::min(y1, (float)(height - 1)), 0.f);

objects[i].x = x0;
objects[i].y = y0;
objects[i].w = x1 - x0;
objects[i].h = y1 - y0;

cv::rectangle (image, cv::Point(x0, y0), cv::Point(x1, y1), cv::Scalar(255, 255, 0), 1, 1, 0);

char text[256];
sprintf(text, "%s %.1f%%", class_names[objects[i].label], objects[i].prob * 100);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
cv::putText(image, text, cv::Point(x1, y1 + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));

}
}
return 0;
}

//camera / single-image test
int test_cam()
{
//set up the yolov5 detector; the local Net below is only passed through to demo(), while the global yolov5 net does the actual work

ncnn::Net detector;
yolov5.register_custom_layer("YoloV5Focus", YoloV5Focus_layer_creator);
yolov5.load_param("/home/ubuntu/yolov5/yolov5s-smi.param");
yolov5.load_model("/home/ubuntu/yolov5/yolov5s-smi.bin");
int detector_size_width = 320;
int detector_size_height = 320;

cv::Mat frame;
cv::VideoCapture cap(0);


cv::Mat src;
//load the test image
src = imread("/home/ubuntu/AlexeyAB_darknet/data/dog.jpg");
//check that the image loaded successfully
if (!src.data) //or: if (src.empty())
{
cout << "Could not open or find the image" << endl;
return -1;
}

double start = ncnn::get_current_time();
demo(src, detector, detector_size_width, detector_size_height);
double end = ncnn::get_current_time();
double time = end - start;
printf("Time:%7.2f \n",time);

imshow("Display", src);

//wait for a key press before exiting
waitKey(0);


return 0;
}
int main()
{
test_cam();
return 0;
}
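For intuition, the YoloV5Focus layer above reimplements yolov5's Focus slice: every 2×2 pixel block is rearranged into four channels, halving the spatial size. A rough numpy equivalent (illustrative only, not part of the pipeline):

import numpy as np

def focus(x):
    # x: (channels, h, w) -> (4*channels, h/2, w/2), same channel order as the C++ loop above
    return np.concatenate([
        x[:, 0::2, 0::2],  # even rows, even cols
        x[:, 1::2, 0::2],  # odd rows,  even cols
        x[:, 0::2, 1::2],  # even rows, odd cols
        x[:, 1::2, 1::2],  # odd rows,  odd cols
    ], axis=0)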

yolov5's detection results really are strong.

[figure: detection result on dog.jpg]

Inference time for the quantized model: Time: 205.94 (milliseconds, as reported by ncnn::get_current_time)

CLion project layout:

ubuntu@ubuntu:~/Downloads/example$ tree -L 2
.
├── cmake-build-debug
│   ├── CMakeCache.txt
│   ├── CMakeFiles
│   ├── cmake_install.cmake
│   ├── example
│   ├── example.cbp
│   └── Makefile
├── CMakeLists.txt
├── include
│   └── ncnn
├── lib
│   ├── cmake
│   └── libncnn.a
└── main.cpp

6 directories, 8 files
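With this layout, a command-line build looks roughly like the following (CLion runs the equivalent automatically; the target name prj_yolo5 comes from the CMakeLists above, whereas the tree shown here came from a project named example):

ubuntu@ubuntu:~/Downloads/example$ mkdir -p build && cd build
ubuntu@ubuntu:~/Downloads/example/build$ cmake .. && make -j8
ubuntu@ubuntu:~/Downloads/example/build$ ./prj_yolo5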

If you need to port yolov5 to Android for detection, rebuild ncnn into static libraries for Android. First add these two lines near the top of ncnn's CMakeLists.txt:

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-rtti -fno-exceptions")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti -fno-exceptions")

Then run the build (a sketch follows the figure below) and copy the resulting static libraries, one by one, into the corresponding folders of the Android project.

[figure: Android build output and library layout]
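A sketch of the Android build using the NDK's CMake toolchain (the ABI, platform level, and ANDROID_NDK path are assumptions; adjust to your setup):

ubuntu@ubuntu:~/ncnn$ mkdir -p build-android-armv8
ubuntu@ubuntu:~/ncnn$ cd build-android-armv8
ubuntu@ubuntu:~/ncnn/build-android-armv8$ cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 ..
ubuntu@ubuntu:~/ncnn/build-android-armv8$ make -j8 && make install

The install directory then contains the include/ headers and libncnn.a to copy into the Android project.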

Code reference: ​https://github.com/sxj731533730/AndroidNCNNYolo5.git

The code after converting the other model, yolov5l.pt, is almost identical; the only lines that differ are the model paths and the output blob names used in the three extract calls:

yolov5.load_param("/home/ubuntu/yolov5/models/yolov5l-smi.param");
yolov5.load_model("/home/ubuntu/yolov5/models/yolov5l-smi.bin");
...
ex.extract("output", out); // stride 8 (blob 650 in the yolov5l param)
...
ex.extract("671", out); // stride 16
...
ex.extract("691", out); // stride 32

Everything else (the YoloV5Focus layer, proposal generation, NMS, and drawing) is identical to the listing above.

Addendum: the yolov5 v6 release

python export.py --weights best.pt --train --img 640 --batch 1
python -m onnxsim best.onnx best_sim.onnx
Simplifying...
Checking 0/3...
Checking 1/3...
Checking 2/3...
Ok!

Then edit the model:

[figure: original v6 param]

changing it to:

[figure: edited v6 param]
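This is the same kind of tail edit as before; as a sketch (the Reshape_xxx / aaa / bbb names here are placeholders), the fixed grid sizes in the output Reshape layers are again changed to -1 so the exported param works at any input size:

Reshape  Reshape_xxx  1 1 aaa bbb 0=-1 1=85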

 

 

Reference: 详细记录u版YOLOv5目标检测ncnn实现 - 知乎
