38. Silent Liveness Detection: Testing and ncnn/MNN Deployment


Basic idea: I recently needed liveness detection on an Android device. Searching GitHub turned up an open-source project from MiniVision (小视科技); after going through the authors' introduction I tested it, and the results were decent, so I went ahead with model conversion and ncnn/MNN deployment.

Models used in this experiment

Link: https://pan.baidu.com/s/1_-hn-edpg3KS8FVHHO3NDA
Extraction code: t5v0

1. First, test the authors' open-source framework

git clone https://github.com/minivision-ai/Silent-Face-Anti-Spoofing.git

(1) First, test with the sample images provided by the repo


Test results

"C:\Program Files\Python36\python.exe" G:/Silent-Face-Anti-Spoofing/test.py
Image 'image_F1.jpg' is Fake Face. Score: 0.73.
Prediction cost 7.35 s


Image 'image_T1.jpg' is Real Face. Score: 0.99.
Prediction cost 4.69 s

Process finished with exit code 0


(2) You can also test detection accuracy on a live phone-camera feed by streaming with the IP Camera app (either RTMP or RTSP works; I used RTSP).
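
For reference, a minimal sketch of pulling the phone stream with OpenCV; the RTSP URL is a placeholder for whatever the IP Camera app reports, and the per-frame prediction is the same logic as test.py:

import cv2

cap = cv2.VideoCapture("rtsp://192.168.1.100:8554/live")  # placeholder URL from the IP Camera app
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    # run the anti-spoofing prediction on `frame` here (same flow as test.py)
    cv2.imshow("preview", frame)
    if cv2.waitKey(1) == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()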


Test results (real face & fake face)


2. Convert the models for ncnn; first export to ONNX

The official code runs a face detector first, so my approach is to crop out a face after the detection stage (saved as face.jpg) and use that crop to compare outputs against the ONNX model.
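
A sketch of that cropping step, using the helpers the repo's test.py wires together (AntiSpoofPredict, CropImage, parse_model_name); treat the exact call signatures as my reading of the repo rather than gospel:

import cv2
from src.anti_spoof_predict import AntiSpoofPredict
from src.generate_patches import CropImage
from src.utility import parse_model_name

image = cv2.imread(r"G:\Silent-Face-Anti-Spoofing\images\sample\image_T1.jpg")
model_test = AntiSpoofPredict(0)         # device id 0
image_cropper = CropImage()
image_bbox = model_test.get_bbox(image)  # the repo's face detector

# crop with the same scale the 2.7_80x80 model expects
h_input, w_input, model_type, scale = parse_model_name("2.7_80x80_MiniFASNetV2.pth")
face = image_cropper.crop(org_img=image, bbox=image_bbox, scale=scale,
                          out_w=w_input, out_h=h_input, crop=True)
cv2.imwrite("face.jpg", face)            # the crop compared against the ONNX output below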


Test results

"C:\Program Files\Python36\python.exe" G:/Silent-Face-Anti-Spoofing/test.py
2.7_80x80_MiniFASNetV2.pth
result tensor([[-4.0072, 4.8543, -0.8501]], device='cuda:0')
result softmax [[1.4125550e-04 9.9653888e-01 3.3199114e-03]]
[[1.4125550e-04 9.9653888e-01 3.3199114e-03]]
4_0_0_80x80_MiniFASNetV1SE.pth
result tensor([[-4.4244, 5.2854, -0.8630]], device='cuda:0')
result softmax [[6.055400e-05 9.978071e-01 2.132311e-03]]
[[6.055400e-05 9.978071e-01 2.132311e-03]]
Image 'image_T1.jpg' is Real Face. Score: 1.00.
Prediction cost 3.46 s

Process finished with exit code 0

Model-conversion code


import argparse
import os
import warnings

import cv2
import numpy as np
import onnxruntime
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from onnxruntime.datasets import get_example

from src.model_lib.MiniFASNet import MiniFASNetV1, MiniFASNetV2, MiniFASNetV1SE, MiniFASNetV2SE
from src.utility import get_kernel, parse_model_name

warnings.filterwarnings('ignore')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

MODEL_MAPPING = {
    'MiniFASNetV1': MiniFASNetV1,
    'MiniFASNetV2': MiniFASNetV2,
    'MiniFASNetV1SE': MiniFASNetV1SE,
    'MiniFASNetV2SE': MiniFASNetV2SE
}


def _load_model(model_path):
    # build the model from the input size and type encoded in the file name
    model_name = os.path.basename(model_path)
    h_input, w_input, model_type, _ = parse_model_name(model_name)
    kernel_size = get_kernel(h_input, w_input)
    model = MODEL_MAPPING[model_type](conv6_kernel=kernel_size).to(device)

    # load the weights, stripping the 'module.' prefix left by DataParallel
    state_dict = torch.load(model_path, map_location=device)
    first_layer_name = next(iter(state_dict))
    if first_layer_name.find('module.') >= 0:
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for key, value in state_dict.items():
            new_state_dict[key[7:]] = value
        model.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(state_dict)
    return model


def torch2libtorch(model_path, lib_path, dummy_input):
    model = _load_model(model_path)
    model = model.to(device)
    model.eval()
    ts = torch.jit.trace(model, dummy_input.to(device))
    ts.save(lib_path)

    # reload the traced model and verify its output
    model = torch.jit.load(lib_path)
    model = model.to(device)
    model.eval()
    pre = model(dummy_input)
    print("the jit:{}".format(pre))
    pre = F.softmax(pre, dim=1).cpu()
    print("the softmax jit:{}".format(pre))
    print("predicted label:")
    print("the label id {}".format(torch.argmax(pre)))
    print("jt->> model converted successfully!")


def main(args):
    print("using OpenCV input, model: {}".format(args.model_path))
    dummy_input = getInputCv(args.img_size)  # build the network input
    model = infertorch(dummy_input)
    torch2onnx(args, model, dummy_input)
    torch2libtorch(args.model_path, args.jt_model_path, dummy_input)


def getInputCv(img_size):
    input = cv2.imread(args.image_path)
    input = cv2.resize(input, (img_size, img_size))
    print("resize =", input.shape)
    input = np.transpose(input, (2, 0, 1)).astype(np.float32)  # HWC BGR -> CHW
    print("after input[0,0,0]:{}".format(input[0, 0, 0]))
    now_image1 = Variable(torch.from_numpy(input).to(device))
    dummy_input = now_image1.unsqueeze(0)
    return dummy_input


def infertorch(dummy_input):
    model = _load_model(args.model_path)
    model = model.to(device)
    model.eval()
    print(dummy_input.shape)
    pre = model(dummy_input)
    print("the pt:{}".format(pre))
    pre = F.softmax(pre, dim=1).cpu()
    print("the softmax pt:{}".format(pre))
    print("predicted label:")
    print("the label id {}".format(torch.argmax(pre)))
    print("inference OK")
    return model


def torch2onnx(args, model, dummy_input):
    input_names = ['input']    # name of the model input
    output_names = ['output']  # name of the model output
    print("====", dummy_input.shape)
    torch.onnx.export(model, dummy_input, args.onnx_model_path,
                      verbose=False, input_names=input_names,
                      output_names=output_names, opset_version=11)
    # verify the exported onnx model with onnxruntime
    example_model = get_example(args.onnx_model_path)
    session = onnxruntime.InferenceSession(example_model)
    # get the name of the first input of the model
    input_name = session.get_inputs()[0].name
    result = session.run([], {input_name: dummy_input.data.cpu().numpy()})
    result = torch.tensor(result[0], dtype=torch.float32)
    print("the result is {}".format(result[0]))
    result = F.softmax(result, dim=1).cpu()
    print("the softmax result is {}".format(result[0]))
    print("predicted label:")
    print("the label id {}".format(torch.Tensor(result).argmax(1)))
    print("onnx->> model converted successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="PyTorch model to onnx and ncnn")
    parser.add_argument('--image_path', type=str, default=r"G:\Silent-Face-Anti-Spoofing\face.jpg",
                        help="input image used for the output comparison")
    parser.add_argument('--model_path', type=str,
                        default=r"G:\Silent-Face-Anti-Spoofing\resources\anti_spoof_models\4_0_0_80x80_MiniFASNetV1SE.pth",
                        help="the .pth weight file to convert")
    parser.add_argument('--save_model_path', type=str, default=r"G:\Silent-Face-Anti-Spoofing\resources\convert",
                        help="output directory for converted models")
    parser.add_argument('--onnx_model_path', type=str,
                        default=r"G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE.onnx",
                        help="where to write the onnx model")
    parser.add_argument('--jt_model_path', type=str,
                        default=r"G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE_jc.pt",
                        help="where to write the TorchScript model")
    parser.add_argument('--img_size', type=int, default=80,
                        help="the image size of model input")
    args = parser.parse_args()
    main(args)
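
The defaults target the 4_0_0_80x80_MiniFASNetV1SE weights; model 2 was presumably converted by overriding the arguments, along these lines:

python convert.py --model_path G:\Silent-Face-Anti-Spoofing\resources\anti_spoof_models\2.7_80x80_MiniFASNetV2.pth --onnx_model_path G:\Silent-Face-Anti-Spoofing\resources\convert\2.7_80x80_MiniFASNetV2.onnx --jt_model_path G:\Silent-Face-Anti-Spoofing\resources\convert\2.7_80x80_MiniFASNetV2_jc.pt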

Converting model 1 to ONNX

"C:\Program Files\Python36\python.exe" G:/Silent-Face-Anti-Spoofing/convert.py
opencv 读取方式 G:\Silent-Face-Anti-Spoofing\resources\anti_spoof_models\4_0_0_80x80_MiniFASNetV1SE.pth
resize = (80, 80, 3)
after input[0,0,0]:119.0
torch.Size([1, 3, 80, 80])
the pt:tensor([[-4.4244, 5.2854, -0.8630]], device='cuda:0', grad_fn=<MmBackward0>)
the softmax pt:tensor([[6.0554e-05, 9.9781e-01, 2.1323e-03]], grad_fn=<ToCopyBackward0>)
推理便签是
the label id 1
推理成功
==== torch.Size([1, 3, 80, 80])
the result is tensor([-4.4244, 5.2854, -0.8630])
the softmax result is tensor([6.0554e-05, 9.9781e-01, 2.1323e-03])
推理便签是
the label id tensor([1])
onnx->>模型转换成功!
the jit:tensor([[-4.4244, 5.2854, -0.8630]], device='cuda:0', grad_fn=<MmBackward0>)
the softmax jit:tensor([[6.0554e-05, 9.9781e-01, 2.1323e-03]], grad_fn=<ToCopyBackward0>)
推理便签是
the label id 1
jt->>模型转换成功!

Process finished with exit code 0

Converting model 2 to ONNX

"C:\Program Files\Python36\python.exe" G:/Silent-Face-Anti-Spoofing/convert.py
opencv 读取方式 G:\Silent-Face-Anti-Spoofing\resources\anti_spoof_models\2.7_80x80_MiniFASNetV2.pth
resize = (80, 80, 3)
after input[0,0,0]:119.0
torch.Size([1, 3, 80, 80])
the pt:tensor([[-4.0072, 4.8543, -0.8501]], device='cuda:0', grad_fn=<MmBackward0>)
the softmax pt:tensor([[1.4126e-04, 9.9654e-01, 3.3199e-03]], grad_fn=<ToCopyBackward0>)
推理便签是
the label id 1
推理成功
==== torch.Size([1, 3, 80, 80])
the result is tensor([-4.0072, 4.8543, -0.8501])
the softmax result is tensor([1.4126e-04, 9.9654e-01, 3.3199e-03])
推理便签是
the label id tensor([1])
onnx->>模型转换成功!
the jit:tensor([[-4.0072, 4.8543, -0.8501]], device='cuda:0', grad_fn=<MmBackward0>)
the softmax jit:tensor([[1.4126e-04, 9.9654e-01, 3.3199e-03]], grad_fn=<ToCopyBackward0>)
推理便签是
the label id 1
jt->>模型转换成功!

Process finished with exit code 0

The outputs line up one-to-one across PyTorch, ONNX, and TorchScript, so it's safe to move on to the ncnn conversion.
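
To make that check mechanical rather than eyeballing logs, a small helper (my own addition, plain numpy) can assert the two logit vectors agree within float tolerance:

import numpy as np

def outputs_match(a, b, rtol=1e-3, atol=1e-5):
    # True when the PyTorch and onnxruntime logits agree within tolerance
    return np.allclose(np.asarray(a), np.asarray(b), rtol=rtol, atol=atol)

# e.g. with the two logit rows logged above:
print(outputs_match([-4.4244, 5.2854, -0.8630], [-4.4244, 5.2854, -0.8630]))  # True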

2.7_80x80_MiniFASNetV2_sim.onnx model graph: [screenshot]

4_0_0_80x80_MiniFASNetV1SE_sim.onnx model graph: [screenshot]

3. Convert with onnx2ncnn

D:\ncnn\buildMinGW\tools\onnx>python -m onnxsim G:\Silent-Face-Anti-Spoofing\resources\convert\2.7_80x80_MiniFASNetV2.onnx G:\Silent-Face-Anti-Spoofing\resources\convert\2.7_80x80_MiniFASNetV2_sim.onnx
Simplifying...
Checking 0/3...
Checking 1/3...
Checking 2/3...
Ok!

D:\ncnn\buildMinGW\tools\onnx>onnx2ncnn.exe G:\Silent-Face-Anti-Spoofing\resources\convert\2.7_80x80_MiniFASNetV2_sim.onnx G:\Silent-Face-Anti-Spoofing\resources\convert\2.7_80x80_MiniFASNetV2_sim.param G:\Silent-Face-Anti-Spoofing\resources\convert\2.7_80x80_MiniFASNetV2_sim.bin

D:\ncnn\buildMinGW\tools\onnx>python -m onnxsim G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE.onnx G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE_sim.onnx
Simplifying...
Checking 0/3...
Checking 1/3...
Checking 2/3...
Ok!

D:\ncnn\buildMinGW\tools\onnx>onnx2ncnn.exe G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE_sim.onnx G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE_sim.param G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE_sim.bin

You can also try pnnx for this step.
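
pnnx consumes the TorchScript file exported by the script above rather than the ONNX; a hedged example invocation (path assumed from the converter script):

pnnx 4_0_0_80x80_MiniFASNetV1SE_jc.pt inputshape=[1,3,80,80]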

Now write some inference code. Note that the Python pipeline applies a softmax after the network's raw output, so the C++ side has to replicate it.


(Method 1) In the C++ inference code, append ncnn's Softmax layer at runtime


#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "platform.h"
#include "net.h"
#if NCNN_VULKAN
#include "gpu.h"
#endif // NCNN_VULKAN

#define IMAGE_SIZE 80

int main(int argc, char** argv)
{
    cv::Mat bgr = cv::imread("/home/ubuntu/CLionProjects/untitled1/face.jpg");

    std::vector<float> cls_scores;

    ncnn::Net SilentFaceSpoofing;
    SilentFaceSpoofing.load_param("/home/ubuntu/CLionProjects/untitled1/model/2.7_80x80_MiniFASNetV2_sim.param");
    SilentFaceSpoofing.load_model("/home/ubuntu/CLionProjects/untitled1/model/2.7_80x80_MiniFASNetV2_sim.bin");

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, IMAGE_SIZE, IMAGE_SIZE);
    fprintf(stderr, "input shape: %d %d %d %d\n", in.dims, in.h, in.w, in.c);

    ncnn::Extractor ex = SilentFaceSpoofing.create_extractor();
    ex.input("input", in); // "input" is the input node name in the .param file

    ncnn::Mat out;
    ex.extract("output", out);

    // the exported model ends with raw logits, so run ncnn's Softmax layer on them
    {
        ncnn::Layer* softmax = ncnn::create_layer("Softmax");
        ncnn::ParamDict pd;
        softmax->load_param(pd);
        softmax->forward_inplace(out, SilentFaceSpoofing.opt);
        delete softmax;
    }
    out = out.reshape(out.h * out.w * out.c);
    fprintf(stderr, "output shape: %d %d %d %d\n", out.dims, out.h, out.w, out.c);

    cls_scores.resize(out.w);
    for (int j = 0; j < out.w; j++)
    {
        cls_scores[j] = out[j];
        printf("cls_scores[%d]=%f\n", j, cls_scores[j]);
    }

    std::cout << std::endl;
    auto itMax = std::max_element(cls_scores.begin(), cls_scores.end());
    std::cout << "the max:" << *itMax << " the location:" << std::distance(cls_scores.begin(), itMax) << std::endl;
    if (std::distance(cls_scores.begin(), itMax) == 1) {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Real Face. Score: " << *itMax << std::endl;
    } else {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Fake Face. Score: " << *itMax << std::endl;
    }

    return 0;
}

Test results

F:\untitled11\cmake-build-debug\untitled11.exe
cls_scores[0]=0.000141
cls_scores[1]=0.996539
cls_scores[2]=0.003320

the max:0.996539 the location:1
Image 1 is Real Face. Score: 0.996539
input shape: 3 80 80 3
output shape: 1 1 3 1

(Method 2) Thanks to 超神 for the assist. (After appending a Softmax layer to the .param file I forgot to update the counts, so the output stayed wrong; incrementing the layer and blob counts by one, 111→112 and 123→124, fixed it.)

[screenshots: the edited .param file, with the layer/blob counts bumped and a Softmax layer appended]
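
For reference, the whole edit boils down to bumping the counts on the second line and appending one Softmax line at the end of the .param. A rough sketch, with illustrative layer/blob names (only "output" and "prob" are taken from the real files):

7767517
112 124
Input            input            0 1 input
...              (the original 111 layers, unchanged)
Softmax          prob             1 1 output prob

The inference code then extracts the new blob "prob" instead of "output".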

Test code


#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "platform.h"
#include "net.h"
#if NCNN_VULKAN
#include "gpu.h"
#endif // NCNN_VULKAN

#define IMAGE_SIZE 80

int main(int argc, char** argv)
{
    cv::Mat bgr = cv::imread("/home/ubuntu/CLionProjects/untitled1/face.jpg");

    std::vector<float> cls_scores;

    ncnn::Net SilentFaceSpoofing;
    SilentFaceSpoofing.load_param("/home/ubuntu/CLionProjects/untitled1/model/2.7_80x80_MiniFASNetV2_sim.param");
    SilentFaceSpoofing.load_model("/home/ubuntu/CLionProjects/untitled1/model/2.7_80x80_MiniFASNetV2_sim.bin");

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, IMAGE_SIZE, IMAGE_SIZE);
    fprintf(stderr, "input shape: %d %d %d %d\n", in.dims, in.h, in.w, in.c);

    ncnn::Extractor ex = SilentFaceSpoofing.create_extractor();
    ex.input("input", in); // "input" is the input node name in the .param file

    // "prob" is the Softmax layer appended to the .param file by hand
    ncnn::Mat out;
    ex.extract("prob", out);
    fprintf(stderr, "output shape: %d %d %d %d\n", out.dims, out.h, out.w, out.c);

    cls_scores.resize(out.w);
    for (int j = 0; j < out.w; j++)
    {
        cls_scores[j] = out[j];
        printf("cls_scores[%d]=%f\n", j, cls_scores[j]);
    }

    std::cout << std::endl;
    auto itMax = std::max_element(cls_scores.begin(), cls_scores.end());
    std::cout << "the max:" << *itMax << " the location:" << std::distance(cls_scores.begin(), itMax) << std::endl;
    if (std::distance(cls_scores.begin(), itMax) == 1) {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Real Face. Score: " << *itMax << std::endl;
    } else {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Fake Face. Score: " << *itMax << std::endl;
    }

    return 0;
}

Test results

/home/ubuntu/CLionProjects/untitled1/cmake-build-debug/untitled1
input shape: 3 80 80 3
output shape: 1 1 3 1
cls_scores[0]=0.000141
cls_scores[1]=0.996539
cls_scores[2]=0.003320
the max:0.996539 the location:1
Image 1 is Real Face. Score: 0.996539

Process finished with exit code 0

The other model gets the same treatment (132→133, 147→148): just append one Softmax layer after the last layer. I'll only paste one code listing and result.


#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "platform.h"
#include "net.h"
#if NCNN_VULKAN
#include "gpu.h"
#endif // NCNN_VULKAN

#define IMAGE_SIZE 80

int main(int argc, char** argv)
{
    cv::Mat bgr = cv::imread("/home/ubuntu/CLionProjects/untitled1/face.jpg");

    std::vector<float> cls_scores;

    ncnn::Net SilentFaceSpoofing;
    SilentFaceSpoofing.load_param("/home/ubuntu/CLionProjects/untitled1/model/4_0_0_80x80_MiniFASNetV1SE_sim.param");
    SilentFaceSpoofing.load_model("/home/ubuntu/CLionProjects/untitled1/model/4_0_0_80x80_MiniFASNetV1SE_sim.bin");

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, IMAGE_SIZE, IMAGE_SIZE);
    fprintf(stderr, "input shape: %d %d %d %d\n", in.dims, in.h, in.w, in.c);

    ncnn::Extractor ex = SilentFaceSpoofing.create_extractor();
    ex.input("input", in); // "input" is the input node name in the .param file

    // "prob" is the Softmax layer appended to the .param file by hand
    ncnn::Mat out;
    ex.extract("prob", out);
    fprintf(stderr, "output shape: %d %d %d %d\n", out.dims, out.h, out.w, out.c);

    cls_scores.resize(out.w);
    for (int j = 0; j < out.w; j++)
    {
        cls_scores[j] = out[j];
        printf("cls_scores[%d]=%f\n", j, cls_scores[j]);
    }

    std::cout << std::endl;
    auto itMax = std::max_element(cls_scores.begin(), cls_scores.end());
    std::cout << "the max:" << *itMax << " the location:" << std::distance(cls_scores.begin(), itMax) << std::endl;
    if (std::distance(cls_scores.begin(), itMax) == 1) {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Real Face. Score: " << *itMax << std::endl;
    } else {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Fake Face. Score: " << *itMax << std::endl;
    }

    return 0;
}

Test results

/home/ubuntu/CLionProjects/untitled1/cmake-build-debug/untitled1
input shape: 3 80 80 3
cls_scores[0]=0.000061
cls_scores[1]=0.997807
cls_scores[2]=0.002132
output shape: 1 1 3 1
the max:0.997807 the location:1
Image 1 is Real Face. Score: 0.997807
Process finished with exit code 0

The output now matches Python exactly, so silent liveness detection can be deployed from C++.

(Method 3) Modify the ONNX model directly

import onnx

onnx_model = onnx.load(r'G:\Silent-Face-Anti-Spoofing\resources\convert\4_0_0_80x80_MiniFASNetV1SE_sim.onnx')
onnx.checker.check_model(onnx_model)

nodes = onnx_model.graph.node

for idx, n in enumerate(nodes):
    if 'MatMul' == n.op_type and "output" in n.output[0]:
        # rename the final MatMul's output blob, then append a Softmax node
        # that consumes it and takes over the graph-output name "output"
        print(idx)
        n.output[0] = str(idx + 1)
        _input = n.output[0]
        _output = "output"
        n_sm = onnx.helper.make_node('Softmax', inputs=[_input], outputs=[_output], axis=0)
        nodes.append(n_sm)

# check the model
onnx.checker.check_model(onnx_model)
# save the new model
onnx.save(onnx_model, r'G:\Silent-Face-Anti-Spoofing\resources\experiment\4_0_0_80x80_MiniFASNetV1SE_sim_softmax.onnx')
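
A quick sanity check (my own addition) that the appended Softmax is live in the saved graph: run a random input through onnxruntime and confirm the three scores sum to 1:

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession(
    r'G:\Silent-Face-Anti-Spoofing\resources\experiment\4_0_0_80x80_MiniFASNetV1SE_sim_softmax.onnx')
x = np.random.rand(1, 3, 80, 80).astype(np.float32)
probs = sess.run(None, {sess.get_inputs()[0].name: x})[0]
print(probs, probs.sum())  # the three scores should sum to ~1.0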

Original 4_0_0_80x80_MiniFASNetV1SE_sim.onnx model graph: [screenshot]

Modified 4_0_0_80x80_MiniFASNetV1SE_sim_softmax.onnx model graph: [screenshot]

The other model (2.7_80x80_MiniFASNetV2_sim.onnx) is handled the same way. Below, the modified 4_0_0_80x80_MiniFASNetV1SE_sim_softmax model is run through ncnn; since softmax is now baked into the graph, the C++ code needs no extra layer.


#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "platform.h"
#include "net.h"
#if NCNN_VULKAN
#include "gpu.h"
#endif // NCNN_VULKAN

#define IMAGE_SIZE 80

int main(int argc, char** argv)
{
    cv::Mat bgr = cv::imread("G:\\Silent-Face-Anti-Spoofing\\face.jpg");

    std::vector<float> cls_scores;

    ncnn::Net SilentFaceSpoofing;
    SilentFaceSpoofing.load_param("G:\\Silent-Face-Anti-Spoofing\\resources\\experiment\\4_0_0_80x80_MiniFASNetV1SE_sim_softmax.param");
    SilentFaceSpoofing.load_model("G:\\Silent-Face-Anti-Spoofing\\resources\\experiment\\4_0_0_80x80_MiniFASNetV1SE_sim_softmax.bin");

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, IMAGE_SIZE, IMAGE_SIZE);
    fprintf(stderr, "input shape: %d %d %d %d\n", in.dims, in.h, in.w, in.c);

    ncnn::Extractor ex = SilentFaceSpoofing.create_extractor();
    ex.input("input", in); // "input" is the input node name in the .param file

    // the softmax is baked into the onnx graph, so "output" already holds probabilities
    ncnn::Mat out;
    ex.extract("output", out);
    fprintf(stderr, "output shape: %d %d %d %d\n", out.dims, out.h, out.w, out.c);

    cls_scores.resize(out.w);
    for (int j = 0; j < out.w; j++)
    {
        cls_scores[j] = out[j];
        printf("cls_scores[%d]=%f\n", j, cls_scores[j]);
    }

    std::cout << std::endl;
    auto itMax = std::max_element(cls_scores.begin(), cls_scores.end());
    std::cout << "the max:" << *itMax << " the location:" << std::distance(cls_scores.begin(), itMax) << std::endl;
    if (std::distance(cls_scores.begin(), itMax) == 1) {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Real Face. Score: " << *itMax << std::endl;
    } else {
        std::cout << "Image " << std::distance(cls_scores.begin(), itMax) << " is Fake Face. Score: " << *itMax << std::endl;
    }

    return 0;
}

Test results. Label 1 has the highest confidence and indicates a real face; labels 0 and 2 are the fake-face confidences.

F:\untitled11\cmake-build-debug\untitled11.exe
cls_scores[0]=0.000061
cls_scores[1]=0.997807
cls_scores[2]=0.002132

the max:0.997807 the location:1
Image 1 is Real Face. Score: 0.997807
input shape: 3 80 80 3
output shape: 1 1 3 1

The code and test result for the other model are omitted.

4. Now try MNN. I converted the unmodified ONNX models here, but the softmax-modified ONNX can be converted directly in the same way.

ubuntu@ubuntu:~/MNN/build$ ./MNNConvert -f ONNX --modelFile /home/ubuntu/sxj731533730/2.7_80x80_MiniFASNetV2_sim.onnx  --MNNModel /home/ubuntu/sxj731533730/2.7_80x80_MiniFASNetV2_sim.mnn --bizCode MNN
Start to Convert Other Model Format To MNN Model...
[09:00:42] /home/ubuntu/MNN/tools/converter/source/onnx/onnxConverter.cpp:30: ONNX Model ir version: 7
Start to Optimize the MNN Net...
inputTensors : [ input, ]
outputTensors: [ output, ]
Converted Success!
ubuntu@ubuntu:~/MNN/build$ ./MNNConvert -f ONNX --modelFile /home/ubuntu/sxj731533730/4_0_0_80x80_MiniFASNetV1SE_sim.onnx --MNNModel /home/ubuntu/sxj731533730/4_0_0_80x80_MiniFASNetV1SE_sim.mnn --bizCode MNN
Start to Convert Other Model Format To MNN Model...
[09:04:38] /home/ubuntu/MNN/tools/converter/source/onnx/onnxConverter.cpp:30: ONNX Model ir version: 7
Start to Optimize the MNN Net...
inputTensors : [ input, ]
outputTensors: [ output, ]
Converted Success
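
The softmax-modified ONNX should convert with the same command, e.g.:

./MNNConvert -f ONNX --modelFile 4_0_0_80x80_MiniFASNetV1SE_sim_softmax.onnx --MNNModel 4_0_0_80x80_MiniFASNetV1SE_sim_softmax.mnn --bizCode MNN

in which case the hand-rolled softmax in the C++ code below can be dropped.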

Inference code for model 1

#include <iostream>
#include <cmath>
#include <algorithm>
#include <numeric>
#include <memory>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <MNN/Interpreter.hpp>
#include <MNN/ImageProcess.hpp>

using namespace std;
using namespace cv;
using namespace MNN;

#define IMAGE_SIZE 80

// softmax over a plain float vector, since the exported model ends with raw logits
template <typename T>
void softmax(const std::vector<T>& v, std::vector<T>& s)
{
    double sum = 0.0;
    std::transform(v.begin(), v.end(), s.begin(), [](T x) { return std::exp(x); });
    sum = std::accumulate(s.begin(), s.end(), sum);
    for (size_t i = 0; i < s.size(); ++i)
        s.at(i) /= sum;
}

int main()
{
    // cv::INTER_LINEAR is bilinear interpolation; it must match the resize
    // interpolation used on the Python side
    cv::Mat img_src = cv::imread("/home/ubuntu/CLionProjects/untitled6/face.jpg");
    cv::Mat img_resized;
    cv::resize(img_src, img_resized, cv::Size(IMAGE_SIZE, IMAGE_SIZE), 0, 0, cv::INTER_LINEAR);

    // create the interpreter
    auto net = std::shared_ptr<MNN::Interpreter>(
            MNN::Interpreter::createFromFile("/home/ubuntu/CLionProjects/untitled6/2.7_80x80_MiniFASNetV2_sim.mnn"));
    cout << "Interpreter created" << endl;
    ScheduleConfig config;
    config.numThread = 8;
    config.type = MNN_FORWARD_CPU;
    auto session = net->createSession(config); // create the session
    cout << "session created" << endl;

    auto inTensor = net->getSessionInput(session, NULL);
    auto _Tensor = MNN::Tensor::create<float>({1, 3, IMAGE_SIZE, IMAGE_SIZE}, NULL, MNN::Tensor::CAFFE);

    if (_Tensor->elementSize() != 3 * IMAGE_SIZE * IMAGE_SIZE)
    {
        std::cout << _Tensor->elementSize() << " " << img_resized.channels() * img_resized.cols * img_resized.rows << std::endl;
        std::cout << "input shape not equal image shape" << std::endl;
        return -1;
    }

    // fill the NCHW float tensor from the HWC uint8 image, without any
    // mean/std normalization, matching the Python preprocessing
    for (int i = 0; i < img_resized.channels(); i++)
        for (int j = 0; j < img_resized.rows; j++)
            for (int k = 0; k < img_resized.cols; k++)
                _Tensor->host<float>()[i * IMAGE_SIZE * IMAGE_SIZE + j * IMAGE_SIZE + k] = img_resized.at<cv::Vec3b>(j, k)[i];

    inTensor->copyFromHostTensor(_Tensor);
    delete _Tensor;

    // run inference
    net->runSession(session);
    auto output = net->getSessionOutput(session, NULL);

    MNN::Tensor score_host(output, output->getDimensionType());
    output->copyToHostTensor(&score_host);

    auto score_ptr = score_host.host<float>();
    std::vector<float> vec_score, vec_softmax; // TODO: look into custom layers in MNN some day

    for (int i = 0; i < score_host.elementSize(); ++i)
    {
        float score = score_ptr[i];
        std::cout << score << " ,";
        vec_score.push_back(score);
        vec_softmax.push_back(score);
    }
    std::cout << std::endl;

    softmax(vec_score, vec_softmax);
    for (std::vector<float>::const_iterator it = vec_softmax.begin(); it != vec_softmax.end(); ++it)
        std::cout << *it << " ";
    std::cout << std::endl;

    auto itMax = std::max_element(vec_softmax.begin(), vec_softmax.end());
    std::cout << "the max:" << *itMax << " the location:" << std::distance(vec_softmax.begin(), itMax) << std::endl;
    if (std::distance(vec_softmax.begin(), itMax) == 1) {
        std::cout << "Image " << std::distance(vec_softmax.begin(), itMax) << " is Real Face. Score: " << *itMax << std::endl;
    } else {
        std::cout << "Image " << std::distance(vec_softmax.begin(), itMax) << " is Fake Face. Score: " << *itMax << std::endl;
    }

    return 0;
}

Test results

/home/ubuntu/CLionProjects/untitled6/cmake-build-debug/untitled6
Interpreter created
session created
-4.00721 ,4.85425 ,-0.850086 ,
0.000141257 0.996539 0.00331995
the max:0.996539 the location:1
Image 1 is Real Face. Score: 0.996539

Process finished with exit code 0

Model 2 uses exactly the same code; the only change is that the interpreter loads 4_0_0_80x80_MiniFASNetV1SE_sim.mnn instead.

Test results

F:\untitled11\cmake-build-debug\untitled11.exe
Interpreter created
session created
-4.4244 ,5.28539 ,-0.862977 ,
6.05538e-05 0.997807 0.00213229
the max:0.997807 the location:1
Image 1 is Real Face. Score: 0.997807

Process finished with exit code 0

References

https://github.com/Tencent/ncnn/wiki/operation-param-weight-table

https://github.com/OAID/Tengine/tree/tengine-lite/tools/optimize

https://www.yuque.com/mnn/en
