40. Windows 10 + MinGW + CLion / Android Studio + JNI: calling the Dlib static library


Basic idea: on Windows 10, build Dlib with MinGW + CMake, then call the resulting static library from C++ code. Once these steps are in place, you can swap in a different compiler and cross-compile the same project for other platforms.

Project code: https://github.com/sxj731533730/HeadPose

The setup steps are as follows:

Step 1: Set up the environment; OpenCV can be skipped for now.

Step 2: Download the dlib package. Downloading through Windows 10's optional Linux subsystem (WSL) is faster:

ubuntu@ubuntu:/mnt/f$ axel -n 100 http://dlib.net/files/dlib-19.21.zip 
ubuntu@ubuntu:/mnt/f$ unzip dlib-19.21.zip
ubuntu@ubuntu:/mnt/f$ cd dlib-19.21/
ubuntu@ubuntu:/mnt/f/dlib-19.21$ mkdir -p build

The directory layout is:

ubuntu@ubuntu:/mnt/f/dlib-19.21$ tree -L 1
.
├── CMakeLists.txt
├── ISSUE_TEMPLATE.md
├── MANIFEST.in
├── README.md
├── build
├── dlib
├── docs
├── documentation.html
├── examples
├── python_examples
├── setup.py
└── tools

6 directories, 6 files

Step 3: Switch to a Windows 10 cmd prompt and build:

F:\dlib-19.21\build>cmake -DCMAKE_INSTALL_PREFIX=F:/dlib-19.21/build -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ -DCMAKE_BUILD_TYPE=Release -DCMAKE_MAKE_PROGRAM=mingw32-make -G "MinGW Makefiles" ..
F:\dlib-19.21\build>mingw32-make -f Makefile
F:\dlib-19.21\build>mingw32-make -f Makefile install

After the build and install finish, F:\dlib-19.21\build contains an include directory and a lib directory.

If you need to port this to Android Studio for on-device (edge) development, dlib has to be cross-compiled with the NDK toolchain instead:

for armeabi-v7a:
:~/dlib-19.21/build$ cmake -DCMAKE_INSTALL_PREFIX=~/dlib-19.21/build -DCMAKE_C_COMPILER=/usr/local/arm-linux-androideabi/bin/arm-linux-androideabi-gcc -DCMAKE_CXX_COMPILER=/usr/local/arm-linux-androideabi/bin/arm-linux-androideabi-g++ -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-std=c++11 -frtti -fexceptions" -DDLIB_NO_GUI_SUPPORT=ON ..

for arm64-v8a:
:~/dlib-19.21/build$ cmake -DCMAKE_INSTALL_PREFIX=~/dlib-19.21/build -DCMAKE_C_COMPILER=/usr/local/arm-linux-androideabi/bin/aarch64-linux-android21-clang -DCMAKE_CXX_COMPILER=/usr/local/arm-linux-androideabi/bin/aarch64-linux-android21-clang++ -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-std=c++11 -frtti -fexceptions" -DDLIB_NO_GUI_SUPPORT=ON ..
:~/dlib-19.21/build$ make

Copy the resulting lib and include directories into the corresponding folders of the Android project.

(screenshot)

Android Studio CMakeLists.txt configuration:

# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html

# Sets the minimum version of CMake required to build the native library.

cmake_minimum_required(VERSION 3.4.1)

# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# You can define multiple libraries, and CMake builds them for you.
# Gradle automatically packages shared libraries with your APK.

# add the OpenCV header directory
include_directories(${CMAKE_SOURCE_DIR}/include)

# import the OpenCV shared library (.so)
add_library(libopencv_java4 SHARED IMPORTED)
set_target_properties(libopencv_java4 PROPERTIES IMPORTED_LOCATION
${CMAKE_SOURCE_DIR}/../jniLibs/libs/${ANDROID_ABI}/libopencv_java4.so)


include_directories(${CMAKE_SOURCE_DIR}/include)
add_library(libdlib STATIC IMPORTED)
set_target_properties(libdlib PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/../jniLibs/libs/${ANDROID_ABI}/libdlib.a)

add_library( # Sets the name of the library.
native-lib

# Sets the library as a shared library.
SHARED

# Provides a relative path to your source file(s).
native-lib.cpp )

# Searches for a specified prebuilt library and stores the path as a
# variable. Because CMake includes system libraries in the search path by
# default, you only need to specify the name of the public NDK library
# you want to add. CMake verifies that the library exists before
# completing its build.

find_library( # Sets the name of the path variable.
log-lib

# Specifies the name of the NDK library that
# you want CMake to locate.
log )

# Specifies libraries CMake should link to your target library. You
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.

target_link_libraries( # Specifies the target library.
native-lib
jnigraphics
libopencv_java4 # link the OpenCV .so
libdlib
# Links the target library to the log library
# included in the NDK.
${log-lib} )

At the same time, modify Android Studio's build.gradle as follows:

(screenshot of the modified build.gradle)

Other on-device details are not elaborated here: grant external-storage (SD card) read permission and port the C++ code, as sketched below.

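For reference, here is a minimal native-lib.cpp sketch of what the JNI entry point can look like when calling the dlib static library on Android. The Java class and method names (com.example.headpose.MainActivity.detectFaces), the grayscale byte-array input, and loading the landmark model from a path passed in from Java are assumptions for illustration only; the actual project code is in the repository linked above. In a real app the detector and predictor should be created once and cached rather than rebuilt on every call.

#include <jni.h>
#include <string>
#include <vector>
#include "dlib/array2d.h"
#include "dlib/image_processing/frontal_face_detector.h"
#include "dlib/image_processing.h"

extern "C" JNIEXPORT jint JNICALL
Java_com_example_headpose_MainActivity_detectFaces(JNIEnv *env, jobject /*thiz*/,
                                                   jbyteArray gray, jint width, jint height,
                                                   jstring modelPath)
{
    // Copy the grayscale pixels handed over from Java into a dlib image.
    jbyte *pixels = env->GetByteArrayElements(gray, nullptr);
    dlib::array2d<unsigned char> img(height, width);
    for (long r = 0; r < height; ++r)
        for (long c = 0; c < width; ++c)
            img[r][c] = static_cast<unsigned char>(pixels[r * width + c]);
    env->ReleaseByteArrayElements(gray, pixels, JNI_ABORT);

    // Load the 68-landmark model from the path passed in by Java
    // (e.g. a file on external storage, hence the read permission).
    const char *path = env->GetStringUTFChars(modelPath, nullptr);
    dlib::shape_predictor predictor;
    dlib::deserialize(path) >> predictor;
    env->ReleaseStringUTFChars(modelPath, path);

    // Run the HOG face detector on the frame.
    dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();
    std::vector<dlib::rectangle> faces = detector(img);

    if (!faces.empty())
    {
        // Landmarks for the first face; the pose-estimation math is the same
        // as in the desktop main.cpp shown later in this post.
        dlib::full_object_detection shape = predictor(img, faces[0]);
        (void)shape;
    }
    return static_cast<jint>(faces.size());
}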

Step 4: Drag the generated include directory and the corresponding lib files into the CLion project.

 

(screenshot of the CLion project with the imported files)

The project directory structure is:

ubuntu@ubuntu:/mnt/f/example$ tree -L 2
.
├── CMakeLists.txt
├── cmake-build-debug
│ ├── CMakeCache.txt
│ ├── CMakeFiles
│ ├── Makefile
│ ├── cmake_install.cmake
│ ├── eyeshoot.cbp
│ ├── eyeshoot.exe
│ └── saved_function.dat
├── include
│ └── dlib
├── lib
│ ├── cmake
│ ├── libdlib.a
│ └── pkgconfig
└── main.cpp

7 directories, 9 files

Modify CMakeLists.txt as follows:

cmake_minimum_required(VERSION 3.17)
project(example)
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3" )
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3")
set(CMAKE_CXX_STANDARD 11)
include_directories(${CMAKE_SOURCE_DIR}/include)

# import the dlib static library
add_library(libdlib STATIC IMPORTED)
set_target_properties(libdlib PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libdlib.a)
add_executable(example main.cpp)
target_link_libraries(example libdlib)

Test code:

#include <iostream>
#include "dlib/svm.h"

using namespace std;
using namespace dlib;

int main()
{
    typedef matrix<double, 2, 1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    // Now we make objects to contain our samples and their respective labels.
    std::vector<sample_type> samples;
    std::vector<double> labels;

    // Now let's put some data into our samples and labels objects. We do this
    // by looping over a bunch of points and labeling them according to their
    // distance from the origin.
    for (int r = -20; r <= 20; ++r)
    {
        for (int c = -20; c <= 20; ++c)
        {
            sample_type samp;
            samp(0) = r;
            samp(1) = c;
            samples.push_back(samp);

            // if this point is less than 10 from the origin
            if (sqrt((double)r*r + c*c) <= 10)
                labels.push_back(+1);
            else
                labels.push_back(-1);
        }
    }

    vector_normalizer<sample_type> normalizer;
    // Let the normalizer learn the mean and standard deviation of the samples.
    normalizer.train(samples);
    // now normalize each sample
    for (unsigned long i = 0; i < samples.size(); ++i)
        samples[i] = normalizer(samples[i]);

    randomize_samples(samples, labels);

    // here we make an instance of the svm_c_trainer object that uses our kernel
    // type.
    svm_c_trainer<kernel_type> trainer;

    cout << "doing cross validation" << endl;
    for (double gamma = 0.00001; gamma <= 1; gamma *= 5)
    {
        for (double C = 1; C < 100000; C *= 5)
        {
            // tell the trainer the parameters we want to use
            trainer.set_kernel(kernel_type(gamma));
            trainer.set_c(C);

            cout << "gamma: " << gamma << " C: " << C;
            // Print out the cross validation accuracy for 3-fold cross validation using
            // the current gamma and C. cross_validate_trainer() returns a row vector.
            // The first element of the vector is the fraction of +1 training examples
            // correctly classified and the second number is the fraction of -1 training
            // examples correctly classified.
            cout << " cross validation accuracy: "
                 << cross_validate_trainer(trainer, samples, labels, 3);
        }
    }

    trainer.set_kernel(kernel_type(0.15625));
    trainer.set_c(5);
    typedef decision_function<kernel_type> dec_funct_type;
    typedef normalized_function<dec_funct_type> funct_type;

    // Here we are making an instance of the normalized_function object. This
    // object provides a convenient way to store the vector normalization
    // information along with the decision function we are going to learn.
    funct_type learned_function;
    learned_function.normalizer = normalizer; // save normalization information
    learned_function.function = trainer.train(samples, labels); // perform the actual SVM training and save the results

    // print out the number of support vectors in the resulting decision function
    cout << "\nnumber of support vectors in our learned_function is "
         << learned_function.function.basis_vectors.size() << endl;

    // Now let's try this decision_function on some samples we haven't seen before.
    sample_type sample;

    sample(0) = 3.123;
    sample(1) = 2;
    cout << "This is a +1 class example, the classifier output is " << learned_function(sample) << endl;

    sample(0) = 3.123;
    sample(1) = 9.3545;
    cout << "This is a +1 class example, the classifier output is " << learned_function(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 9.3545;
    cout << "This is a -1 class example, the classifier output is " << learned_function(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 0;
    cout << "This is a -1 class example, the classifier output is " << learned_function(sample) << endl;

    // We can also train a decision function that reports a well conditioned
    // probability instead of just a number > 0 for the +1 class and < 0 for the
    // -1 class. An example of doing that follows:
    typedef probabilistic_decision_function<kernel_type> probabilistic_funct_type;
    typedef normalized_function<probabilistic_funct_type> pfunct_type;

    pfunct_type learned_pfunct;
    learned_pfunct.normalizer = normalizer;
    learned_pfunct.function = train_probabilistic_decision_function(trainer, samples, labels, 3);
    // Now we have a function that returns the probability that a given sample is of the +1 class.

    // print out the number of support vectors in the resulting decision function.
    // (it should be the same as in the one above)
    cout << "\nnumber of support vectors in our learned_pfunct is "
         << learned_pfunct.function.decision_funct.basis_vectors.size() << endl;

    sample(0) = 3.123;
    sample(1) = 2;
    cout << "This +1 class example should have high probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    sample(0) = 3.123;
    sample(1) = 9.3545;
    cout << "This +1 class example should have high probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 9.3545;
    cout << "This -1 class example should have low probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 0;
    cout << "This -1 class example should have low probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    serialize("saved_function.dat") << learned_pfunct;

    // Now let's open that file back up and load the function object it contains.
    deserialize("saved_function.dat") >> learned_pfunct;

    cout << "\ncross validation accuracy with only 10 support vectors: "
         << cross_validate_trainer(reduced2(trainer, 10), samples, labels, 3);

    // Let's print out the original cross validation score too for comparison.
    cout << "cross validation accuracy with all the original support vectors: "
         << cross_validate_trainer(trainer, samples, labels, 3);

    // When you run this program you should see that, for this problem, you can
    // reduce the number of basis vectors down to 10 without hurting the cross
    // validation accuracy.

    // To get the reduced decision function out we would just do this:
    learned_function.function = reduced2(trainer, 10).train(samples, labels);
    // And similarly for the probabilistic_decision_function:
    learned_pfunct.function = train_probabilistic_decision_function(reduced2(trainer, 10), samples, labels, 3);

    return 0;
}

The test output is:

F:\example\cmake-build-debug\example.exe
doing cross validation
gamma: 1e-05 C: 1 cross validation accuracy: 0 1
gamma: 1e-05 C: 5 cross validation accuracy: 0 1
gamma: 1e-05 C: 25 cross validation accuracy: 0 1
gamma: 1e-05 C: 125 cross validation accuracy: 0 1
gamma: 1e-05 C: 625 cross validation accuracy: 0 1
gamma: 1e-05 C: 3125 cross validation accuracy: 0 1
gamma: 1e-05 C: 15625 cross validation accuracy: 0 1
gamma: 1e-05 C: 78125 cross validation accuracy: 0 1
gamma: 5e-05 C: 1 cross validation accuracy: 0 1
gamma: 5e-05 C: 5 cross validation accuracy: 0 1
gamma: 5e-05 C: 25 cross validation accuracy: 0 1
gamma: 5e-05 C: 125 cross validation accuracy: 0 1
gamma: 5e-05 C: 625 cross validation accuracy: 0 1
gamma: 5e-05 C: 3125 cross validation accuracy: 0 1
gamma: 5e-05 C: 15625 cross validation accuracy: 0 1
gamma: 5e-05 C: 78125 cross validation accuracy: 0 1
gamma: 0.00025 C: 1 cross validation accuracy: 0 1
gamma: 0.00025 C: 5 cross validation accuracy: 0 1
gamma: 0.00025 C: 25 cross validation accuracy: 0 1
gamma: 0.00025 C: 125 cross validation accuracy: 0 1
gamma: 0.00025 C: 625 cross validation accuracy: 0 1
gamma: 0.00025 C: 3125 cross validation accuracy: 0 1
gamma: 0.00025 C: 15625 cross validation accuracy: 0 1
gamma: 0.00025 C: 78125 cross validation accuracy: 0.990476 0.991189
gamma: 0.00125 C: 1 cross validation accuracy: 0 1
gamma: 0.00125 C: 5 cross validation accuracy: 0 1
gamma: 0.00125 C: 25 cross validation accuracy: 0 1
gamma: 0.00125 C: 125 cross validation accuracy: 0 1
gamma: 0.00125 C: 625 cross validation accuracy: 0 1
gamma: 0.00125 C: 3125 cross validation accuracy: 0.980952 0.994126
gamma: 0.00125 C: 15625 cross validation accuracy: 0.980952 0.991924
gamma: 0.00125 C: 78125 cross validation accuracy: 0.984127 0.99486
gamma: 0.00625 C: 1 cross validation accuracy: 0 1
gamma: 0.00625 C: 5 cross validation accuracy: 0 1
gamma: 0.00625 C: 25 cross validation accuracy: 0 1
gamma: 0.00625 C: 125 cross validation accuracy: 0.980952 0.99486
gamma: 0.00625 C: 625 cross validation accuracy: 0.980952 0.991924
gamma: 0.00625 C: 3125 cross validation accuracy: 0.980952 0.995595
gamma: 0.00625 C: 15625 cross validation accuracy: 0.987302 0.994126
gamma: 0.00625 C: 78125 cross validation accuracy: 0.990476 0.99486
gamma: 0.03125 C: 1 cross validation accuracy: 0 1
gamma: 0.03125 C: 5 cross validation accuracy: 0.971429 0.996329
gamma: 0.03125 C: 25 cross validation accuracy: 0.974603 0.992658
gamma: 0.03125 C: 125 cross validation accuracy: 0.980952 0.996329
gamma: 0.03125 C: 625 cross validation accuracy: 0.987302 0.99486
gamma: 0.03125 C: 3125 cross validation accuracy: 0.990476 0.99486
gamma: 0.03125 C: 15625 cross validation accuracy: 0.95873 0.995595
gamma: 0.03125 C: 78125 cross validation accuracy: 0.996825 0.995595
gamma: 0.15625 C: 1 cross validation accuracy: 0.952381 0.998532
gamma: 0.15625 C: 5 cross validation accuracy: 0.993651 0.996329
gamma: 0.15625 C: 25 cross validation accuracy: 0.990476 0.995595
gamma: 0.15625 C: 125 cross validation accuracy: 0.980952 0.99486
gamma: 0.15625 C: 625 cross validation accuracy: 0.949206 0.997797
gamma: 0.15625 C: 3125 cross validation accuracy: 0.993651 0.998532
gamma: 0.15625 C: 15625 cross validation accuracy: 0.987302 1
gamma: 0.15625 C: 78125 cross validation accuracy: 0.990476 0.997797
gamma: 0.78125 C: 1 cross validation accuracy: 0.952381 0.997797
gamma: 0.78125 C: 5 cross validation accuracy: 0.974603 0.997797
gamma: 0.78125 C: 25 cross validation accuracy: 0.974603 1
gamma: 0.78125 C: 125 cross validation accuracy: 0.984127 1
gamma: 0.78125 C: 625 cross validation accuracy: 0.987302 1
gamma: 0.78125 C: 3125 cross validation accuracy: 0.987302 1
gamma: 0.78125 C: 15625 cross validation accuracy: 0.987302 0.997797
gamma: 0.78125 C: 78125 cross validation accuracy: 0.980952 0.998532

number of support vectors in our learned_function is 209
This is a +1 class example, the classifier output is 2.71477
This is a +1 class example, the classifier output is -0.0102314
This is a -1 class example, the classifier output is -4.36211
This is a -1 class example, the classifier output is -2.16552

number of support vectors in our learned_pfunct is 209
This +1 class example should have high probability. Its probability is: 1
This +1 class example should have high probability. Its probability is: 0.465781
This -1 class example should have low probability. Its probability is: 3.05246e-11
This -1 class example should have low probability. Its probability is: 5.78323e-06

cross validation accuracy with only 10 support vectors: 0.993651 0.99486
cross validation accuracy with all the original support vectors: 0.993651 0.996329

Process finished with exit code 0

Next, add OpenCV support in CMakeLists.txt and test with the face code.

A head-pose (face_pose) recognition demo is provided; the code is based on https://github.com/lincolnhard/head-pose-estimation

CMakeLists.txt:

cmake_minimum_required(VERSION 3.17)
project(example)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 ")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3 ")
set(CMAKE_CXX_STANDARD 11)
set(OpenCV_DIR "D:\\Opencv440\\buildMinGW") # change this to the location of your MinGW build of OpenCV
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")
find_package(OpenCV REQUIRED)
include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${OpenCV_INCLUDE_DIRS})

# import the dlib static library
add_library(libdlib STATIC IMPORTED)
set_target_properties(libdlib PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/lib/libdlib.a)
add_executable(example main.cpp)
target_link_libraries(example ${OpenCV_LIBS} libdlib )

set(OpenCV_LIBS opencv_core opencv_imgproc opencv_highgui opencv_imgcodecs) # note: placed after target_link_libraries this has no effect; move it above if you want to restrict the linked OpenCV modules

Source code, excerpted from https://github.com/lincolnhard/head-pose-estimation:

#include <iostream>
#include <iomanip>
#include <sstream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "dlib/opencv.h"
#include "dlib/image_processing/frontal_face_detector.h"
#include "dlib/image_processing/render_face_detections.h"
#include "dlib/image_processing.h"

//Intrinsics can be calculated using the OpenCV sample code under opencv/sources/samples/cpp/tutorial_code/calib3d
//Normally, you can also approximate fx and fy by the image width, cx by half the image width, cy by half the image height instead
double K[9] = { 6.5308391993466671e+002, 0.0, 3.1950000000000000e+002, 0.0, 6.5308391993466671e+002, 2.3950000000000000e+002, 0.0, 0.0, 1.0 };
double D[5] = { 7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000 };

int main()
{
    //open cam
    cv::VideoCapture cap(0);
    if (!cap.isOpened())
    {
        std::cout << "Unable to connect to camera" << std::endl;
        return EXIT_FAILURE;
    }
    //Load face detection and pose estimation models (dlib).
    dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();
    dlib::shape_predictor predictor;
    dlib::deserialize("shape_predictor_68_face_landmarks.dat") >> predictor;

    //fill in cam intrinsics and distortion coefficients
    cv::Mat cam_matrix = cv::Mat(3, 3, CV_64FC1, K);
    cv::Mat dist_coeffs = cv::Mat(5, 1, CV_64FC1, D);

    //fill in 3D ref points(world coordinates), model referenced from http://aifi.isr.uc.pt/Downloads/OpenGL/glAnthropometric3DModel.cpp
    std::vector<cv::Point3d> object_pts;
    object_pts.push_back(cv::Point3d(6.825897, 6.760612, 4.402142));   //#33 left brow left corner
    object_pts.push_back(cv::Point3d(1.330353, 7.122144, 6.903745));   //#29 left brow right corner
    object_pts.push_back(cv::Point3d(-1.330353, 7.122144, 6.903745));  //#34 right brow left corner
    object_pts.push_back(cv::Point3d(-6.825897, 6.760612, 4.402142));  //#38 right brow right corner
    object_pts.push_back(cv::Point3d(5.311432, 5.485328, 3.987654));   //#13 left eye left corner
    object_pts.push_back(cv::Point3d(1.789930, 5.393625, 4.413414));   //#17 left eye right corner
    object_pts.push_back(cv::Point3d(-1.789930, 5.393625, 4.413414));  //#25 right eye left corner
    object_pts.push_back(cv::Point3d(-5.311432, 5.485328, 3.987654));  //#21 right eye right corner
    object_pts.push_back(cv::Point3d(2.005628, 1.409845, 6.165652));   //#55 nose left corner
    object_pts.push_back(cv::Point3d(-2.005628, 1.409845, 6.165652));  //#49 nose right corner
    object_pts.push_back(cv::Point3d(2.774015, -2.080775, 5.048531));  //#43 mouth left corner
    object_pts.push_back(cv::Point3d(-2.774015, -2.080775, 5.048531)); //#39 mouth right corner
    object_pts.push_back(cv::Point3d(0.000000, -3.116408, 6.097667));  //#45 mouth central bottom corner
    object_pts.push_back(cv::Point3d(0.000000, -7.415691, 4.070434));  //#6 chin corner

    //2D ref points(image coordinates), referenced from detected facial feature
    std::vector<cv::Point2d> image_pts;

    //result
    cv::Mat rotation_vec;                       //3 x 1
    cv::Mat rotation_mat;                       //3 x 3 R
    cv::Mat translation_vec;                    //3 x 1 T
    cv::Mat pose_mat = cv::Mat(3, 4, CV_64FC1); //3 x 4 R | T
    cv::Mat euler_angle = cv::Mat(3, 1, CV_64FC1);

    //reproject 3D points world coordinate axis to verify result pose
    std::vector<cv::Point3d> reprojectsrc;
    reprojectsrc.push_back(cv::Point3d(10.0, 10.0, 10.0));
    reprojectsrc.push_back(cv::Point3d(10.0, 10.0, -10.0));
    reprojectsrc.push_back(cv::Point3d(10.0, -10.0, -10.0));
    reprojectsrc.push_back(cv::Point3d(10.0, -10.0, 10.0));
    reprojectsrc.push_back(cv::Point3d(-10.0, 10.0, 10.0));
    reprojectsrc.push_back(cv::Point3d(-10.0, 10.0, -10.0));
    reprojectsrc.push_back(cv::Point3d(-10.0, -10.0, -10.0));
    reprojectsrc.push_back(cv::Point3d(-10.0, -10.0, 10.0));

    //reprojected 2D points
    std::vector<cv::Point2d> reprojectdst;
    reprojectdst.resize(8);

    //temp buf for decomposeProjectionMatrix()
    cv::Mat out_intrinsics = cv::Mat(3, 3, CV_64FC1);
    cv::Mat out_rotation = cv::Mat(3, 3, CV_64FC1);
    cv::Mat out_translation = cv::Mat(3, 1, CV_64FC1);

    //text on screen
    std::ostringstream outtext;

    //main loop
    while (1)
    {
        // Grab a frame
        cv::Mat temp;
        cap >> temp;
        dlib::cv_image<dlib::bgr_pixel> cimg(temp);

        // Detect faces
        std::vector<dlib::rectangle> faces = detector(cimg);

        // Find the pose of each face
        if (faces.size() > 0)
        {
            //track features
            dlib::full_object_detection shape = predictor(cimg, faces[0]);

            //draw features
            for (unsigned int i = 0; i < 68; ++i)
            {
                cv::circle(temp, cv::Point(shape.part(i).x(), shape.part(i).y()), 2, cv::Scalar(0, 0, 255), -1);
            }

            //fill in 2D ref points, annotations follow https://ibug.doc.ic.ac.uk/resources/300-W/
            image_pts.push_back(cv::Point2d(shape.part(17).x(), shape.part(17).y())); //#17 left brow left corner
            image_pts.push_back(cv::Point2d(shape.part(21).x(), shape.part(21).y())); //#21 left brow right corner
            image_pts.push_back(cv::Point2d(shape.part(22).x(), shape.part(22).y())); //#22 right brow left corner
            image_pts.push_back(cv::Point2d(shape.part(26).x(), shape.part(26).y())); //#26 right brow right corner
            image_pts.push_back(cv::Point2d(shape.part(36).x(), shape.part(36).y())); //#36 left eye left corner
            image_pts.push_back(cv::Point2d(shape.part(39).x(), shape.part(39).y())); //#39 left eye right corner
            image_pts.push_back(cv::Point2d(shape.part(42).x(), shape.part(42).y())); //#42 right eye left corner
            image_pts.push_back(cv::Point2d(shape.part(45).x(), shape.part(45).y())); //#45 right eye right corner
            image_pts.push_back(cv::Point2d(shape.part(31).x(), shape.part(31).y())); //#31 nose left corner
            image_pts.push_back(cv::Point2d(shape.part(35).x(), shape.part(35).y())); //#35 nose right corner
            image_pts.push_back(cv::Point2d(shape.part(48).x(), shape.part(48).y())); //#48 mouth left corner
            image_pts.push_back(cv::Point2d(shape.part(54).x(), shape.part(54).y())); //#54 mouth right corner
            image_pts.push_back(cv::Point2d(shape.part(57).x(), shape.part(57).y())); //#57 mouth central bottom corner
            image_pts.push_back(cv::Point2d(shape.part(8).x(), shape.part(8).y()));   //#8 chin corner

            //calc pose
            cv::solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs, rotation_vec, translation_vec);

            //reproject
            cv::projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix, dist_coeffs, reprojectdst);

            //draw axis
            cv::line(temp, reprojectdst[0], reprojectdst[1], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[1], reprojectdst[2], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[2], reprojectdst[3], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[3], reprojectdst[0], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[4], reprojectdst[5], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[5], reprojectdst[6], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[6], reprojectdst[7], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[7], reprojectdst[4], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[0], reprojectdst[4], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[1], reprojectdst[5], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[2], reprojectdst[6], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[3], reprojectdst[7], cv::Scalar(0, 0, 255));

            //calc euler angle
            cv::Rodrigues(rotation_vec, rotation_mat);
            cv::hconcat(rotation_mat, translation_vec, pose_mat);
            cv::decomposeProjectionMatrix(pose_mat, out_intrinsics, out_rotation, out_translation, cv::noArray(), cv::noArray(), cv::noArray(), euler_angle);

            //show angle result
            outtext << "X: " << std::setprecision(3) << euler_angle.at<double>(0);
            cv::putText(temp, outtext.str(), cv::Point(50, 40), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 255, 255));
            outtext.str("");
            outtext << "Y: " << std::setprecision(3) << euler_angle.at<double>(1);
            cv::putText(temp, outtext.str(), cv::Point(50, 60), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 255, 255));
            outtext.str("");
            outtext << "Z: " << std::setprecision(3) << euler_angle.at<double>(2);
            cv::putText(temp, outtext.str(), cv::Point(50, 80), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 255, 255));
            outtext.str("");

            image_pts.clear();
        }

        //press esc to end
        cv::imshow("demo", temp);
        unsigned char key = cv::waitKey(1);
        if (key == 27)
        {
            break;
        }
    }

    return 0;
}

The recognition result:

The test video comes from the folder linked from that repository.

(screenshot of the head-pose detection result)

 

As for the calibration parameters K and D, you need to photograph a calibration (chessboard) target yourself and compute them; the script that generates these values is given below (reference code adapted from material found online). A sketch of how to feed the printed values back into the C++ K and D arrays follows the script.

import os
import numpy as np
import cv2
import glob


def calib(inter_corner_shape, size_per_grid, img_dir, img_type):
    # criteria: only for subpix calibration, which is not used here.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    w, h = inter_corner_shape
    # cp_int: corner points in integer form, i.e. the coordinates of the corner points
    # in world space as integers, like (0,0,0), (1,0,0), (2,0,0) ..., (10,7,0).
    cp_int = np.zeros((w * h, 3), np.float32)
    cp_int[:, :2] = np.mgrid[0:w, 0:h].T.reshape(-1, 2)
    # cp_world: corner points in world space (scaled by the grid size).
    cp_world = cp_int * size_per_grid

    obj_points = []  # the points in world space
    img_points = []  # the points in image space (corresponding to obj_points)
    images = glob.glob(img_dir + os.sep + '**.' + img_type)
    for fname in images:
        img = cv2.imread(fname)
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # find the corners; cp_img: corner points in pixel space.
        ret, cp_img = cv2.findChessboardCorners(gray_img, (w, h), None)
        # if ret is True, refine and save.
        if ret == True:
            cv2.cornerSubPix(gray_img, cp_img, (8, 8), (-1, -1), criteria)  # the (8, 8) window size can be tuned to minimize the reprojection error
            obj_points.append(cp_world)
            img_points.append(cp_img)
            # view the corners
            cv2.drawChessboardCorners(img, (w, h), cp_img, ret)
            # cv2.imshow('FoundCorners', img)
            # cv2.waitKey(1)
    cv2.destroyAllWindows()
    # calibrate the camera
    ret, mat_inter, coff_dis, v_rot, v_trans = cv2.calibrateCamera(obj_points, img_points, gray_img.shape[::-1], None,
                                                                   None)
    print(("ret:"), ret)
    print(("internal matrix:\n"), list(mat_inter.flatten()))
    # in the form of (k_1, k_2, p_1, p_2, k_3)
    print(("distortion coefficients:\n"), list(coff_dis.flatten()))
    print(("rotation vectors:\n"), v_rot)
    print(("translation vectors:\n"), v_trans)
    # compute the mean reprojection error
    total_error = 0
    for i in range(len(obj_points)):
        img_points_repro, _ = cv2.projectPoints(obj_points[i], v_rot[i], v_trans[i], mat_inter, coff_dis)
        error = cv2.norm(img_points[i], img_points_repro, cv2.NORM_L2) / len(img_points_repro)
        total_error += error
    print(("Average Error of Reproject: "), total_error / len(obj_points))

    return mat_inter, coff_dis


if __name__ == '__main__':
    inter_corner_shape = (7, 7)  # number of inner corners along each axis
    size_per_grid = 0.027  # size of each square, in meters
    img_dir = "F:\\camera_calibration_tool\\chess"
    img_type = "jpg"
    calib(inter_corner_shape, size_per_grid, img_dir, img_type)
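The intrinsic matrix printed by the script is the 3x3 camera matrix flattened row-major, and the distortion coefficients are printed in the order (k1, k2, p1, p2, k3), so they map directly onto the K[9] and D[5] arrays used by the head-pose code above. A minimal sketch, with placeholder numbers rather than real calibration output:

#include <opencv2/core/core.hpp>

// mat_inter.flatten() from the calibration script gives the intrinsic matrix in
// row-major order (fx, 0, cx, 0, fy, cy, 0, 0, 1); coff_dis.flatten() gives
// (k1, k2, p1, p2, k3). The numbers below are placeholders only; replace them
// with the values printed for your own camera.
double K[9] = { 650.0, 0.0, 319.5,
                0.0, 650.0, 239.5,
                0.0, 0.0, 1.0 };
double D[5] = { 0.07, 0.07, 0.0, 0.0, -1.3 };

int main()
{
    // Wrap the arrays exactly as the head-pose code does.
    cv::Mat cam_matrix(3, 3, CV_64FC1, K);
    cv::Mat dist_coeffs(5, 1, CV_64FC1, D);
    return 0;
}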

 
