
Dlib C++ can detect facial landmarks and estimate the face pose quite well. But how do I get the 3D orientation (x, y, z) of the head-pose coordinate axes?


1 Answer


I ran into the same problem a while ago, searched around, and found one or two useful blog posts. This link gives an overview of the techniques involved. If you only need the 3D pose as plain numbers, you can skip the OpenGL rendering part; if you want visual feedback you can try OpenGL as well, but as a beginner I would suggest ignoring the OpenGL part. A minimal working snippet extracted from the GitHub page looks like this:


// Headers needed for the snippet (solvePnP/Rodrigues come in via opencv.hpp).
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cstdio>
#include <vector>

// Reading image using OpenCV, you may use dlib as well.
cv::Mat img = cv::imread(imagePath);

std::vector<double> rv(3), tv(3);
cv::Mat rvec(rv),tvec(tv);
cv::Vec3d eav;

// Labelling the 3D Points derived from a 3D model of human face.
// You may replace these points as per your custom 3D head model if any
std::vector<cv::Point3f > modelPoints;
modelPoints.push_back(cv::Point3f(2.37427,110.322,21.7776));    // l eye (v 314)
modelPoints.push_back(cv::Point3f(70.0602,109.898,20.8234));    // r eye (v 0)
modelPoints.push_back(cv::Point3f(36.8301,78.3185,52.0345));    //nose (v 1879)
modelPoints.push_back(cv::Point3f(14.8498,51.0115,30.2378));    // l mouth (v 1502)
modelPoints.push_back(cv::Point3f(58.1825,51.0115,29.6224));    // r mouth (v 695)
modelPoints.push_back(cv::Point3f(-61.8886f,127.797,-89.4523f));  // l ear (v 2011)
modelPoints.push_back(cv::Point3f(127.603,126.9,-83.9129f));     // r ear (v 1138)

// Labelling the position of corresponding feature points on the input image.
std::vector<cv::Point2f> srcImagePoints = {cv::Point2f(442, 442),  // left eye
                                           cv::Point2f(529, 426),  // right eye
                                           cv::Point2f(501, 479),  // nose
                                           cv::Point2f(469, 534),  // left lip corner
                                           cv::Point2f(538, 521),  // right lip corner
                                           cv::Point2f(349, 457),  // left ear
                                           cv::Point2f(578, 415)}; // right ear


cv::Mat ip(srcImagePoints);

cv::Mat op = cv::Mat(modelPoints);
cv::Scalar m = cv::mean(cv::Mat(modelPoints)); // centroid of the model points (unused below)

// Initial pose guess: rotation with y and z flipped so the model faces the
// camera, and the head one unit in front of it.
rvec = cv::Mat(rv);
double _d[9] = {1, 0, 0,
                0,-1, 0,
                0, 0,-1};
cv::Rodrigues(cv::Mat(3,3,CV_64FC1,_d), rvec);
tv[0] = 0; tv[1] = 0; tv[2] = 1;
tvec = cv::Mat(tv);


// Approximate pinhole intrinsics: focal length of the order of the image
// size, principal point at the image centre.
double max_d = (double)std::max(img.rows, img.cols);
double _cm[9] = {max_d,     0, (double)img.cols/2.0,
                 0    , max_d, (double)img.rows/2.0,
                 0    ,     0,                  1.0};
cv::Mat camMatrix = cv::Mat(3,3,CV_64FC1, _cm);

double _dc[] = {0,0,0,0}; // assume zero lens distortion
// CV_EPNP is the old OpenCV 2.x constant; on OpenCV 3+ use cv::SOLVEPNP_EPNP.
cv::solvePnP(op, ip, camMatrix, cv::Mat(1,4,CV_64FC1,_dc), rvec, tvec, false, cv::SOLVEPNP_EPNP);

double rot[9] = {0};
cv::Mat rotM(3,3,CV_64FC1,rot);
cv::Rodrigues(rvec, rotM);
double* _r = rotM.ptr<double>();
printf("rotation mat: \n %.3f %.3f %.3f\n%.3f %.3f %.3f\n%.3f %.3f %.3f\n",
       _r[0],_r[1],_r[2],_r[3],_r[4],_r[5],_r[6],_r[7],_r[8]);

// Read the translation back from tvec itself; solvePnP may reallocate the
// output Mat, so tv is not guaranteed to still alias its data.
double* _t = tvec.ptr<double>();
printf("trans vec: \n %.3f %.3f %.3f\n", _t[0], _t[1], _t[2]);

// Build a 3x4 [R|t] projection matrix so decomposeProjectionMatrix can
// hand back the Euler angles.
double _pm[12] = {_r[0],_r[1],_r[2],_t[0],
                  _r[3],_r[4],_r[5],_t[1],
                  _r[6],_r[7],_r[8],_t[2]};

cv::Mat tmp,tmp1,tmp2,tmp3,tmp4,tmp5;
cv::decomposeProjectionMatrix(cv::Mat(3,4,CV_64FC1,_pm),tmp,tmp1,tmp2,tmp3,tmp4,tmp5,eav);
printf("Face Rotation Angle:  %.5f %.5f %.5f\n",eav[0],eav[1],eav[2]);

Output:

                          X        Y        Z

Face Rotation Angle:  171.44027 -8.72583 -9.90596
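
To get at the "coordinate axes" part of the question: once rvec and tvec are known, you can make the orientation visible by projecting the endpoints of three model-space axes into the image with cv::projectPoints and drawing them from the nose point. This is only a sketch on top of the snippet above; picking the nose as the axis origin and an axis length of 100 model units are arbitrary choices of mine:

// Sketch: project the model-space axes into the image to visualise the pose.
// The origin is the model's nose point (modelPoints[2]); 100.f is an
// arbitrary axis length in model units.
std::vector<cv::Point3f> axisPoints = {
    cv::Point3f(36.8301f, 78.3185f, 52.0345f),            // origin: nose
    cv::Point3f(36.8301f + 100.f, 78.3185f, 52.0345f),    // +X
    cv::Point3f(36.8301f, 78.3185f + 100.f, 52.0345f),    // +Y
    cv::Point3f(36.8301f, 78.3185f, 52.0345f + 100.f)};   // +Z

std::vector<cv::Point2f> projectedAxes;
cv::projectPoints(axisPoints, rvec, tvec, camMatrix,
                  cv::Mat(1,4,CV_64FC1,_dc), projectedAxes);

cv::line(img, projectedAxes[0], projectedAxes[1], cv::Scalar(0,0,255), 2); // X, red
cv::line(img, projectedAxes[0], projectedAxes[2], cv::Scalar(0,255,0), 2); // Y, green
cv::line(img, projectedAxes[0], projectedAxes[3], cv::Scalar(255,0,0), 2); // Z, blue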
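
And since the question starts from dlib: instead of hard-coding srcImagePoints, you could fill them from dlib's 68-point shape predictor. The index mapping below (36/45 for the outer eye corners, 30 for the nose tip, 48/54 for the mouth corners) follows the usual 68-point layout, but that model has no real ear landmarks, so using the jaw points 0 and 16 as ear stand-ins is an approximation on my part. It also assumes you have shape_predictor_68_face_landmarks.dat available:

// Sketch: fill srcImagePoints from dlib's 68-point shape predictor.
#include <dlib/image_processing.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/opencv.h>

std::vector<cv::Point2f> landmarksFromDlib(const cv::Mat& img,
                                           dlib::frontal_face_detector& detector,
                                           dlib::shape_predictor& predictor)
{
    dlib::cv_image<dlib::bgr_pixel> dimg(img);
    std::vector<dlib::rectangle> faces = detector(dimg);

    std::vector<cv::Point2f> pts;
    if (faces.empty()) return pts; // no face found

    dlib::full_object_detection shape = predictor(dimg, faces[0]);
    // Eye corners, nose tip, mouth corners, then jaw points 0/16 as rough
    // substitutes for the ears, in the same order as modelPoints above.
    const int idx[7] = {36, 45, 30, 48, 54, 0, 16};
    for (int i : idx)
        pts.push_back(cv::Point2f((float)shape.part(i).x(),
                                  (float)shape.part(i).y()));
    return pts;
}

The detector and predictor would be set up once with dlib::get_frontal_face_detector() and dlib::deserialize("shape_predictor_68_face_landmarks.dat") >> predictor; the returned points then replace the hard-coded srcImagePoints above.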
answered 2016-04-13T07:13:59.113