I want to build a real-time emotion recognition program using LBP and SVM. After the face detection step, I convert the captured image to a 32x32-pixel grayscale image. I am having trouble creating and displaying the histograms for my LBP (I use the simple, non-interpolated LBP). So far, all I have managed is to display the generated LBP image in real time.
The paper by Ahonen et al. states:
"the LBP image is divided into m local regions and a histogram is extracted from each (region)"
How do we determine the number m of local regions?
I have been trying to find answers here and here, but I cannot understand them. I have also seen berak's work on spatial histograms here, and I am still confused. Could someone walk me through it step by step (yes, I am a newbie :/)? I really need to compute and display the histograms as shown on page 14.
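From what I have read so far, my rough guess at the spatial histogram step is the sketch below: pick a fixed grid of cells (the 8x8 grid, i.e. m = 64 regions, is purely my assumption; I think the papers simply choose a grid, e.g. 7x7), compute a 256-bin histogram of the LBP codes in each cell with calcHist, and concatenate everything into one long feature vector. spatialHistogram is just a name I made up, so please correct me if this is not what Ahonen et al. mean:

// My guess at the "m local regions" step: split the LBP image into a grid of
// cells, take a 256-bin histogram of each cell and concatenate them into one
// row vector. grid_x/grid_y are my own assumption (e.g. 8x8, so m = 64);
// leftover pixels on the right/bottom edge are simply ignored.
Mat spatialHistogram(const Mat& lbpImg, int grid_x, int grid_y){
    int cellW = lbpImg.cols / grid_x;
    int cellH = lbpImg.rows / grid_y;
    int histSize = 256;                 // one bin per LBP code
    float range[] = { 0, 256 };
    const float* histRange = { range };
    Mat feature;                        // one 1x256 histogram row per cell
    for (int gy = 0; gy < grid_y; gy++){
        for (int gx = 0; gx < grid_x; gx++){
            Mat cell = lbpImg(Rect(gx*cellW, gy*cellH, cellW, cellH));
            Mat hist;
            calcHist(&cell, 1, 0, Mat(), hist, 1, &histSize, &histRange);
            feature.push_back(hist.reshape(1, 1)); // append as a 1x256 row
        }
    }
    return feature.reshape(1, 1);       // 1 x (grid_x*grid_y*256), CV_32F
}

If that is roughly right, then m is just grid_x*grid_y, which brings me back to the question of how m should actually be chosen.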
Anyway, maybe I should show my messy code here.
// Libraries included
#include "opencv2/core/core.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
// Namespace declaration
using namespace std;
using namespace cv;
// Function Headers
void detectAndDisplay(Mat frame);
Mat LBP(Mat img);
// Global variables
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
// Function main
int main(){
    // Start the HighGUI window thread. VERY IMPORTANT
    startWindowThread();
    Mat frame;
    // Load the cascades; check each one separately to prevent a segmentation fault later
    if (!face_cascade.load(face_cascade_name)){
        printf("--(!)Error loading face cascade\n");
        return -1;
    }
    if (!eyes_cascade.load(eyes_cascade_name)){
        printf("--(!)Error loading eyes cascade\n");
        return -1;
    }
    // Start the program, capture from CAM with CAMID = 0
    VideoCapture capture(0);
    if (!capture.isOpened()){
        printf("VideoCapture ERROR\n");
        return -1;
    }
    while (true){
        capture >> frame;
        if (frame.empty()){
            printf(" --(!) No captured frame -- Break!\n");
            break;
        }
        cv::flip(frame, frame, 1);   // mirror the image horizontally
        //-- 3. Apply the classifier to the frame
        detectAndDisplay(frame);
        int c = waitKey(1);
        if ((char)c == 'c'){
            destroyWindow("Live Camera");
            break;
        }
    }
    return 0;
}
// Function detectAndDisplay
void detectAndDisplay(Mat frame){
    std::vector<Rect> faces;
    std::vector<Rect> eyes;
    Mat frame_gray;
    Mat crop;
    Mat crop2;
    Mat res;
    Mat gray;
    Mat dst;

    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    // Detect faces (only the biggest one, at least 60x60)
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 4, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(60, 60));

    // Regions of interest
    cv::Rect roi_b;
    cv::Rect roi_c;

    for (size_t ic = 0; ic < faces.size(); ic++){ // Iterate through all detected faces
        roi_b = faces[ic];
        crop = frame(roi_b);
        resize(crop, res, Size(256, 256), 0, 0, INTER_LINEAR); // This will be needed later while saving images
        cvtColor(res, gray, CV_BGR2GRAY); // Convert cropped image to grayscale

        // Detect eyes inside the cropped face
        eyes_cascade.detectMultiScale(gray, eyes, 1.1, 4, 0 | CV_HAAR_SCALE_IMAGE, Size(15, 15));
        if (eyes.size() == 2){
            // Build a crop around the eyes; the left-most eye defines the origin
            if (eyes[0].x <= eyes[1].x){
                roi_c.x = eyes[0].x*0.75;
                roi_c.y = eyes[0].y*0.7;
                roi_c.width = (eyes[1].x+65) - roi_c.x;
                roi_c.height = 190;
            }
            else{
                roi_c.x = eyes[1].x*0.75;
                roi_c.y = eyes[1].y*0.7;
                roi_c.width = (eyes[0].x+65) - roi_c.x;
                roi_c.height = 190;
            }
            roi_c &= cv::Rect(0, 0, gray.cols, gray.rows); // clamp so gray(roi_c) cannot throw

            crop2 = gray(roi_c);
            resize(crop2, crop2, Size(128, 128), 0, 0, INTER_LINEAR); // This will be needed later while saving images
            dst = LBP(crop2);

            // Mark both eyes on the cropped grayscale face
            Point centerEye1(eyes[0].x + eyes[0].width*0.5, eyes[0].y + eyes[0].height*0.5);
            int radiusEye1 = cvRound((eyes[0].width + eyes[0].height)*0.25);
            circle(gray, centerEye1, radiusEye1, Scalar(0, 0, 255), 1, 8, 0);
            Point centerEye2(eyes[1].x + eyes[1].width*0.5, eyes[1].y + eyes[1].height*0.5);
            int radiusEye2 = cvRound((eyes[1].width + eyes[1].height)*0.25);
            circle(gray, centerEye2, radiusEye2, Scalar(0, 0, 255), 1, 8, 0);
        }

        // Display detected faces on main window - live stream from camera
        Point pt1(faces[ic].x, faces[ic].y);
        Point pt2(faces[ic].x + faces[ic].width, faces[ic].y + faces[ic].height);
        rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 1, 8, 0);
        putText(frame, "Auto-focused", cvPoint(faces[ic].x + faces[ic].width/4, faces[ic].y - 10), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
    }

    imshow("Live Camera", frame);
    if (!crop2.empty()){
        imshow("Gray2", dst);
        imshow("Gray3", crop2);
    }
    else{
        destroyWindow("Gray2");
        destroyWindow("Gray3");
    }
}
Mat LBP(Mat img){
    // Plain 3x3 LBP: compare each pixel's 8 neighbours with the centre pixel and
    // pack the comparison results into an 8-bit code (border pixels are skipped).
    Mat dst = Mat::zeros(img.rows-2, img.cols-2, CV_8UC1);
    for (int i = 1; i < img.rows-1; i++){
        for (int j = 1; j < img.cols-1; j++){
            uchar center = img.at<uchar>(i,j);
            unsigned char code = 0;
            code |= (img.at<uchar>(i-1,j-1) > center) << 7;
            code |= (img.at<uchar>(i-1,j  ) > center) << 6;
            code |= (img.at<uchar>(i-1,j+1) > center) << 5;
            code |= (img.at<uchar>(i  ,j+1) > center) << 4;
            code |= (img.at<uchar>(i+1,j+1) > center) << 3;
            code |= (img.at<uchar>(i+1,j  ) > center) << 2;
            code |= (img.at<uchar>(i+1,j-1) > center) << 1;
            code |= (img.at<uchar>(i  ,j-1) > center) << 0;
            dst.at<uchar>(i-1,j-1) = code;
        }
    }
    return dst;
}
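For actually displaying one of these histograms with imshow, the only idea I have is to draw it myself as a bar chart, roughly like the sketch below (drawHist and the canvas size are my own made-up choices, not from any paper):

// Rough sketch: render one histogram (CV_32F, as returned by calcHist) as a
// bar chart image so it can be shown with imshow(). Names and sizes are my own.
Mat drawHist(const Mat& hist, int width = 512, int height = 200){
    Mat scaled;
    normalize(hist, scaled, 0, height, NORM_MINMAX, CV_32F); // fit bins to canvas height
    Mat canvas(height, width, CV_8UC3, Scalar::all(0));
    int binW = cvRound((double)width / hist.total());
    for (int i = 0; i < (int)hist.total(); i++){
        int h = cvRound(scaled.at<float>(i));
        rectangle(canvas, Point(i*binW, height - 1), Point((i+1)*binW - 1, height - h),
                  Scalar(255, 255, 255), CV_FILLED);
    }
    return canvas;
}

My plan was to call these right after dst = LBP(crop2); in detectAndDisplay, something like

Mat feat = spatialHistogram(dst, 8, 8);                        // 1 x (8*8*256) row, if my sketch above is right
imshow("LBP histogram (cell 0)", drawHist(feat.colRange(0, 256)));

and later feed feat to the SVM. Does that look like the right direction, and how should the number of regions m actually be chosen?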
Apparently I can't post my screenshots because I don't have enough reputation points :(