I am trying to map 3D glasses onto a face using an iPhone. I'm using the OpenCV eye detection below, but it isn't very reliable: if I turn my face slightly, or if my eyes narrow a bit because I'm looking down at the camera, the eye detection fails. Even with a frontal face it only detects the eyes in about half of the frames. I've read in many places that tuning certain parameters or preprocessing the image can help, but I haven't found the right combination. Below are the preprocessing and the parameters I'm using (and, at the end, a sketch of the kind of change I'm asking about). If anyone can suggest or share better parameters, please help. Thanks.
A grayscale image is obtained from the pixelBuffer, and processFrame: is then called:
if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
    // For grayscale mode, the luminance (Y) plane of the YUV data is used
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    void *baseaddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
    // Pass the plane's bytes-per-row as the step; rows can be padded, so 0 (AUTO_STEP) may skew the image
    size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
    cv::Mat mat(videoRect.size.height, videoRect.size.width, CV_8UC1, baseaddress, bytesPerRow);
    [self processFrame:mat videoRect:videoRect videoOrientation:videoOrientation];
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
}
else if (format == kCVPixelFormatType_32BGRA) {
    // For color mode a 4-channel cv::Mat is created from the BGRA data
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    void *baseaddress = CVPixelBufferGetBaseAddress(pixelBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
    cv::Mat mat(videoRect.size.height, videoRect.size.width, CV_8UC4, baseaddress, bytesPerRow);
    [self processFrame:mat videoRect:videoRect videoOrientation:videoOrientation];
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
}
else {
    NSLog(@"Unsupported video format");
}
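For context, pixelBuffer, format, videoRect and videoOrientation come out of the AVCaptureVideoDataOutput sample-buffer delegate, roughly like this (a sketch of that callback, not copied verbatim from my project):

// Sketch of the capture callback that feeds the format check above
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    OSType format = CVPixelBufferGetPixelFormatType(pixelBuffer);
    CGRect videoRect = CGRectMake(0.0f, 0.0f,
                                  CVPixelBufferGetWidth(pixelBuffer),
                                  CVPixelBufferGetHeight(pixelBuffer));
    AVCaptureVideoOrientation videoOrientation = connection.videoOrientation;

    // ... the kCVPixelFormatType_420YpCbCr8BiPlanarFullRange / kCVPixelFormatType_32BGRA
    // branches shown above go here ...
}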
The classifiers are initialized with:
NSString * const kFaceCascadeFilename = @"haarcascade_frontalface_alt2";
NSString * const kEyesCascadeFilename = @"haarcascade_eye";
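The cascades are loaded from the .xml files in the app bundle, roughly like this (a sketch of the loading code, which I've omitted above for brevity):

// Sketch: assumes the cascade .xml files are bundled with the app
NSString *faceCascadePath = [[NSBundle mainBundle] pathForResource:kFaceCascadeFilename ofType:@"xml"];
NSString *eyesCascadePath = [[NSBundle mainBundle] pathForResource:kEyesCascadeFilename ofType:@"xml"];
if (!_faceCascade.load([faceCascadePath UTF8String]) ||
    !_eyesCascade.load([eyesCascadePath UTF8String])) {
    NSLog(@"Failed to load cascade classifiers");
}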
The detection is done in processFrame::
- (void)processFrame:(cv::Mat &)mat videoRect:(CGRect)rect videoOrientation:(AVCaptureVideoOrientation)videoOrientation
{
    // Shrink video frame to 320x240
    cv::resize(mat, mat, cv::Size(), 0.5f, 0.5f, CV_INTER_LINEAR);
    rect.size.width /= 2.0f;
    rect.size.height /= 2.0f;

    // Rotate video frame by 90deg to portrait by combining a transpose and a flip.
    // Note that the AVCaptureVideoDataOutput connection does NOT support hardware-accelerated
    // rotation and mirroring via the videoOrientation and setVideoMirrored properties, so we
    // need to do the rotation in software here.
    cv::transpose(mat, mat);
    CGFloat temp = rect.size.width;
    rect.size.width = rect.size.height;
    rect.size.height = temp;

    if (videoOrientation == AVCaptureVideoOrientationLandscapeRight)
    {
        // Flip around the y axis for the back camera
        cv::flip(mat, mat, 1);
    }
    else {
        // Front camera output needs to be mirrored to match the preview layer, so no flip is required here
    }
    videoOrientation = AVCaptureVideoOrientationPortrait;

    // Detect faces
    std::vector<cv::Rect> faces;
    std::vector<cv::Rect> eyes;
    _faceCascade.detectMultiScale(mat, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));

    // We will usually have only one face in frame, so run the eye cascade on the first face ROI only
    if (faces.size() > 0) {
        cv::Mat faceROI = mat(faces.front());
        _eyesCascade.detectMultiScale(faceROI, eyes, 1.15, 3, 0, cv::Size(30, 30));
    }

    // Dispatch updating of face markers to the main queue
    dispatch_sync(dispatch_get_main_queue(), ^{
        [self displayFaces:faces eyes:eyes
              forVideoRect:rect
          videoOrientation:videoOrientation];
    });
}
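To be concrete, the kind of change I'm asking about looks something like this: equalizing the histogram of the face ROI before running the eye cascade, and relaxing the eye-cascade parameters. This is only a sketch with guessed values, not something I've verified helps:

// Sketch only: the parameter values are guesses, not tuned numbers.
// Assumes the grayscale (CV_8UC1) path above, since equalizeHist needs a single-channel image.
if (faces.size() > 0) {
    cv::Mat faceROI = mat(faces.front());

    // Normalize lighting in the face region before running the eye cascade
    cv::Mat equalizedROI;
    cv::equalizeHist(faceROI, equalizedROI);

    // A smaller scale step, fewer required neighbours and a smaller minimum size
    // make the cascade more permissive (at the cost of more false positives)
    _eyesCascade.detectMultiScale(equalizedROI, eyes,
                                  1.05,  // scaleFactor
                                  2,     // minNeighbors
                                  0 | CV_HAAR_SCALE_IMAGE,
                                  cv::Size(20, 20));
}

Is this the right direction, or are there better parameter combinations / preprocessing steps for making the eye detection more stable?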