我从 OpenCV 源代码中获取了一个样本并尝试在 iOS 上使用它,我做了以下操作:
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
// get cv::Mat from CMSampleBufferRef
UIImage * img = [self imageFromSampleBuffer: sampleBuffer];
cv::Mat cvImg = [img CVGrayscaleMat];
cv::HOGDescriptor hog;
hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
cv::vector<cv::Rect> found;
hog.detectMultiScale(cvImg, found, 0.2, cv::Size(8,8), cv::Size(16,16), 1.05, 2);
for( int i = 0; i < (int)found.size(); i++ )
{
cv::Rect r = found[i];
dispatch_async(dispatch_get_main_queue(), ^{
self.label.text = [NSString stringWithFormat:@"Found at %d, %d, %d, %d", r.x, r.y, r.width, r.height];
});
NSLog(@"Found at %d, %d, %d, %d", r.x, r.y, r.width, r.height);
}
}
CVGrayscaleMat 在哪里
-(cv::Mat)CVGrayscaleMat
{
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
CGFloat cols = self.size.width;
CGFloat rows = self.size.height;
cv::Mat cvMat = cv::Mat(rows, cols, CV_8UC1); // 8 bits per component, 1 channel
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNone |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), self.CGImage);
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
return cvMat;
}
并且 imageFromSampleBuffer 是来自 Apple 文档的示例。问题是 - 该应用程序无法检测到人,我尝试了不同的尺寸和姿势 - 对我没有任何作用。我错过了什么?