0

我正在 iOS 上使用 OpenCV 做一些实验。我尝试将一张 PNG 图像叠加到检测到的面部之上。下面是我想要叠加到面部上的图像（见原帖附图）。

但实际得到的结果却是颜色错乱的图像（见原帖附图）。

我使用此函数将 UIImage 转换为 cv::Mat：

// Converts a UIImage into a 4-channel cv::Mat.
// NOTE: CoreGraphics draws the pixels in RGBA byte order here, while most
// OpenCV color APIs expect BGR/BGRA — convert (e.g. CV_RGBA2BGRA) before
// doing color processing on the result.
- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
    CGImageRef cgImage = image.CGImage;
    if (cgImage == NULL) {
        // No bitmap backing (e.g. a CIImage-based UIImage): return an empty Mat.
        return cv::Mat();
    }

    // CGImageGetColorSpace follows the CF "Get" rule: the returned colorspace
    // is owned by the image, so it must NOT be released by this method.
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(cgImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels

    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // Pointer to data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    cvMat.step[0],  // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags
    if (contextRef == NULL) {
        // Context creation can fail for unsupported colorspace/format combinations.
        return cv::Mat();
    }

    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), cgImage);
    CGContextRelease(contextRef);

    return cvMat;
}

在这里，我将图像叠加到检测到的面部上：

// Detects faces in `image` (BGRA camera frame), outlines each detection and
// overlays the replacement face image on top of the detected region.
- (void)processImage:(cv::Mat&)image
{
    // cv::vector is a deprecated alias of std::vector — use the real thing.
    std::vector<cv::Rect> faces;
    cv::Mat frame_gray;

    cvtColor(image, frame_gray, CV_BGRA2GRAY);
    equalizeHist(frame_gray, frame_gray);

    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2,
                                  CV_HAAR_SCALE_IMAGE, cv::Size(100, 100));

    for (size_t i = 0; i < faces.size(); ++i)
    {
        const cv::Rect &face = faces[i];

        // Outline the detection in yellow.
        rectangle(image, face.tl(), face.br(), cv::Scalar(0, 255, 255));

        cv::Mat smallImage = [self faceIntoImageView:CGPointMake(face.x, face.y)
                                                size:CGSizeMake(face.width, face.height)];

        // copyTo asserts on size/type mismatch — guard against a scaled
        // overlay that doesn't match the ROI exactly (e.g. a @2x UIImage
        // whose pixel size is twice its point size — TODO confirm scale).
        if (smallImage.size() == cv::Size(face.width, face.height) &&
            smallImage.type() == image.type()) {
            smallImage.copyTo(image(face));
        }
    }
}

// Loads the overlay face image, scales it to `size`, and returns it as a
// cv::Mat. `position` is accepted but not used by this implementation.
-(cv::Mat)faceIntoImageView:(CGPoint)position size:(CGSize)size
{
    // imageNamed: caches the decoded bitmap, so repeated calls are cheap.
    UIImage *overlay = [UIImage imageNamed:@"Face1.png"];
    UIImage *scaledOverlay = [self imageWithImage:overlay scaledToSize:size];
    return [self cvMatFromUIImage:scaledOverlay];
}
4

1 回答 1

3

您应该交换 B 和 R 通道（CoreGraphics 生成的是 RGBA 顺序，而 OpenCV 期望 BGRA 顺序）。

    // Converts a UIImage into a BGRA cv::Mat (OpenCV's native channel order).
    // CoreGraphics renders RGBA here, so R and B are swapped before returning.
    - (cv::Mat)cvMatFromUIImage:(UIImage *)image
    {
    // CGImageGetColorSpace follows the CF "Get" rule: we do not own the
    // returned colorspace, so it must NOT be released by this method.
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels (RGBA)

    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,                 // Pointer to    data
                                                    cols,                       // Width of bitmap
                                                    rows,                       // Height of bitmap
                                                    8,                          // Bits per component
                                                    cvMat.step[0],              // Bytes per row
                                                    colorSpace,                 // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags

    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);

    // Swap R <-> B in place: a single cvtColor call replaces the original
    // split / std::swap / merge sequence (which also needed a cv:: qualifier
    // on Mat to compile without `using namespace cv;`).
    cv::cvtColor(cvMat, cvMat, CV_RGBA2BGRA);

    return cvMat;
    }
于 2013-08-18T09:50:46.790 回答