For face detection, I first convert the UIImage to an IplImage with the following code:
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    CGImageRef imageRef = image.CGImage;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();

    // Render the UIImage into a 4-channel (RGBA) IplImage buffer.
    IplImage *iplImage = cvCreateImage(cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4);
    CGContextRef context = CGBitmapContextCreate(iplImage->imageData,
                                                 iplImage->width,
                                                 iplImage->height,
                                                 iplImage->depth,
                                                 iplImage->widthStep,
                                                 colorSpaceRef,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(context, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpaceRef);

    // Drop the alpha channel and reorder the channels to BGR for OpenCV.
    IplImage *returnImage = cvCreateImage(cvGetSize(iplImage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplImage, returnImage, CV_RGBA2BGR);
    cvReleaseImage(&iplImage);
    return returnImage;
}
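For context, this is roughly how the converted image feeds into detection; the Haar-cascade call, the cascade file name and the variable names below are only illustrative placeholders, not my exact detection code:

IplImage *bgrImage = [self CreateIplImageFromUIImage:sourceImage]; // sourceImage is a placeholder UIImage

// Detection runs on a grayscale copy of the BGR image (sketch only).
IplImage *grayImage = cvCreateImage(cvGetSize(bgrImage), IPL_DEPTH_8U, 1);
cvCvtColor(bgrImage, grayImage, CV_BGR2GRAY);

NSString *cascadePath = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_alt" ofType:@"xml"];
CvHaarClassifierCascade *cascade = (CvHaarClassifierCascade *)cvLoad([cascadePath UTF8String], NULL, NULL, NULL);
CvMemStorage *storage = cvCreateMemStorage(0);
CvSeq *faces = cvHaarDetectObjects(grayImage, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(30, 30));

// ... mark the detected faces on bgrImage ...

cvReleaseImage(&grayImage);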
Then, after the facial features have been detected, I convert the IplImage back to a UIImage with this code:
- (UIImage *)UIImageFromIplImage:(IplImage *)image {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); // also tried CGColorSpaceCreateDeviceGray()

    // Wrap the IplImage pixel buffer in an NSData-backed CGDataProvider.
    NSData *data = [NSData dataWithBytes:image->imageData length:image->imageSize];
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);

    // Create a CGImage directly from the IplImage bytes.
    CGImageRef imageRef = CGImageCreate(image->width, image->height,
                                        image->depth, image->depth * image->nChannels, image->widthStep,
                                        colorSpace, kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);

    UIImage *ret = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return ret;
}
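And this is how the result ends up on screen (self.imageView is my UIImageView outlet; the names are illustrative):

UIImage *output = [self UIImageFromIplImage:bgrImage];
self.imageView.image = output;
cvReleaseImage(&bgrImage); // safe to release here: UIImageFromIplImage copies the bytes into an NSData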
But when the result is displayed in the image view, the image comes out blue. If I use the color space
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
instead, the whole image turns gray. Either way, the original colors are completely lost.
Thanks in advance.