  1. I created a masked image using a function from an iPhone blog:

    UIImage *imgToSave = [self maskImage:[UIImage imageNamed:@"pic.jpg"] withMask:[UIImage imageNamed:@"sd-face-mask.png"]];

  2. It looks correct in a UIImageView:

    UIImageView *imgView = [[UIImageView alloc] initWithImage:imgToSave];
    imgView.center = CGPointMake(160.0f, 140.0f);
    [self.view addSubview:imgView];
    
  3. UIImagePNGRepresentation saves it to disk:

    [UIImagePNGRepresentation(imgToSave) writeToFile:[self findUniqueSavePath] atomically:YES];

UIImagePNGRepresentation returns NSData for an image that looks different.

The output is the inverse of the image mask. The area that was clipped out in the app is visible in the file, and the area that was visible in the app has been removed. The visibility is reversed.

My mask is meant to remove everything in the picture except the face area. The UIImage looks correct in the app, but after I save it to disk the file looks the opposite: the face is removed, and everything else is there.

Please let me know if you can help!


2 Answers


In Quartz you can mask either with an image mask (black passes through, white blocks) or with a normal image (white passes through, black blocks), which are opposites of each other. It seems that, for some reason, saving treats your image mask as a normal image to mask with. One idea is to render into a bitmap context and then create the image to save from that.
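
For example, one way to do that is to redraw the masked image into a fresh image context so the mask is baked into ordinary pixel data before saving. This is only a sketch using UIKit's image-context helpers; imgToSave and findUniqueSavePath are the names from the question:

// Redraw the masked image into a plain bitmap-backed context so the mask
// becomes real alpha/pixel data instead of a CGImage mask.
UIGraphicsBeginImageContextWithOptions(imgToSave.size, NO, imgToSave.scale);
[imgToSave drawInRect:CGRectMake(0.0f, 0.0f, imgToSave.size.width, imgToSave.size.height)];
UIImage *flattenedImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();

// Saving the flattened image should now match what is shown on screen.
[UIImagePNGRepresentation(flattenedImage) writeToFile:[self findUniqueSavePath] atomically:YES];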

Answered 2010-11-16T05:20:51.310

I had exactly the same problem: when I saved the file it came out one way, but the image returned in memory was the exact opposite.

The culprit, and the solution, is UIImagePNGRepresentation(). It normalizes the in-app image before it is saved to disk, so I simply inserted that function as the last step of creating the masked image and returned that image instead.
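
In other words, the last step looks something like this (a minimal sketch; maskedUIImage is a hypothetical name for whatever UIImage the masking routine produced, and the full helper below does the same thing in its return statement):

// Round-trip the masked image through PNG data so that what is kept in
// memory matches what UIImagePNGRepresentation will later write to disk.
return [UIImage imageWithData:UIImagePNGRepresentation(maskedUIImage)];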

This may not be the most elegant solution, but it works. I copied some code from my app and condensed it a bit; I'm not sure the code below works exactly as is, but if not it's close... maybe just a few typos.

Enjoy. :)

// MyImageHelperObj.h

@interface MyImageHelperObj : NSObject

+ (UIImage *) createGrayScaleImage:(UIImage*)originalImage;
+ (UIImage *) createMaskedImageWithSize:(CGSize)newSize sourceImage:(UIImage *)sourceImage maskImage:(UIImage *)maskImage;

@end





// MyImageHelperObj.m

#import <QuartzCore/QuartzCore.h>
#import "MyImageHelperObj.h"


@implementation MyImageHelperObj


+ (UIImage *) createMaskedImageWithSize:(CGSize)newSize sourceImage:(UIImage *)sourceImage maskImage:(UIImage *)maskImage
{
    // create image size rect
    CGRect newRect = CGRectZero;
    newRect.size = newSize;

    // draw source image
    UIGraphicsBeginImageContextWithOptions(newRect.size, NO, 0.0f);
    [sourceImage drawInRect:newRect];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();

    // draw mask image
    [maskImage drawInRect:newRect blendMode:kCGBlendModeNormal alpha:1.0f];
    maskImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    // create grayscale version of mask image to make the "image mask"
    UIImage *grayScaleMaskImage = [MyImageHelperObj createGrayScaleImage:maskImage];
    CGFloat width = CGImageGetWidth(grayScaleMaskImage.CGImage);
    CGFloat height = CGImageGetHeight(grayScaleMaskImage.CGImage);
    CGFloat bitsPerPixel = CGImageGetBitsPerPixel(grayScaleMaskImage.CGImage);
    CGFloat bytesPerRow = CGImageGetBytesPerRow(grayScaleMaskImage.CGImage);
    CGDataProviderRef providerRef = CGImageGetDataProvider(grayScaleMaskImage.CGImage);
    CGImageRef imageMask = CGImageMaskCreate(width, height, 8, bitsPerPixel, bytesPerRow, providerRef, NULL, false);

    CGImageRef maskedImage = CGImageCreateWithMask(newImage.CGImage, imageMask);
    CGImageRelease(imageMask);
    newImage = [UIImage imageWithCGImage:maskedImage];
    CGImageRelease(maskedImage);
    return [UIImage imageWithData:UIImagePNGRepresentation(newImage)];
}

+ (UIImage *) createGrayScaleImage:(UIImage*)originalImage
{
    //create gray device colorspace.
    CGColorSpaceRef space = CGColorSpaceCreateDeviceGray();
    //create 8-bit bitmap context without alpha channel.
    CGContextRef bitmapContext = CGBitmapContextCreate(NULL, originalImage.size.width, originalImage.size.height, 8, 0, space, kCGImageAlphaNone);
    CGColorSpaceRelease(space);
    //Draw image.
    CGRect bounds = CGRectMake(0.0, 0.0, originalImage.size.width, originalImage.size.height);
    CGContextDrawImage(bitmapContext, bounds, originalImage.CGImage);
    //Get image from bitmap context.
    CGImageRef grayScaleImage = CGBitmapContextCreateImage(bitmapContext);
    CGContextRelease(bitmapContext);
    //image is inverted. UIImage inverts orientation while converting CGImage to UIImage.
    UIImage* image = [UIImage imageWithCGImage:grayScaleImage];
    CGImageRelease(grayScaleImage);
    return image;
}

@end
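
For completeness, a hypothetical call site: it reuses the picture and mask file names plus the findUniqueSavePath helper from the question, and simply passes the source image's own size as the output size.

// Hypothetical usage of the helper above; "pic.jpg", "sd-face-mask.png", and
// findUniqueSavePath come from the question, and the source image's own size
// is used as the output size.
UIImage *source = [UIImage imageNamed:@"pic.jpg"];
UIImage *mask = [UIImage imageNamed:@"sd-face-mask.png"];
UIImage *masked = [MyImageHelperObj createMaskedImageWithSize:source.size
                                                  sourceImage:source
                                                    maskImage:mask];

// The helper already runs its result through UIImagePNGRepresentation, so the
// saved file should match what the app displays.
[UIImagePNGRepresentation(masked) writeToFile:[self findUniqueSavePath] atomically:YES];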
Answered 2013-09-12T22:38:41.920