50

I am trying to convert an image to grayscale in the following way:

#define bytesPerPixel 4
#define bitsPerComponent 8

-(unsigned char*) getBytesForImage: (UIImage*)pImage
{
    CGImageRef image = [pImage CGImage];
    NSUInteger width = CGImageGetWidth(image);
    NSUInteger height = CGImageGetHeight(image);

    NSUInteger bytesPerRow = bytesPerPixel * width;

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = malloc(height * width * 4);
    CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);

    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGContextRelease(context);

    return rawData;
}

-(UIImage*) processImage: (UIImage*)pImage
{   
    DebugLog(@"processing image");
    unsigned char *rawData = [self getBytesForImage: pImage];

    NSUInteger width = pImage.size.width;
    NSUInteger height = pImage.size.height;

    DebugLog(@"width: %d", width);
    DebugLog(@"height: %d", height);

    NSUInteger bytesPerRow = bytesPerPixel * width;

    for (int xCoordinate = 0; xCoordinate < width; xCoordinate++)
    {
        for (int yCoordinate = 0; yCoordinate < height; yCoordinate++)
        {
            int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;

            //Getting original colors
            float red = ( rawData[byteIndex] / 255.f );
            float green = ( rawData[byteIndex + 1] / 255.f );
            float blue = ( rawData[byteIndex + 2] / 255.f );

            //Processing pixel data
            float averageColor = (red + green + blue) / 3.0f;

            red = averageColor;
            green = averageColor;
            blue = averageColor;

            //Assigning new color components
            rawData[byteIndex] = (unsigned char) red * 255;
            rawData[byteIndex + 1] = (unsigned char) green * 255;
            rawData[byteIndex + 2] = (unsigned char) blue * 255;


        }
    }

    NSData* newPixelData = [NSData dataWithBytes: rawData length: height * width * 4];
    UIImage* newImage = [UIImage imageWithData: newPixelData];

    free(rawData);

    DebugLog(@"image processed");

    return newImage;

}

So when I want to convert the image, I just call processImage:

imageToDisplay.image = [self processImage: image];

But imageToDisplay doesn't display. What could be the problem?

Thanks.


12 Answers

50

I needed a version that preserved the alpha channel, so I modified the code posted by Dutchie432:

@implementation UIImage (grayscale)

typedef enum {
    ALPHA = 0,
    BLUE = 1,
    GREEN = 2,
    RED = 3
} PIXELS;

- (UIImage *)convertToGrayscale {
    CGSize size = [self size];
    int width = size.width;
    int height = size.height;

    // the pixels will be painted to this array
    uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t));

    // clear the pixels so any transparency is preserved
    memset(pixels, 0, width * height * sizeof(uint32_t));

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // create a context with RGBA pixels
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, 
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);

    // paint the bitmap to our context which will fill in the pixels array
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);

    for(int y = 0; y < height; y++) {
        for(int x = 0; x < width; x++) {
            uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];

            // convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
            uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];

            // set the pixels to gray
            rgbaPixel[RED] = gray;
            rgbaPixel[GREEN] = gray;
            rgbaPixel[BLUE] = gray;
        }
    }

    // create a new CGImageRef from our context with the modified pixels
    CGImageRef image = CGBitmapContextCreateImage(context);

    // we're done with the context, color space, and pixels
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);

    // make a new UIImage to return
    UIImage *resultUIImage = [UIImage imageWithCGImage:image];

    // we're done with image now too
    CGImageRelease(image);

    return resultUIImage;
}

@end
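
Since this is a category on UIImage, a hypothetical call site looks like this (the header name and variable names are placeholders, not part of the original answer):

#import "UIImage+grayscale.h" // hypothetical header for the category above

// Convert and display; transparency in the source image is preserved.
UIImage *gray = [originalImage convertToGrayscale];
imageView.image = gray;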
answered 2009-10-08T23:14:18.967
45

Here is code using only UIKit and the luminosity blend mode. A bit of a hack, but it works well.

// Transform the image in grayscale.
- (UIImage*) grayishImage: (UIImage*) inputImage {

    // Create a graphic context.
    UIGraphicsBeginImageContextWithOptions(inputImage.size, YES, 1.0);
    CGRect imageRect = CGRectMake(0, 0, inputImage.size.width, inputImage.size.height);

    // Draw the image with the luminosity blend mode.
    // On top of a white background, this will give a black and white image.
    [inputImage drawInRect:imageRect blendMode:kCGBlendModeLuminosity alpha:1.0];

    // Get the resulting image.
    UIImage *filteredImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return filteredImage;

}

To preserve transparency, you might be able to set the opaque parameter of UIGraphicsBeginImageContextWithOptions to NO. That needs checking.
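
A sketch of that untested variant (as noted above, whether this actually preserves transparency still needs verification):

// Untested: pass NO for opaque so the context keeps an alpha channel.
UIGraphicsBeginImageContextWithOptions(inputImage.size, NO, 1.0);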

answered 2011-06-05T19:29:58.547
37

Based on Cam's code, with the ability to handle the scale of Retina displays.

- (UIImage *) toGrayscale 
{
    const int RED = 1;
    const int GREEN = 2;
    const int BLUE = 3;

    // Create image rectangle with current image width/height
    CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale);

    int width = imageRect.size.width;
    int height = imageRect.size.height;

    // the pixels will be painted to this array
    uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t));

    // clear the pixels so any transparency is preserved
    memset(pixels, 0, width * height * sizeof(uint32_t));

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // create a context with RGBA pixels
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, 
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);

    // paint the bitmap to our context which will fill in the pixels array
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);

    for(int y = 0; y < height; y++) {
        for(int x = 0; x < width; x++) {
            uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];

            // convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
            uint8_t gray = (uint8_t) ((30 * rgbaPixel[RED] + 59 * rgbaPixel[GREEN] + 11 * rgbaPixel[BLUE]) / 100); 

            // set the pixels to gray
            rgbaPixel[RED] = gray;
            rgbaPixel[GREEN] = gray;
            rgbaPixel[BLUE] = gray;
        }
    }

    // create a new CGImageRef from our context with the modified pixels
    CGImageRef image = CGBitmapContextCreateImage(context);

    // we're done with the context, color space, and pixels
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);

    // make a new UIImage to return
    UIImage *resultUIImage = [UIImage imageWithCGImage:image
                                                 scale:self.scale 
                                           orientation:UIImageOrientationUp];

    // we're done with image now too
    CGImageRelease(image);

    return resultUIImage;
}
answered 2011-03-11T18:06:04.887
31

I like Mathieu Godart's answer, but it didn't seem to work for retina or alpha images. Here's an updated version that seems to work for both, for me:

- (UIImage*)convertToGrayscale
{
    UIGraphicsBeginImageContextWithOptions(self.size, NO, self.scale);
    CGRect imageRect = CGRectMake(0.0f, 0.0f, self.size.width, self.size.height);

    CGContextRef ctx = UIGraphicsGetCurrentContext();

    // Draw a white background
    CGContextSetRGBFillColor(ctx, 1.0f, 1.0f, 1.0f, 1.0f);
    CGContextFillRect(ctx, imageRect);

    // Draw the luminosity on top of the white background to get grayscale
    [self drawInRect:imageRect blendMode:kCGBlendModeLuminosity alpha:1.0f];

    // Apply the source image's alpha
    [self drawInRect:imageRect blendMode:kCGBlendModeDestinationIn alpha:1.0f];

    UIImage* grayscaleImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return grayscaleImage;
}
answered 2013-01-15T21:58:51.183
30

What exactly happens when you use this function? Is the function returning an invalid image, or is the display failing to show it correctly?

Here is the method I use to convert to grayscale.

- (UIImage *) convertToGreyscale:(UIImage *)i {

    int kRed = 1;
    int kGreen = 2;
    int kBlue = 4;

    int colors = kGreen | kBlue | kRed;
    int m_width = i.size.width;
    int m_height = i.size.height;

    uint32_t *rgbImage = (uint32_t *) malloc(m_width * m_height * sizeof(uint32_t));
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(rgbImage, m_width, m_height, 8, m_width * 4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
    CGContextSetInterpolationQuality(context, kCGInterpolationHigh);
    CGContextSetShouldAntialias(context, NO);
    CGContextDrawImage(context, CGRectMake(0, 0, m_width, m_height), [i CGImage]);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    // now convert to grayscale
    uint8_t *m_imageData = (uint8_t *) malloc(m_width * m_height);
    for(int y = 0; y < m_height; y++) {
        for(int x = 0; x < m_width; x++) {
            uint32_t rgbPixel=rgbImage[y*m_width+x];
            uint32_t sum=0,count=0;
            if (colors & kRed) {sum += (rgbPixel>>24)&255; count++;}
            if (colors & kGreen) {sum += (rgbPixel>>16)&255; count++;}
            if (colors & kBlue) {sum += (rgbPixel>>8)&255; count++;}
            m_imageData[y*m_width+x]=sum/count;
        }
    }
    free(rgbImage);

    // convert from a gray scale image back into a UIImage
    uint8_t *result = (uint8_t *) calloc(m_width * m_height *sizeof(uint32_t), 1);

    // process the image back to rgb
    for(int i = 0; i < m_height * m_width; i++) {
        result[i*4]=0;
        int val=m_imageData[i];
        result[i*4+1]=val;
        result[i*4+2]=val;
        result[i*4+3]=val;
    }

    // create a UIImage
    colorSpace = CGColorSpaceCreateDeviceRGB();
    context = CGBitmapContextCreate(result, m_width, m_height, 8, m_width * sizeof(uint32_t), colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
    CGImageRef image = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    UIImage *resultUIImage = [UIImage imageWithCGImage:image];
    CGImageRelease(image);

    free(m_imageData);

    // make sure the data will be released by giving it to an autoreleased NSData
    [NSData dataWithBytesNoCopy:result length:m_width * m_height];

    return resultUIImage;
}
answered 2009-08-19T15:11:13.543
13

A different approach using CIFilter. It preserves the alpha channel and works with transparent backgrounds:

+ (UIImage *)convertImageToGrayScale:(UIImage *)image
{
    CIImage *inputImage = [CIImage imageWithCGImage:image.CGImage];
    CIContext *context = [CIContext contextWithOptions:nil];

    CIFilter *filter = [CIFilter filterWithName:@"CIColorControls"];
    [filter setValue:inputImage forKey:kCIInputImageKey];
    [filter setValue:@(0.0) forKey:kCIInputSaturationKey];

    CIImage *outputImage = filter.outputImage;

    CGImageRef cgImageRef = [context createCGImage:outputImage fromRect:outputImage.extent];

    UIImage *result = [UIImage imageWithCGImage:cgImageRef];
    CGImageRelease(cgImageRef);

    return result;

}
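
If you filter many images this way, it may be worth reusing one CIContext rather than creating one per call, since context creation is comparatively expensive. A minimal sketch of that variation (this helper is an assumption, not part of the original answer):

+ (CIContext *)sharedGrayscaleContext
{
    // Cache the context once; CIContext creation is relatively costly.
    static CIContext *context = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        context = [CIContext contextWithOptions:nil];
    });
    return context;
}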
answered 2015-07-04T18:04:18.193
11

A Swift extension on UIImage, preserving alpha:

extension UIImage {

    private func convertToGrayScaleNoAlpha() -> CGImageRef {
        let colorSpace = CGColorSpaceCreateDeviceGray();
        let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.None.rawValue)
        let context = CGBitmapContextCreate(nil, UInt(size.width), UInt(size.height), 8, 0, colorSpace, bitmapInfo)
        CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.CGImage)
        return CGBitmapContextCreateImage(context)
    }


    /**
        Return a new image in shades of gray + alpha
    */
     func convertToGrayScale() -> UIImage {
        let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.Only.rawValue)
        let context = CGBitmapContextCreate(nil, UInt(size.width), UInt(size.height), 8, 0, nil, bitmapInfo)
        CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.CGImage);
        let mask = CGBitmapContextCreateImage(context)
        return UIImage(CGImage: CGImageCreateWithMask(convertToGrayScaleNoAlpha(), mask), scale: scale, orientation:imageOrientation)!
    }
}
answered 2015-01-15T09:22:41.267
9

Here is another good solution as a category method on UIImage. It's based on a blog post and its comments, but I fixed a memory issue here:

- (UIImage *)grayScaleImage {
    // Create image rectangle with current image width/height
    CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale);
    // Grayscale color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    // Create bitmap content with current image size and grayscale colorspace
    CGContextRef context = CGBitmapContextCreate(nil, self.size.width * self.scale, self.size.height * self.scale, 8, 0, colorSpace, kCGImageAlphaNone);
    // Draw image into current context, with specified rectangle
    // using previously defined context (with grayscale colorspace)
    CGContextDrawImage(context, imageRect, [self CGImage]);
    // Create bitmap image info from pixel data in current context
    CGImageRef grayImage = CGBitmapContextCreateImage(context);
    // release the colorspace and graphics context
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);
    // make a new alpha-only graphics context
    context = CGBitmapContextCreate(nil, self.size.width * self.scale, self.size.height * self.scale, 8, 0, nil, kCGImageAlphaOnly);
    // draw image into context with no colorspace
    CGContextDrawImage(context, imageRect, [self CGImage]);
    // create alpha bitmap mask from current context
    CGImageRef mask = CGBitmapContextCreateImage(context);
    // release graphics context
    CGContextRelease(context);
    // make UIImage from grayscale image with alpha mask
    CGImageRef cgImage = CGImageCreateWithMask(grayImage, mask);
    UIImage *grayScaleImage = [UIImage imageWithCGImage:cgImage scale:self.scale orientation:self.imageOrientation];
    // release the CG images
    CGImageRelease(cgImage);
    CGImageRelease(grayImage);
    CGImageRelease(mask);
    // return the new grayscale image
    return grayScaleImage;
}
answered 2012-08-14T23:11:36.013
9

A fast and efficient Swift 3 implementation for iOS 9/10. I feel confident calling it efficient because I've now tried every image-filtering method I could find for processing 100 images at a time (downloading with AlamofireImage's ImageFilter option). I settled on this method in terms of memory and speed because it performed better than anything else I tried (for my use case).

func convertToGrayscale() -> UIImage? {

    UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
    let imageRect = CGRect(x: 0.0, y: 0.0, width: self.size.width, height: self.size.height)
    let context = UIGraphicsGetCurrentContext()

    // Draw a white background
    context!.setFillColor(red: 1.0, green: 1.0, blue: 1.0, alpha: 1.0)
    context!.fill(imageRect)

    // optional: increase contrast with colorDodge before applying luminosity 
    // (my images were too dark when using just luminosity - you may not need this)
    self.draw(in: imageRect, blendMode: CGBlendMode.colorDodge, alpha: 0.7)


    // Draw the luminosity on top of the white background to get grayscale of original image
    self.draw(in: imageRect, blendMode: CGBlendMode.luminosity, alpha: 0.90)

    // optional: re-apply alpha if your image has transparency - based on user1978534's answer (I haven't tested this as I didn't have transparency - I just know this would be the syntax)
    // self.draw(in: imageRect, blendMode: CGBlendMode.destinationIn, alpha: 1.0)

    let grayscaleImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return grayscaleImage
}


Regarding the colorDodge step: the issue I originally had was getting my images light enough to match the grayscale coloring produced by CIFilter("CIPhotoEffectTonal") - my results were too dark. By applying CGBlendMode.colorDodge at ~0.7 alpha, I was able to get a decent match, which seems to increase the overall contrast.

Other color blend effects might work too - but I think you want to apply them before luminosity, which is the grayscale filtering effect. I found this page very helpful as a reference for the different BlendModes.



Regarding the efficiency gains I found: I needed to process 100 thumbnails loaded from a server (using AlamofireImage for async loading, caching, and applying the filter). I started hitting crashes when the total size of my images exceeded the cache size, so I experimented with other approaches.

The CPU-based CoreImage CIFilter approach was the first one I tried, but it wasn't memory-efficient enough for the number of images I'm processing.

I also tried applying the CIFilter on the GPU using EAGLContext(api: .openGLES3), which was actually even more memory-hungry - I got memory warnings at 450+ MB of use while loading 200+ images.

I tried bitmap processing (i.e. CGContext(data: nil, width: width, height: height, bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: CGImageAlphaInfo.none.rawValue))... It worked well, except I couldn't get a high enough resolution for modern retina devices, even after adding context.scaleBy(x: scaleFactor, y: scaleFactor).

So of all the methods I tried, this one (drawing in a UIGraphics image context) was vastly better in speed and memory efficiency when applied as a filter with AlamofireImage. For example, I'm seeing under 70 MB of memory when processing my 200+ images, and they load essentially instantly, rather than the roughly 35 seconds the OpenGL/EAGL approach took. I know these aren't very scientific benchmarks; I'll instrument it if anyone is curious, though :)


Finally, if you do need to pass this or another grayscale filter to AlamofireImage - here's how to do it (note that you must import AlamofireImage into your class to use ImageFilter):

public struct GrayScaleFilter: ImageFilter {
    public init() {
    }

    public var filter: (UIImage) -> UIImage {
        return { image in
            return image.convertToGrayscale() ?? image
        }
    }
}

To use it, create the filter and pass it to af_setImage like this:

let filter = GrayScaleFilter()
imageView.af_setImage(withURL: url, filter: filter)
answered 2017-01-14T02:35:32.930
6
@interface UIImageView (Settings)

- (void)convertImageToGrayScale;

@end

@implementation UIImageView (Settings)

- (void)convertImageToGrayScale
{
    // Create image rectangle with current image width/height
    CGRect imageRect = CGRectMake(0, 0, self.image.size.width, self.image.size.height);

    // Grayscale color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();

    // Create bitmap content with current image size and grayscale colorspace
    CGContextRef context = CGBitmapContextCreate(nil, self.image.size.width, self.image.size.height, 8, 0, colorSpace, kCGImageAlphaNone);

    // Draw image into current context, with specified rectangle
    // using previously defined context (with grayscale colorspace)
    CGContextDrawImage(context, imageRect, [self.image CGImage]);

    // Create bitmap image info from pixel data in current context
    CGImageRef imageRef = CGBitmapContextCreateImage(context);

    // Create a new UIImage object
    UIImage *newImage = [UIImage imageWithCGImage:imageRef];

    // Release colorspace, context and bitmap information
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);
    CFRelease(imageRef);

    // Return the new grayscale image
    self.image = newImage;
}

@end
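
Since the category mutates the view's image in place, a hypothetical call site is a single message send (imageView is a placeholder name):

// Replaces imageView.image with a grayscale copy of itself.
[imageView convertImageToGrayScale];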
answered 2017-01-09T11:25:23.667
2

I have yet another answer. This one performs really well and handles retina graphics as well as transparency. It extends Sargis Gevorgyan's approach:

+ (UIImage*) grayScaleFromImage:(UIImage*)image opaque:(BOOL)opaque
{
    // NSTimeInterval start = [NSDate timeIntervalSinceReferenceDate];

    CGSize size = image.size;

    CGRect bounds = CGRectMake(0, 0, size.width, size.height);

    // Create bitmap content with current image size and grayscale colorspace
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    size_t bitsPerComponent = 8;
    size_t bytesPerPixel = opaque ? 1 : 2;
    size_t bytesPerRow = bytesPerPixel * size.width * image.scale;
    CGContextRef context = CGBitmapContextCreate(nil, size.width, size.height, bitsPerComponent, bytesPerRow, colorSpace, opaque ? kCGImageAlphaNone : kCGImageAlphaPremultipliedLast);

    // create image from bitmap
    CGContextDrawImage(context, bounds, image.CGImage);
    CGImageRef cgImage = CGBitmapContextCreateImage(context);
    UIImage* result = [[UIImage alloc] initWithCGImage:cgImage scale:image.scale orientation:UIImageOrientationUp];
    CGImageRelease(cgImage);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    // performance results on iPhone 6S+ in Release mode.
    // Results are in photo pixels, not device pixels:
    //  ~ 5ms for 500px x 600px
    //  ~ 15ms for 2200px x 600px
    // NSLog(@"generating %d x %d @ %dx grayscale took %f seconds", (int)size.width, (int)size.height, (int)image.scale, [NSDate timeIntervalSinceReferenceDate] - start);

    return result;
}

Using blend modes instead is elegant, but copying into a grayscale bitmap performs better, because you use only one or two color channels instead of four. The opaque BOOL is meant to take your UIView's opaque flag, so you can opt out of using an alpha channel if you know you won't need one.
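
A hypothetical call site that forwards a view's opaque flag, as described above (SomeHelper stands in for whichever class declares this method):

UIImage *gray = [SomeHelper grayScaleFromImage:self.imageView.image
                                        opaque:self.imageView.opaque];
self.imageView.image = gray;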

I haven't tried the Core Image based solutions in this answer thread, but I would be very cautious about using Core Image if performance matters.

answered 2017-06-23T17:20:43.890
0

This is my attempt at a fast conversion by drawing directly into a grayscale color space, without per-pixel enumeration. It runs 10x faster than the CIImageFilter solutions.

@implementation UIImage (Grayscale)

static UIImage *grayscaleImageFromCIImage(CIImage *image, CGFloat scale)
{
    CIImage *blackAndWhite = [CIFilter filterWithName:@"CIColorControls" keysAndValues:kCIInputImageKey, image, kCIInputBrightnessKey, @0.0, kCIInputContrastKey, @1.1, kCIInputSaturationKey, @0.0, nil].outputImage;
    CIImage *output = [CIFilter filterWithName:@"CIExposureAdjust" keysAndValues:kCIInputImageKey, blackAndWhite, kCIInputEVKey, @0.7, nil].outputImage;
    CGImageRef ref = [[CIContext contextWithOptions:nil] createCGImage:output fromRect:output.extent];
    UIImage *result = [UIImage imageWithCGImage:ref scale:scale orientation:UIImageOrientationUp];
    CGImageRelease(ref);
    return result;
}

static UIImage *grayscaleImageFromCGImage(CGImageRef imageRef, CGFloat scale)
{
    NSInteger width = CGImageGetWidth(imageRef) * scale;
    NSInteger height = CGImageGetHeight(imageRef) * scale;

    NSMutableData *pixels = [NSMutableData dataWithLength:width*height];
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    CGContextRef context = CGBitmapContextCreate(pixels.mutableBytes, width, height, 8, width, colorSpace, 0);

    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGImageRef ref = CGBitmapContextCreateImage(context);
    UIImage *result = [UIImage imageWithCGImage:ref scale:scale orientation:UIImageOrientationUp];

    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    CGImageRelease(ref);

    return result;
}

- (UIImage *)grayscaleImage
{
    if (self.CIImage) {
        return grayscaleImageFromCIImage(self.CIImage, self.scale);
    } else if (self.CGImage) {
        return grayscaleImageFromCGImage(self.CGImage, self.scale);
    }

    return nil;
}

@end
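
A minimal usage sketch (photo is a placeholder UIImage); the CIImage branch only applies to images created from a CIImage, where CGImage is nil:

UIImage *gray = [photo grayscaleImage];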
answered 2016-04-15T06:15:59.233