
I'm building an iOS app that performs some basic detection. I get the raw frames from AVCaptureVideoDataOutput, convert the CMSampleBufferRef to a UIImage, resize the UIImage, and then convert it back to a CVPixelBufferRef. As far as I can tell with Instruments, the leak is in the last part, where I convert the CGImage to a CVPixelBufferRef.

Here is the code I use:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection 
{
    videof = [[ASMotionDetect alloc] initWithSampleImage:[self resizeSampleBuffer:sampleBuffer]];
    // ASMotionDetect is my class for detection and I use videof to calculate the movement
}

-(UIImage*)resizeSampleBuffer:(CMSampleBufferRef) sampleBuffer {
    UIImage *img;
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
    CVPixelBufferLockBaseAddress(imageBuffer,0);        // Lock the image buffer 

    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);   // Get information of the image 
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); 
    size_t width = CVPixelBufferGetWidth(imageBuffer); 
    size_t height = CVPixelBufferGetHeight(imageBuffer); 
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 

    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); 
    CGImageRef newImage = CGBitmapContextCreateImage(newContext); 
    CGContextRelease(newContext); 

    CGColorSpaceRelease(colorSpace); 
    CVPixelBufferUnlockBaseAddress(imageBuffer,0); 
    /* CVBufferRelease(imageBuffer); */  // do not call this! imageBuffer follows the Get rule: it is owned by the sample buffer

    img = [UIImage imageWithCGImage:newImage];
    CGImageRelease(newImage);
    newContext = nil;
    img = [self resizeImageToSquare:img];
    return img;
}

-(UIImage*)resizeImageToSquare:(UIImage*)_temp {
    UIImage *img;
    int w = _temp.size.width;
    int h = _temp.size.height;
    CGRect rect;
    if (w>h) {
        rect = CGRectMake((w-h)/2,0,h,h);
    } else {
        rect = CGRectMake(0, (h-w)/2, w, w);
    }
    //
    img = [self crop:_temp inRect:rect];
    return img;
}

-(UIImage*) crop:(UIImage*)image inRect:(CGRect)rect{
    UIImage *sourceImage = image;
    CGRect selectionRect = rect;
    CGRect transformedRect = TransformCGRectForUIImageOrientation(selectionRect, sourceImage.imageOrientation, sourceImage.size);
    CGImageRef resultImageRef = CGImageCreateWithImageInRect(sourceImage.CGImage, transformedRect);
    UIImage *resultImage = [[UIImage alloc] initWithCGImage:resultImageRef scale:1.0 orientation:image.imageOrientation];
    CGImageRelease(resultImageRef);
    return resultImage;
}

In my detection class I have:

- (id)initWithSampleImage:(UIImage*)sampleImage {
  if ((self = [super init])) {
    _frame = new CVMatOpaque();
    _histograms = new CVMatNDOpaque[kGridSize *
                                    kGridSize];
    [self extractFrameFromImage:sampleImage];
  }
  return self;
}

- (void)extractFrameFromImage:(UIImage*)sampleImage {
    CGImageRef imageRef = [sampleImage CGImage];
    CVImageBufferRef imageBuffer = [self pixelBufferFromCGImage:imageRef];
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
  // Collect some information required to extract the frame.
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);

  // Extract the frame, convert it to grayscale, and shove it in _frame.
    cv::Mat frame(height, width, CV_8UC4, baseAddress, bytesPerRow);
    cv::cvtColor(frame, frame, CV_BGR2GRAY);
    _frame->matrix = frame;
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    CGImageRelease(imageRef);
}

- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
    CVPixelBufferRef pxbuffer = NULL;
    int width = CGImageGetWidth(image)*2;
    int height = CGImageGetHeight(image)*2;

    NSMutableDictionary *attributes = [NSMutableDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, [NSNumber numberWithInt:width], kCVPixelBufferWidthKey, [NSNumber numberWithInt:height], kCVPixelBufferHeightKey, nil];
    CVPixelBufferPoolRef pixelBufferPool; 
    CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, (__bridge CFDictionaryRef) attributes, &pixelBufferPool);
    NSParameterAssert(theError == kCVReturnSuccess);
    CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, width,
                                                 height, 8, width*4, rgbColorSpace, 
                                                 kCGImageAlphaNoneSkipFirst);
    NSParameterAssert(context);
/* here is the problem: */
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

    return pxbuffer;
}

Using Instruments I found that the problem is the CVPixelBufferRef allocation, but I don't understand why - can anyone see the problem?

Thanks


1 Answer


In -pixelBufferFromCGImage:, neither pxbuffer nor pixelBufferPool is ever released. That makes sense for pxbuffer, since it is the return value, but pixelBufferPool is created, and leaked, on every call to the method.

The quick fix should be to:

  1. Release pixelBufferPool in -pixelBufferFromCGImage:
  2. Release pxbuffer (the return value of -pixelBufferFromCGImage:) in -extractFrameFromImage:

You should also rename -pixelBufferFromCGImage: to -createPixelBufferFromCGImage: to make it clear that it returns a retained object, following the Core Foundation Create Rule.
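
Put together, a minimal sketch of both fixes applied to the methods from the question. Two asides that go beyond the quick fix above: the bitmap context uses the buffer's actual bytes-per-row (pool-allocated buffers may pad their rows, so width*4 is not guaranteed to match), and the CGImageRelease(imageRef) call is dropped from -extractFrameFromImage:, because sampleImage.CGImage is not an owning reference and releasing it over-releases the image.

- (CVPixelBufferRef)createPixelBufferFromCGImage:(CGImageRef)image
{
    CVPixelBufferRef pxbuffer = NULL;
    int width = (int)CGImageGetWidth(image) * 2;
    int height = (int)CGImageGetHeight(image) * 2;

    NSDictionary *attributes = @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32ARGB),
                                 (id)kCVPixelBufferWidthKey : @(width),
                                 (id)kCVPixelBufferHeightKey : @(height)};
    CVPixelBufferPoolRef pixelBufferPool = NULL;
    CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, (__bridge CFDictionaryRef)attributes, &pixelBufferPool);
    NSParameterAssert(theError == kCVReturnSuccess);
    CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, width, height, 8,
                                                 CVPixelBufferGetBytesPerRow(pxbuffer),
                                                 rgbColorSpace,
                                                 (CGBitmapInfo)kCGImageAlphaNoneSkipFirst);
    NSParameterAssert(context);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);
    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

    // Fix 1: the buffer keeps its backing memory alive on its own,
    // so the pool can be released here without invalidating pxbuffer.
    CVPixelBufferPoolRelease(pixelBufferPool);

    return pxbuffer; // caller owns this (Create Rule)
}

- (void)extractFrameFromImage:(UIImage*)sampleImage {
    // Note: no CGImageRelease() anywhere here; sampleImage.CGImage is not owned by us.
    CVPixelBufferRef imageBuffer = [self createPixelBufferFromCGImage:sampleImage.CGImage];
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);

    // cvtColor reallocates `frame` when it converts 4 channels to 1,
    // so the stored matrix no longer aliases the pixel buffer's memory.
    cv::Mat frame((int)height, (int)width, CV_8UC4, baseAddress, bytesPerRow);
    cv::cvtColor(frame, frame, CV_BGR2GRAY);
    _frame->matrix = frame;

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    // Fix 2: balance the +1 reference returned by the create method.
    CVPixelBufferRelease(imageBuffer);
}

If this runs for every camera frame, it would also be reasonable to create the pool once (for example, in an ivar) and reuse it across calls; the version above is just the minimal change that stops the leak.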

answered 2012-06-26 at 10:16