
Long-time lurker, first-time poster.

I'm trying to decode a video and save each frame out as an image. (The video is one I recorded earlier, and it plays back fine on the iPhone.) My first step is to make sure I'm getting valid image data, so this little snippet just displays each frame in a UIImageView:

- (void)showFrame:(UIImage *)frame {
    self.videoIV.image = frame;
}

- (void)unarchiveThread:(NSString *)fileName {
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

    AVURLAsset *asset = [[[AVURLAsset alloc] initWithURL:[NSURL fileURLWithPath:fileName] options:nil] autorelease];
    if (asset == nil) {
        NSLog(@"Couldn't load avasset from '%@'\n", fileName);
        [pool drain];
        return;
    }
    AVAssetReader *reader = [[[AVAssetReader alloc] initWithAsset:asset error:NULL] autorelease];

    NSArray *video_tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
    AVAssetTrack *video_track = [video_tracks objectAtIndex:0];
    CGAffineTransform transform = video_track.preferredTransform;

    // Ask the reader to decode into 32BGRA pixel buffers.
    NSDictionary *dictionary = [NSDictionary dictionaryWithObject:@(kCVPixelFormatType_32BGRA) forKey:(NSString *)kCVPixelBufferPixelFormatTypeKey];
    AVAssetReaderTrackOutput *asset_reader_output = [[[AVAssetReaderTrackOutput alloc] initWithTrack:video_track outputSettings:dictionary] autorelease];
    [reader addOutput:asset_reader_output];

    [reader startReading];

    CMSampleBufferRef buffer;

    while ([reader status] == AVAssetReaderStatusReading) {
        buffer = [asset_reader_output copyNextSampleBuffer];
        if (buffer != NULL) {
            NSLog(@"buffer=%@", buffer);
            CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(buffer);
            NSLog(@"pixel buffer=%@", pixelBuffer);
            CIImage *cImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
            NSLog(@"cImage = %@", cImage);
            cImage = [cImage imageByApplyingTransform:transform];
            NSLog(@"transformed cImage = %@", cImage);
            UIImage *image = [UIImage imageWithCIImage:cImage];
            NSLog(@"image=%@", image);
            [self performSelectorOnMainThread:@selector(showFrame:) withObject:image waitUntilDone:YES];
            CMSampleBufferInvalidate(buffer);
            CFRelease(buffer);
            buffer = NULL;
        }
    }
    [pool drain];
}
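(For reference, once a good UIImage is in hand, the stated end goal of saving each frame could look like the minimal sketch below. The PNG format, temporary-directory path, and frameIndex counter are all assumptions on my part, and note that UIImagePNGRepresentation needs a CGImage-backed UIImage rather than one made with imageWithCIImage:.)

// A minimal sketch of the eventual save step (assumptions: PNG output,
// NSTemporaryDirectory(), and a hypothetical frameIndex counter).
NSData *pngData = UIImagePNGRepresentation(image); // needs a CGImage-backed UIImage
NSString *framePath = [NSTemporaryDirectory() stringByAppendingPathComponent:
                          [NSString stringWithFormat:@"frame-%05d.png", frameIndex]];
[pngData writeToFile:framePath atomically:YES];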

The first two steps work fine (I get a valid sample buffer and a valid pixel buffer), but for some reason [CIImage imageWithCVPixelBuffer:] doesn't work (I get (null)). Here's my output:

buffer=CMSampleBuffer 0xba6e2d0 retainCount: 1 allocator: 0x4b43e7
    invalid = NO
    dataReady = YES
    makeDataReadyCallback = 0x0
    makeDataReadyRefcon = 0x0
    formatDescription = <CMVideoFormatDescription 0xba6e260 [0x212eb48]> {
    mediaType:'vide'
    mediaSubType:'BGRA'
    mediaSpecific: {
            codecType: 'BGRA'               dimensions: 1920 x 1080
    }
    extensions: {<CFBasicHash 0xba6e2a0 [0x212eb48]>{type = immutable dict, count = 5,
entries =>
    2 : <CFString 0x4c0024 [0x212eb48]>{contents = "Version"} = <CFNumber 0xb8682b0 [0x212eb48]>{value = +2, type = kCFNumberSInt32Type}
    3 : <CFString 0x4bffe4 [0x212eb48]>{contents = "CVBytesPerRow"} = <CFNumber 0xba6e1f0 [0x212eb48]>{value = +7680, type = kCFNumberSInt32Type}
    4 : <CFString 0x466f1c [0x212eb48]>{contents = "CVImageBufferYCbCrMatrix"} = <CFString 0x466f2c [0x212eb48]>{contents = "ITU_R_709_2"}
    5 : <CFString 0x466f5c [0x212eb48]>{contents = "CVImageBufferColorPrimaries"} = <CFString 0x466f2c [0x212eb48]>{contents = "ITU_R_709_2"}
    6 : <CFString 0x466f8c [0x212eb48]>{contents = "CVImageBufferTransferFunction"} = <CFString 0x466f2c [0x212eb48]>{contents = "ITU_R_709_2"}
}
}
}
    sbufToTrackReadiness = 0x0
    numSamples = 1
    sampleTimingArray[1] = {
            {PTS = {0/600 = 0.000}, DTS = {INVALID}, duration = {INVALID}},
    }
    imageBuffer = 0xba6dff0
pixel buffer=<CVPixelBuffer 0xba6dff0 width=1920 height=1080 bytesPerRow=7680 pixelFormat=BGRA attributes=<CFBasicHash 0xba6de30 [0x212eb48]>{type = immutable dict, count = 3,
entries =>
    0 : <CFString 0x466cdc [0x212eb48]>{contents = "Height"} = <CFNumber 0x8e70ec0 [0x212eb48]>{value = +1080, type = kCFNumberSInt32Type}
    1 : <CFString 0x466ccc [0x212eb48]>{contents = "Width"} = <CFNumber 0x8e70eb0 [0x212eb48]>{value = +1920, type = kCFNumberSInt32Type}
    2 : <CFString 0x46707c [0x212eb48]>{contents = "PixelFormatType"} = <CFNumber 0xbb89710 [0x212eb48]>{value = +1111970369, type = kCFNumberSInt32Type}
}
 propagatedAttachments=<CFBasicHash 0xba6e110 [0x212eb48]>{type = mutable dict, count = 3,
entries =>
    0 : <CFString 0x466f1c [0x212eb48]>{contents = "CVImageBufferYCbCrMatrix"} = <CFString 0x466f2c [0x212eb48]>{contents = "ITU_R_709_2"}
    1 : <CFString 0x466f8c [0x212eb48]>{contents = "CVImageBufferTransferFunction"} = <CFString 0x466f2c [0x212eb48]>{contents = "ITU_R_709_2"}
    2 : <CFString 0x466f5c [0x212eb48]>{contents = "CVImageBufferColorPrimaries"} = <CFString 0x466f2c [0x212eb48]>{contents = "ITU_R_709_2"}
}
 nonPropagatedAttachments=<CFBasicHash 0xba6dce0 [0x212eb48]>{type = mutable dict, count = 0,
entries =>
}
 iosurface=0x0>
cImage = (null)
transformed cImage = (null)
image=<UIImage: 0x8c669d0>

Does anyone know what I'm doing wrong? I've searched Stack Overflow and found plenty of working examples (from a live capture feed), but none that use AVAssetReader and then convert the buffer from copyNextSampleBuffer into a CIImage.


1 Answer


OK, my theory on this is as follows: the CVPixelBufferRef coming out of copyNextSampleBuffer is already associated with an IOSurface on the GPU.

I was able to make this work by grabbing the CVPixelBufferRef's data and using it to create a CGImageRef, which I then pass to [CIImage imageWithCGImage:].

The implementation follows (class methods; they can live in any class).
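Note that the code relies on REQUIRED_DEBUG and REQUIRED_CVRETURN, which the answer never defines. As rough stand-ins, assuming they are assert-and-evaluate helpers (the exact behavior here is my guess), something like this would let the code compile:

// Hypothetical stand-ins for the answer's undefined helper macros.
// REQUIRED_DEBUG(expr): evaluates expr, logs on failure, yields its truth value.
// REQUIRED_CVRETURN(call): runs a CoreVideo call, logs a non-success CVReturn,
// and yields YES on success. (Uses the clang/GCC statement-expression extension.)
#define REQUIRED_DEBUG(expr) \
    ({ BOOL _ok = ((expr) != 0); \
       if (!_ok) NSLog(@"REQUIRED_DEBUG failed: %s", #expr); \
       _ok; })
#define REQUIRED_CVRETURN(call) \
    ({ CVReturn _err = (call); \
       if (_err != kCVReturnSuccess) NSLog(@"CV error %d: %s", (int)_err, #call); \
       (_err == kCVReturnSuccess); })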

+ (CGImageRef)imageWithCVPixelBuffer:(CVPixelBufferRef)pixelBuffer
{
    CGImageRef image = NULL;
    // Use the color space attached to the buffer, if there is one.
    CGColorSpaceRef colorSpace = (CGColorSpaceRef)CVBufferGetAttachment((CVImageBufferRef)pixelBuffer, kCVImageBufferCGColorSpaceKey, NULL);
    if (REQUIRED_DEBUG(colorSpace != NULL))
    {
        image = [self imageWithCVPixelBuffer:pixelBuffer colorSpace:colorSpace];
    }
    return image;
}

+ (CGImageRef)imageWithCVPixelBuffer:(CVPixelBufferRef)pixelBuffer colorSpace:(CGColorSpaceRef)colorSpace
{
    return [self imageWithCVPixelBuffer:pixelBuffer colorSpace:colorSpace copyMemory:YES];
}

// CGDataProvider release callbacks. The C++ default arguments let these also be
// called directly with just the info pointer (so this is an Objective-C++ file).
static void sReleaseCVPixelBuffer(void *pixelBuffer, const void *data = 0, size_t size = 0)
{
    if (REQUIRED_DEBUG(pixelBuffer != NULL))
    {
        REQUIRED_CVRETURN(CVPixelBufferUnlockBaseAddress((CVPixelBufferRef)pixelBuffer, kCVPixelBufferLock_ReadOnly));
        CVPixelBufferRelease((CVPixelBufferRef)pixelBuffer);
    }
}

static void sReleasePixelMemory(void *pixel_memory, const void *data = 0, size_t size = 0)
{
    if (REQUIRED_DEBUG(pixel_memory != NULL))
        free(pixel_memory);
}

+ (CGImageRef)imageWithCVPixelBuffer:(CVPixelBufferRef)pixelBuffer colorSpace:(CGColorSpaceRef)colorSpace copyMemory:(BOOL)copyMemory
{
    CGImageRef imageRef = NULL;
    CGBitmapInfo imageBitmapInfo = 0;
    OSType sourcePixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer);
    if (kCVPixelFormatType_32ARGB == sourcePixelFormat)
        imageBitmapInfo = kCGBitmapByteOrder32Big | kCGImageAlphaNoneSkipFirst;
    else if (kCVPixelFormatType_32BGRA == sourcePixelFormat)
        imageBitmapInfo = kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst;
    if (REQUIRED_DEBUG(imageBitmapInfo != 0) && REQUIRED_DEBUG(colorSpace != NULL))
    {
        size_t imageRowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer);
        size_t imageWidth = CVPixelBufferGetWidth(pixelBuffer);
        size_t imageHeight = CVPixelBufferGetHeight(pixelBuffer);
        if (REQUIRED_CVRETURN(CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly)))
        {
            size_t imageDataSizeBytes = imageRowBytes * imageHeight;
            void *imageBaseAddress = CVPixelBufferGetBaseAddress(pixelBuffer);
            if (REQUIRED_DEBUG(imageBaseAddress != NULL))
            {
                if (copyMemory)
                {
                    // Copy the pixels so the CGImage owns its own memory.
                    void *pixel_memory = malloc(imageDataSizeBytes);
                    if (REQUIRED_DEBUG(pixel_memory != NULL))
                    {
                        memcpy(pixel_memory, imageBaseAddress, imageDataSizeBytes);
                        CGDataProviderRef imageDataProvider = CGDataProviderCreateWithData(pixel_memory, pixel_memory, imageDataSizeBytes, sReleasePixelMemory);
                        if (REQUIRED_DEBUG(imageDataProvider != NULL))
                        {
                            imageRef = CGImageCreate(imageWidth, imageHeight, 8, 32, imageRowBytes, colorSpace, imageBitmapInfo, imageDataProvider, NULL, true, kCGRenderingIntentDefault);
                            CGDataProviderRelease(imageDataProvider);
                        }
                        else
                            free(pixel_memory); // provider never took ownership
                    }
                    // We copied, so the lock can be dropped right away.
                    CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
                }
                else /* make an alias of the bits in the CVPixelBuffer */
                {
                    // Retain the buffer up front; sReleaseCVPixelBuffer unlocks
                    // and releases it when the data provider is torn down.
                    CVPixelBufferRetain(pixelBuffer);
                    CGDataProviderRef imageDataProvider = CGDataProviderCreateWithData((void *)pixelBuffer, imageBaseAddress, imageDataSizeBytes, sReleaseCVPixelBuffer);
                    if (REQUIRED_DEBUG(imageDataProvider != NULL))
                    {
                        imageRef = CGImageCreate(imageWidth, imageHeight, 8, 32, imageRowBytes, colorSpace, imageBitmapInfo, imageDataProvider, NULL, true, kCGRenderingIntentDefault);
                        REQUIRED_DEBUG(imageRef != NULL);
                        CGDataProviderRelease(imageDataProvider);
                    }
                    else
                        sReleaseCVPixelBuffer(pixelBuffer); // unlock and balance the retain
                }
            }
            else
                CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
        }
    }
    REQUIRED_DEBUG(imageRef != NULL);
    return imageRef;
}
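As a design note: copyMemory:YES costs a memcpy per frame but fully decouples the resulting CGImage from CoreVideo, while copyMemory:NO aliases the pixel data in place, keeping the CVPixelBuffer locked and retained until the CGImage's data provider is released.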

+ (CIImage *)coreImageForPixelBuffer:(CVPixelBufferRef)pixelBuffer colorSpace:(CGColorSpaceRef)colorSpace
{
    CIImage *coreImage = nil;
#if 0 // FAILS on TARGET_OS_IPHONE: imageWithCVPixelBuffer: returns nil here
    REQUIRED_DEBUG(coreImage = [CIImage imageWithCVPixelBuffer:pixelBuffer]);
#else
    // Go through a CGImage built from the raw pixel data instead.
    CGImageRef pixelBufferImageRef = [self imageWithCVPixelBuffer:pixelBuffer colorSpace:colorSpace];
    if (REQUIRED_DEBUG(pixelBufferImageRef != NULL))
    {
        REQUIRED_DEBUG(coreImage = [CIImage imageWithCGImage:pixelBufferImageRef]);
        CGImageRelease(pixelBufferImageRef);
    }
#endif
    return coreImage;
}
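To wire this back into the question's loop, the call to [CIImage imageWithCVPixelBuffer:] would be replaced with something along these lines. FrameConverter is a hypothetical name for whatever class hosts these methods, and falling back to device RGB is an assumption, since the kCVImageBufferCGColorSpaceKey attachment the first wrapper relies on may be absent on AVAssetReader output:

CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(buffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); // assumption: no CGColorSpace attachment present
CIImage *cImage = [FrameConverter coreImageForPixelBuffer:pixelBuffer colorSpace:colorSpace];
CGColorSpaceRelease(colorSpace);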