
Currently, using an iPhone 5s/6, I can capture 120 (iPhone 5s) or 240 (iPhone 6) frames per second into a CMSampleBufferRef. However, the AVCaptureDeviceFormat instances returned to me only offer these high frame rates at a resolution of 1280x720.
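
For reference, the supported formats and their frame-rate ranges can be listed like this; a minimal sketch, assuming `device` is the active AVCaptureDevice:

// Sketch: log each format's dimensions and maximum supported frame rate
// (`device` is assumed to be the active AVCaptureDevice).
for (AVCaptureDeviceFormat *format in device.formats) {
    CMVideoDimensions dims = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
    for (AVFrameRateRange *range in format.videoSupportedFrameRateRanges) {
        NSLog(@"%dx%d supports up to %.0f fps", dims.width, dims.height, range.maxFrameRate);
    }
}

On the devices in question, only the 1280x720 formats report 120/240 fps ranges.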

I would like to capture at a lower resolution (640x480 or less), since I will be putting the frames into a circular buffer for storage. While I am able to reduce the resolution inside the didOutputSampleBuffer delegate method, I would like to know whether there is any way to get a lower-resolution CMSampleBufferRef directly, by configuring the device or the session, rather than taking the 720p image and downscaling it manually via the CVPixelBuffer.

I need to store the images in a buffer for later processing and want to apply only the minimum processing necessary, or I will start dropping frames. If I could avoid resizing and get a lower-resolution CMSampleBuffer straight from the didOutputSampleBuffer delegate method, that would be ideal.

At 240 fps, I have roughly 4 ms to process each image (1 s / 240 frames ≈ 4.2 ms), and a resizing routine cannot keep up with downscaling at that rate. However, I would like to store the frames into a circular buffer for later processing (e.g., writing them out to a movie with AVAssetWriter), and for that I need the lower resolution.

The only image size supported for high-frame-rate recording appears to be 1280x720. Buffering multiple images at this resolution creates memory pressure (a single 720p BGRA frame is about 3.7 MB, so even one second of frames at 240 fps approaches 900 MB), so I am keen to capture a lower-resolution image directly from didOutputSampleBuffer if at all possible, to save memory and keep up with the frame rate.
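
For illustration, a minimal sketch of the kind of circular buffer being described: a fixed-capacity ring of retained pixel buffers that drops the oldest frame when full. The class name and interface are hypothetical, not part of the question:

// Hypothetical fixed-capacity ring of CVPixelBufferRefs (illustrative only).
@interface PixelBufferRing : NSObject
- (instancetype)initWithCapacity:(NSUInteger)capacity;
- (void)push:(CVPixelBufferRef)buffer;
@end

@implementation PixelBufferRing {
    NSMutableArray *_buffers; // retains its elements, oldest first
    NSUInteger _capacity;
}

- (instancetype)initWithCapacity:(NSUInteger)capacity {
    if ((self = [super init])) {
        _capacity = capacity;
        _buffers = [NSMutableArray arrayWithCapacity:capacity];
    }
    return self;
}

- (void)push:(CVPixelBufferRef)buffer {
    if (_buffers.count == _capacity) {
        [_buffers removeObjectAtIndex:0]; // the array releases the oldest frame
    }
    [_buffers addObject:(__bridge id)buffer]; // the array retains the buffer
}
@end

Note that retaining many buffers that came straight from AVCaptureVideoDataOutput can starve the capture pipeline's internal pool; re-rendering into separately allocated buffers (as the answer below does) avoids that.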

Thanks for your help.

1 Answer

#import <CoreImage/CoreImage.h>
#import <OpenGLES/EAGL.h>
#import <VideoToolbox/VideoToolbox.h>

// Core Image performs all of its image operations (crop / transform / ...) on the GPU.

// --- create once ---
EAGLContext *glCtx = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
CIContext *ciContext = [CIContext contextWithEAGLContext:glCtx options:@{kCIContextWorkingColorSpace : [NSNull null]}];
// Rendering in device RGB (no color management) is roughly 3x faster.
CGColorSpaceRef ciContextColorSpace = CGColorSpaceCreateDeviceRGB();
OSType cvPixelFormat = kCVPixelFormatType_32BGRA;

// Create the H.264 compression session.
// `outputResolution` (the target size, e.g. 640x480) and the
// VTEncoderOutputCallback function are defined elsewhere.
VTCompressionSessionRef compressionSession;
NSDictionary *pixelBufferOptions = @{(__bridge NSString *)kCVPixelBufferPixelFormatTypeKey : @(cvPixelFormat),
                                     (__bridge NSString *)kCVPixelBufferWidthKey : @(outputResolution.width),
                                     (__bridge NSString *)kCVPixelBufferHeightKey : @(outputResolution.height),
                                     (__bridge NSString *)kCVPixelBufferOpenGLESCompatibilityKey : @YES,
                                     (__bridge NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{}};

OSStatus ret = VTCompressionSessionCreate(kCFAllocatorDefault,
                                          (int32_t)outputResolution.width,
                                          (int32_t)outputResolution.height,
                                          kCMVideoCodecType_H264,
                                          NULL,                                          // encoder specification
                                          (__bridge CFDictionaryRef)pixelBufferOptions,  // source buffer attributes
                                          NULL,                                          // compressed data allocator
                                          VTEncoderOutputCallback,
                                          (__bridge void *)self,
                                          &compressionSession);
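
// Not part of the original answer: for live capture it can also help to flag
// the session as real-time and pre-warm the encoder before the first frame.
VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
VTCompressionSessionPrepareToEncodeFrames(compressionSession);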

CVPixelBufferRef finishPixelBuffer;
// This takes a buffer from the VTCompressionSession's pixel buffer pool;
// you could use AVAssetWriterInputPixelBufferAdaptor's pool instead.
CVReturn res = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, VTCompressionSessionGetPixelBufferPool(compressionSession), &finishPixelBuffer);
// -------------------

// ------ scale ------
// A new buffer has arrived via the capture delegate:
// - (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection

CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

CIImage *baseImg = [CIImage imageWithCVPixelBuffer:pixelBuffer];
CGFloat outHeight = 240;
CGFloat scale = outHeight / (CGFloat)CVPixelBufferGetHeight(pixelBuffer);
CGAffineTransform transform = CGAffineTransformMakeScale(scale, scale);

// The transform is applied lazily; no pixels are processed yet.
CIImage *resultImg = [baseImg imageByApplyingTransform:transform];
// resultImg = [resultImg imageByCroppingToRect:...];

// The CIContext evaluates the transform and renders the result into
// finishPixelBuffer; this is where the GPU work actually happens.
[ciContext render:resultImg toCVPixelBuffer:finishPixelBuffer bounds:resultImg.extent colorSpace:ciContextColorSpace];
CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

// Alternatively, wrap finishPixelBuffer in a CMSampleBuffer and append it to an AVAssetWriter input:
// [videoInput appendSampleBuffer:CMSampleBufferCreateForImageBuffer(... finishPixelBuffer ...)]
VTCompressionSessionEncodeFrame(compressionSession, finishPixelBuffer, CMSampleBufferGetPresentationTimeStamp(sampleBuffer), CMSampleBufferGetDuration(sampleBuffer), NULL, sampleBuffer, NULL);
// -------------------
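
The answer references a VTEncoderOutputCallback but never shows it. For completeness, a minimal sketch of a function matching the required VTCompressionOutputCallback signature; the body is an assumption, not part of the original answer:

// Hypothetical output callback; VideoToolbox calls it with each encoded frame.
void VTEncoderOutputCallback(void *outputCallbackRefCon,
                             void *sourceFrameRefCon,
                             OSStatus status,
                             VTEncodeInfoFlags infoFlags,
                             CMSampleBufferRef sampleBuffer)
{
    if (status != noErr || sampleBuffer == NULL) {
        return; // encoding failed or the frame was dropped
    }
    // outputCallbackRefCon is the `self` passed to VTCompressionSessionCreate;
    // from here the compressed sample buffer can be appended to an
    // AVAssetWriterInput or written out however the app requires.
}
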
Answered 2016-05-03T11:59:31.413