CGBitmapContextRef can only paint into something like 32ARGB, correct. This means that you will want to create ARGB (or RGBA) buffers, and then find a way to transfer YUV pixels onto that ARGB surface very quickly. This recipe involves using CoreImage, a home-made CVPixelBufferRef obtained through a pool, a CGBitmapContextRef referencing your home-made pixel buffer, and then recreating a CMSampleBufferRef resembling your input buffer, but referencing your output pixels. In other words:
- Fetch the incoming pixels into a CIImage.
- Create a CVPixelBufferPool matching the pixel format and output dimensions you are producing. You don't want to create CVPixelBuffers on the fly without a pool: you will run out of memory if your producer is too fast, you will fragment your RAM since buffers never get reused, and it's a waste of cycles.
- Create a CIContext with the default constructor and share it between buffers. It contains no external state, but the documentation says that recreating it on every frame is very expensive (see the sketch after this list).
- On each incoming frame, create a new pixel buffer. Make sure to use an allocation threshold so that RAM usage doesn't run away.
- Lock the pixel buffer.
- Create a bitmap context referencing the bytes in the pixel buffer.
- Use the CIContext to render the planar image data into the linear buffer.
- Perform your app-specific drawing in the CGContext!
- Unlock the pixel buffer.
- Fetch the timing info of the original sample buffer.
- Create a CMVideoFormatDescriptionRef by asking the pixel buffer for its exact format.
- Create a sample buffer for the pixel buffer. Done!
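The implementation below references that shared context through an `_imageContext` ivar but doesn't show its creation. Here is a minimal sketch of the one-time setup; placing it in `init` is my assumption, not something the recipe prescribes:

```objc
#import <CoreImage/CoreImage.h>

// Minimal sketch, assuming the CIContext is created once (here in init)
// rather than per frame, per the recipe's warning above.
- (instancetype)init
{
    if((self = [super init])) {
        _imageContext = [CIContext contextWithOptions:nil]; // default constructor, no external state
    }
    return self;
}
```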
Here is a sample implementation, where I picked 32ARGB as the image format to use, since that is something both CGBitmapContext and CoreVideo enjoy working with on iOS:
{
    CVPixelBufferPoolRef _pool;
    CGSize _poolBufferDimensions;
    CIContext *_imageContext; // shared Core Image context (see sketch above)
}
- (void)_processSampleBuffer:(CMSampleBufferRef)inputBuffer
{
    // 1. Input data
    CVPixelBufferRef inputPixels = CMSampleBufferGetImageBuffer(inputBuffer);
    CIImage *inputImage = [CIImage imageWithCVPixelBuffer:inputPixels];

    // 2. Create a new pool if the old pool doesn't have the right format.
    CGSize bufferDimensions = {CVPixelBufferGetWidth(inputPixels), CVPixelBufferGetHeight(inputPixels)};
    if(!_pool || !CGSizeEqualToSize(bufferDimensions, _poolBufferDimensions)) {
        if(_pool) {
            CFRelease(_pool);
        }
        OSStatus ok0 = CVPixelBufferPoolCreate(NULL,
            NULL, // pool attrs
            (__bridge CFDictionaryRef)(@{
                (id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32ARGB),
                (id)kCVPixelBufferWidthKey: @(bufferDimensions.width),
                (id)kCVPixelBufferHeightKey: @(bufferDimensions.height),
            }), // buffer attrs
            &_pool
        );
        _poolBufferDimensions = bufferDimensions;
        assert(ok0 == noErr);
    }
    // 4. Create pixel buffer
    CVPixelBufferRef outputPixels;
    OSStatus ok1 = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(NULL,
        _pool,
        (__bridge CFDictionaryRef)@{
            // Opt to fail buffer creation in case of slow buffer consumption
            // rather than to exhaust all memory.
            (__bridge id)kCVPixelBufferPoolAllocationThresholdKey: @20
        }, // aux attributes
        &outputPixels
    );
    if(ok1 == kCVReturnWouldExceedAllocationThreshold) {
        // Dropping frame because consumer is too slow
        return;
    }
    assert(ok1 == noErr);
    // 5, 6. Graphics context to draw in
    CGColorSpaceRef deviceColors = CGColorSpaceCreateDeviceRGB();
    OSStatus ok2 = CVPixelBufferLockBaseAddress(outputPixels, 0);
    assert(ok2 == noErr);
    CGContextRef cg = CGBitmapContextCreate(
        CVPixelBufferGetBaseAddress(outputPixels), // bytes
        CVPixelBufferGetWidth(inputPixels), CVPixelBufferGetHeight(inputPixels), // dimensions
        8, // bits per component
        CVPixelBufferGetBytesPerRow(outputPixels), // bytes per row
        deviceColors, // color space
        kCGImageAlphaPremultipliedFirst // bitmap info
    );
    CFRelease(deviceColors);
    assert(cg != NULL);
    // 7
    [_imageContext render:inputImage toCVPixelBuffer:outputPixels];

    // 8. DRAW
    CGContextSetRGBFillColor(cg, 0.5, 0, 0, 1);
    CGContextSetTextDrawingMode(cg, kCGTextFill);
    NSAttributedString *text = [[NSAttributedString alloc] initWithString:@"Hello world" attributes:nil];
    CTLineRef line = CTLineCreateWithAttributedString((__bridge CFAttributedStringRef)text);
    CTLineDraw(line, cg);
    CFRelease(line);

    // 9. Unlock and stop drawing
    CFRelease(cg);
    CVPixelBufferUnlockBaseAddress(outputPixels, 0);
    // 10. Timings
    CMSampleTimingInfo timingInfo;
    OSStatus ok4 = CMSampleBufferGetSampleTimingInfo(inputBuffer, 0, &timingInfo);
    assert(ok4 == noErr);

    // 11. Video format
    CMVideoFormatDescriptionRef videoFormat;
    OSStatus ok5 = CMVideoFormatDescriptionCreateForImageBuffer(NULL, outputPixels, &videoFormat);
    assert(ok5 == noErr);

    // 12. Output sample buffer
    CMSampleBufferRef outputBuffer;
    OSStatus ok3 = CMSampleBufferCreateForImageBuffer(NULL, // allocator
        outputPixels, // image buffer
        YES, // data ready
        NULL, // make ready callback
        NULL, // make ready refcon
        videoFormat,
        &timingInfo, // timing info
        &outputBuffer // out
    );
    assert(ok3 == noErr);

    [_consumer consumeSampleBuffer:outputBuffer];

    CFRelease(outputPixels);
    CFRelease(videoFormat);
    CFRelease(outputBuffer);
}
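For context, here is a hedged sketch of how `_processSampleBuffer:` might be fed frames. The capture-delegate wiring is my assumption for illustration; the answer itself doesn't specify where the input buffers come from, and any source of CMSampleBufferRefs (capture, decode, playback taps) would work the same way:

```objc
#import <AVFoundation/AVFoundation.h>

// Illustrative sketch only: one plausible way to drive _processSampleBuffer:,
// assuming this class is set as an AVCaptureVideoDataOutput's sample buffer
// delegate. The _consumer object in the sample above is likewise whatever
// downstream sink you hand the finished buffers to.
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    [self _processSampleBuffer:sampleBuffer];
}
```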