I tried to answer this in the original thread, but SO wouldn't let me. Hopefully someone with more authority can merge it into the original question.
OK, here is a more complete answer. First, set up the capture:
// Create capture session
self.captureSession = [[AVCaptureSession alloc] init];
[self.captureSession setSessionPreset:AVCaptureSessionPresetPhoto];
// Setup capture input
self.inputDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput deviceInputWithDevice:self.inputDevice
error:nil];
[self.captureSession addInput:captureInput];
// Setup video processing (capture output)
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
// Don't add frames to the queue if frames are already processing
captureOutput.alwaysDiscardsLateVideoFrames = YES;
// Create a serial queue to handle processing of frames
_videoQueue = dispatch_queue_create("cameraQueue", NULL);
[captureOutput setSampleBufferDelegate:self queue:_videoQueue];
// Set the video output to store frame in YUV
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[captureOutput setVideoSettings:videoSettings];
[self.captureSession addOutput:captureOutput];
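One thing the snippet above does not show: no frames arrive until the session is actually started. Once the input and output are wired up, you would typically kick things off with something like this (and call stopRunning when you no longer need frames):

// Start the flow of data from the capture input to the video data output
[self.captureSession startRunning];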
Now for the implementation of the delegate/callback:
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // Create an autorelease pool because we are not on the main queue
    @autoreleasepool {
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

        // Lock the image buffer while we read from it
        CVPixelBufferLockBaseAddress(imageBuffer, 0);

        // Get information about the image
        uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
        size_t width = CVPixelBufferGetWidth(imageBuffer);
        size_t height = CVPixelBufferGetHeight(imageBuffer);
        size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);

        // For a bi-planar buffer, the base address points at this header struct
        CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress;

        // Move the pointer past the header to the start of the Y (luma) plane
        baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);

        // Convert the image
        UIImage *image = [self makeUIImage:baseAddress bufferInfo:bufferInfo width:width height:height bytesPerRow:bytesPerRow];

        // Done reading from the buffer, so unlock it again
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

        // Update the display with the captured image for DEBUG purposes
        // (UIKit should only be touched on the main queue)
        dispatch_async(dispatch_get_main_queue(), ^{
            _prefImageView.image = image;
            [_myMainView.yUVImage setImage:image];
        });
    }
}
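Because alwaysDiscardsLateVideoFrames is set to YES above, the output will silently discard frames whenever processing falls behind. If you want visibility into that while debugging, you can optionally implement the companion delegate method as well; a minimal sketch (the logging is just an example):

- (void)captureOutput:(AVCaptureOutput *)captureOutput
  didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // Called on the video queue whenever a late frame is discarded
    NSLog(@"Dropped a video frame");
}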
And finally, the method that converts from YUV to a UIImage:
- (UIImage *)makeUIImage:(uint8_t *)inBaseAddress bufferInfo:(CVPlanarPixelBufferInfo_YCbCrBiPlanar *)inBufferInfo width:(size_t)inWidth height:(size_t)inHeight bytesPerRow:(size_t)inBytesPerRow {
    // Row stride of the Y plane (the struct stores it big-endian)
    NSUInteger yPitch = EndianU32_BtoN(inBufferInfo->componentInfoY.rowBytes);

    int bytesPerPixel = 4;
    uint8_t *rgbBuffer = (uint8_t *)malloc(inWidth * inHeight * bytesPerPixel);
    uint8_t *yBuffer = (uint8_t *)inBaseAddress;

    // For each luma byte in the input, write four bytes to the output:
    // the first byte is the alpha channel, the next three all carry the
    // same luma value, which produces a grayscale image
    for (size_t y = 0; y < inHeight; y++)
    {
        uint8_t *yRow = yBuffer + y * yPitch;                       // the Y plane may be padded, so step by yPitch
        uint8_t *rgbRow = rgbBuffer + y * inWidth * bytesPerPixel;  // the output buffer is tightly packed
        for (size_t x = 0; x < inWidth; x++)
        {
            uint8_t val = yRow[x];
            rgbRow[x * bytesPerPixel]     = 0xff;   // alpha
            rgbRow[x * bytesPerPixel + 1] = val;
            rgbRow[x * bytesPerPixel + 2] = val;
            rgbRow[x * bytesPerPixel + 3] = val;
        }
    }

    // Create a device-dependent RGB color space and wrap the buffer in a bitmap context
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(rgbBuffer, inWidth, inHeight, 8,
        inWidth * bytesPerPixel, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);

    CGImageRef quartzImage = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    UIImage *image = [UIImage imageWithCGImage:quartzImage];
    CGImageRelease(quartzImage);
    free(rgbBuffer);

    return image;
}
You will also need to #import "Endian.h".
Note that the call to CGBitmapContextCreate is much trickier than I expected. I am not very well versed in video processing, and this call had me stumped for a while. Then, when it finally worked, it was like magic.
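If you would rather not depend on Endian.h and the CVPlanarPixelBufferInfo_YCbCrBiPlanar header at all, Core Video can hand you the Y-plane pitch directly in the delegate, where imageBuffer is available; I have not swapped this into the code above, but it should give you the same value as the rowBytes lookup:

// Alternative to EndianU32_BtoN(bufferInfo->componentInfoY.rowBytes)
size_t yPitch = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);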
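For completeness: the snippets above reference a few properties and ivars that are never declared in this answer. The containing view controller would need something roughly along these lines; the class and view names here are placeholders, only the member names are taken from the code above:

#import <AVFoundation/AVFoundation.h>
#import <UIKit/UIKit.h>
#import "Endian.h"

@interface MyViewController () <AVCaptureVideoDataOutputSampleBufferDelegate>
{
    dispatch_queue_t _videoQueue;   // serial queue for frame processing
    UIImageView *_prefImageView;    // holds the most recently converted frame
    MyMainView *_myMainView;        // whatever view exposes the yUVImage image view (hypothetical)
}
@property (nonatomic, strong) AVCaptureSession *captureSession;
@property (nonatomic, strong) AVCaptureDevice *inputDevice;
@end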