我正在从这样的 OpenGL 层生成 CVPixelBufferRef
- (CVPixelBufferRef) getGLPixelBuf {
int s = 1;
UIScreen * screen = [UIScreen mainScreen];
if ([screen respondsToSelector:@selector(scale)]){
s = (int)[screen scale];
}
const int w = self.frame.size.width/2;
const int h = self.frame.size.height/2;
const NSInteger my_data_length = 4 * w * h * s * s;
// allocate array and read pixels into it.
GLubyte * buffer = malloc(my_data_length);
GLint readType;
GLint readFormat;
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &readType);
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &readFormat);
glReadPixels(0, 0, w * s, h * s, readFormat, readType, buffer);
// gl renders "upside down" so swap top to bottom into new array.
GLubyte * buffer2 = malloc(my_data_length);
for(int y = 0; y < h*s; y++){
memcpy(buffer2 + (h * s - 1 - y) * 4 * w * s, buffer + (4 * y * w * s), 4 * w * s);
}
free(buffer);
CVPixelBufferRef pixel_buffer = NULL;
CVPixelBufferCreateWithBytes (NULL, w * 2, h * 2, kCVPixelFormatType_32BGRA, buffer2 , 4 * w * s, NULL, 0, NULL, &pixel_buffer);
free(buffer2);
return pixel_buffer;
}
然后将该像素缓冲区传递给我的 AVAssetWriterInputPixelBufferAdaptor 在一个辅助类中,如下所示:
- (void)recordFrame {
if([recorder isRecording]){
CVPixelBufferRef pixelBuffer = [self getGLPixelBuf];
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
[recorder appendPixelBuffer:pixelBuffer withPresentationTime:camera.lastSampleTime];
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
CVPixelBufferRelease(pixelBuffer);
}
}
但是在该助手类中,以下内容
- (BOOL)appendPixelBuffer:(CVPixelBufferRef)pixelBuffer withPresentationTime:(CMTime)presentationTime {
if (writerInput.readyForMoreMediaData)
return [adaptor appendPixelBuffer:pixelBuffer withPresentationTime:presentationTime];
return NO;
}
当我调用 appendPixelBuffer 时会导致 EXC_BAD_ACCESS。我已启用 NSZombieEnabled 但它没有为我提供任何信息。记录器被初始化为与 OpenGL 层的背景高度和宽度相同的高度和宽度。适配器配置为 kCVPixelFormatType_32BGRA 像素格式以及像素缓冲区。
任何帮助表示赞赏!谢谢!