11

我已经分别从视频帧中提取 YUV 数据并将它们保存在data[0],data[1],data[2];帧大小是640*480;现在我创建pixelBuffer如下:

void *pYUV[3] = {data[0], data[1], data[2]};
size_t planeWidth = {640, 320, 320};
size_t planeHeight = {480, 240, 240};
size_t planeBytesPerRow = {640, 320, 320};
CVReturn renturn = CVPixelBufferCreateWithPlanarBytes(kCFAllocatorDefault,
                                   640, 
                                   480,
                                   kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, 
                                   nil,
                                   nil,
                                   3, 
                                   pYUV,
                                   planeWidth,
                                   planeHeight, 
                                   planeBytesPerRow, 
                                   nil,
                                   nil, nil, &_pixelBuffer);
CVPixelBufferLockBaseAddress(_pixelBuffer, 0);
CVPixelBufferRetain(_pixelBuffer);
    // Periodic texture cache flush every frame
CVOpenGLESTextureCacheFlush(_textureCache, 0);

// The Buffer cannot be used with OpenGL as either its size, pixelformat or attributes are not supported by OpenGL
 glActiveTexture(GL_TEXTURE0);
CVReturn err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, 
                                                            _textureCache,
                                                            _pixelBuffer,
                                                            NULL,
                                                            GL_TEXTURE_2D,
                                                            GL_LUMINANCE,
                                                            im.width,
                                                            im.height,
                                                            GL_LUMINANCE,
                                                            GL_UNSIGNED_BYTE,
                                                            0,
                                                            &_yTexture);

if (!_yTexture || err) {
    NSLog(@"CVOpenGLESTextureCacheCreateTextureFromImage failed (error: %d)", err);  
    return;
}
glBindTexture(CVOpenGLESTextureGetTarget(_yTexture), CVOpenGLESTextureGetName(_yTexture));
 CVPixelBufferUnlockBaseAddress(_pixelBuffer, 0);

但是错误是 -6638,文档只是指出“由于不支持的缓冲区大小、像素格式或属性,像素缓冲区与 OpenGL 不兼容”。这对我没有多大帮助。

我该如何解决?

4

2 回答 2

2

您的源图像/视频帧的分辨率是否为 2 的幂?如果没有,您必须在创建纹理之前调整它的大小。

于 2013-06-28T16:00:57.767 回答
2

Apple 在Technical Q&A 1781中详细说明了这个确切问题的原因

问题是源像素缓冲区必须支持 IOSSurface。指定一个空字典作为值kCVPixelBufferIOSurfacePropertiesKey

于 2014-08-19T01:12:21.257 回答