Yes, you can do this, and I think your question is specific enough to belong here. You're not the only one who has wanted to do it, and it does take a bit of digging to figure out what you can and can't do.
AV Foundation lets you do hardware-accelerated decoding of H.264 videos using an AVAssetReader, at which point you're handed raw decoded video frames in BGRA format. These can be uploaded to a texture using either glTexImage2D() or the more efficient texture caches in iOS 5.0. From there, you can process the frames for display or retrieve them from OpenGL ES and use an AVAssetWriter to perform hardware-accelerated H.264 encoding of the result. All of this uses public APIs, so at no point do you get anywhere near something that would lead to an App Store rejection.
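The AVAssetWriter side isn't shown in the code further down, but its general shape is roughly the following. This is only a sketch of the standard AV Foundation pattern, not code from GPUImage; outputURL, the 640x480 dimensions, and where processedPixelBuffer and frameTime come from are placeholders you'd supply yourself:

NSError *error = nil;
AVAssetWriter *assetWriter = [[AVAssetWriter alloc] initWithURL:outputURL fileType:AVFileTypeQuickTimeMovie error:&error];

NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                               AVVideoCodecH264, AVVideoCodecKey,
                               [NSNumber numberWithInt:640], AVVideoWidthKey,
                               [NSNumber numberWithInt:480], AVVideoHeightKey, nil];
AVAssetWriterInput *videoInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:videoSettings];
videoInput.expectsMediaDataInRealTime = NO;

// The pixel buffer adaptor accepts the BGRA buffers you read back from OpenGL ES
NSDictionary *pixelBufferAttributes = [NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(NSString *)kCVPixelBufferPixelFormatTypeKey];
AVAssetWriterInputPixelBufferAdaptor *pixelBufferAdaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:videoInput sourcePixelBufferAttributes:pixelBufferAttributes];

[assetWriter addInput:videoInput];
[assetWriter startWriting];
[assetWriter startSessionAtSourceTime:kCMTimeZero];

// Then, for each processed frame (processedPixelBuffer and frameTime are placeholders):
// [pixelBufferAdaptor appendPixelBuffer:processedPixelBuffer withPresentationTime:frameTime];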
However, you don't have to roll your own implementation of this. My BSD-licensed open source framework GPUImage encapsulates these operations and handles all of this for you. You create a GPUImageMovie instance for your incoming H.264 movie, attach filters onto it (such as overlay blends or chroma key operations), and then attach those filters to a GPUImageView for display and/or to a GPUImageMovieWriter to re-encode an H.264 movie from the processed video.
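As a rough sketch of how the pieces wire together, following the framework's target-chaining pattern (inputMovieURL, outputMovieURL, and filterView are placeholders, and the sepia filter just stands in for whichever filters you want to use):

GPUImageMovie *movieFile = [[GPUImageMovie alloc] initWithURL:inputMovieURL];
GPUImageSepiaFilter *filter = [[GPUImageSepiaFilter alloc] init];
[movieFile addTarget:filter];

// Display the filtered video in a GPUImageView somewhere in your view hierarchy
[filter addTarget:filterView];

// ...and/or re-encode the filtered output as a new H.264 movie
GPUImageMovieWriter *movieWriter = [[GPUImageMovieWriter alloc] initWithMovieURL:outputMovieURL size:CGSizeMake(640.0, 480.0)];
[filter addTarget:movieWriter];

[movieWriter startRecording];
[movieFile startProcessing];
// When processing is done, call [movieWriter finishRecording] to close out the file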
The one issue I have right now is that I don't obey the timestamps in the video for playback, so frames are processed as quickly as they are decoded from the movie. For filtering and re-encoding of a video, this isn't a problem, because the timestamps are passed through to the recorder, but for direct display to the screen it means the video can be sped up by as much as 2-4X. I'd welcome any contributions that would let you synchronize the playback rate to the actual video timestamps.
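One way to approach that would be to compare the presentation-timestamp delta between successive frames against the wall-clock time that has actually elapsed, and stall before handing the next frame to the filter chain. A minimal sketch of that idea follows; this is not currently in GPUImage, and previousFrameTime / previousActualFrameTime are hypothetical instance variables you'd have to add and seed before the read loop:

// Hypothetical pacing logic, run before processing each decoded frame.
// previousFrameTime (CMTime) and previousActualFrameTime (CFAbsoluteTime) are assumed
// to be seeded with the first frame's timestamp and CFAbsoluteTimeGetCurrent().
CMTime currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBufferRef);
CMTime differenceFromLastFrame = CMTimeSubtract(currentSampleTime, previousFrameTime);
CFAbsoluteTime currentActualTime = CFAbsoluteTimeGetCurrent();

CGFloat frameTimeDifference = CMTimeGetSeconds(differenceFromLastFrame);
CGFloat actualTimeDifference = currentActualTime - previousActualFrameTime;

if (frameTimeDifference > actualTimeDifference)
{
    // We're running ahead of the movie's clock, so wait until the timestamp catches up
    usleep((useconds_t)(1000000.0 * (frameTimeDifference - actualTimeDifference)));
}

previousFrameTime = currentSampleTime;
previousActualFrameTime = CFAbsoluteTimeGetCurrent();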
I can currently play back, filter, and re-encode 640x480 video at well over 30 FPS on an iPhone 4, and 720p video at ~20-25 FPS, with the iPhone 4S capable of 1080p filtering and encoding at well above 30 FPS. Some of the more expensive filters tax the GPU and slow this down a bit, but most filters operate within these framerate ranges.
If you want, you can examine the GPUImageMovie class to see how it does this uploading to OpenGL ES, but the relevant code is as follows:
- (void)startProcessing;
{
    NSDictionary *inputOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:AVURLAssetPreferPreciseDurationAndTimingKey];
    AVURLAsset *inputAsset = [[AVURLAsset alloc] initWithURL:self.url options:inputOptions];

    [inputAsset loadValuesAsynchronouslyForKeys:[NSArray arrayWithObject:@"tracks"] completionHandler: ^{
        NSError *error = nil;
        AVKeyValueStatus tracksStatus = [inputAsset statusOfValueForKey:@"tracks" error:&error];
        if (tracksStatus != AVKeyValueStatusLoaded)
        {
            return;
        }

        reader = [AVAssetReader assetReaderWithAsset:inputAsset error:&error];

        NSMutableDictionary *outputSettings = [NSMutableDictionary dictionary];
        [outputSettings setObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(NSString *)kCVPixelBufferPixelFormatTypeKey];
        // Maybe set alwaysCopiesSampleData to NO on iOS 5.0 for faster video decoding
        AVAssetReaderTrackOutput *readerVideoTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:[[inputAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] outputSettings:outputSettings];
        [reader addOutput:readerVideoTrackOutput];

        NSArray *audioTracks = [inputAsset tracksWithMediaType:AVMediaTypeAudio];
        BOOL shouldRecordAudioTrack = (([audioTracks count] > 0) && (self.audioEncodingTarget != nil));
        AVAssetReaderTrackOutput *readerAudioTrackOutput = nil;

        if (shouldRecordAudioTrack)
        {
            audioEncodingIsFinished = NO;

            // This might need to be extended to handle movies with more than one audio track
            AVAssetTrack *audioTrack = [audioTracks objectAtIndex:0];
            readerAudioTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:audioTrack outputSettings:nil];
            [reader addOutput:readerAudioTrackOutput];
        }

        if ([reader startReading] == NO)
        {
            NSLog(@"Error reading from file at URL: %@", self.url);
            return;
        }

        if (synchronizedMovieWriter != nil)
        {
            __unsafe_unretained GPUImageMovie *weakSelf = self;

            [synchronizedMovieWriter setVideoInputReadyCallback:^{
                [weakSelf readNextVideoFrameFromOutput:readerVideoTrackOutput];
            }];

            [synchronizedMovieWriter setAudioInputReadyCallback:^{
                [weakSelf readNextAudioSampleFromOutput:readerAudioTrackOutput];
            }];

            [synchronizedMovieWriter enableSynchronizationCallbacks];
        }
        else
        {
            while (reader.status == AVAssetReaderStatusReading)
            {
                [self readNextVideoFrameFromOutput:readerVideoTrackOutput];

                if ((shouldRecordAudioTrack) && (!audioEncodingIsFinished))
                {
                    [self readNextAudioSampleFromOutput:readerAudioTrackOutput];
                }
            }

            if (reader.status == AVAssetReaderStatusCompleted)
            {
                [self endProcessing];
            }
        }
    }];
}
- (void)readNextVideoFrameFromOutput:(AVAssetReaderTrackOutput *)readerVideoTrackOutput;
{
    if (reader.status == AVAssetReaderStatusReading)
    {
        CMSampleBufferRef sampleBufferRef = [readerVideoTrackOutput copyNextSampleBuffer];
        if (sampleBufferRef)
        {
            runOnMainQueueWithoutDeadlocking(^{
                [self processMovieFrame:sampleBufferRef];
            });

            CMSampleBufferInvalidate(sampleBufferRef);
            CFRelease(sampleBufferRef);
        }
        else
        {
            videoEncodingIsFinished = YES;
            [self endProcessing];
        }
    }
    else if (synchronizedMovieWriter != nil)
    {
        if (reader.status == AVAssetReaderStatusCompleted)
        {
            [self endProcessing];
        }
    }
}
- (void)processMovieFrame:(CMSampleBufferRef)movieSampleBuffer;
{
    CMTime currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(movieSampleBuffer);
    CVImageBufferRef movieFrame = CMSampleBufferGetImageBuffer(movieSampleBuffer);

    int bufferHeight = CVPixelBufferGetHeight(movieFrame);
    int bufferWidth = CVPixelBufferGetWidth(movieFrame);

    CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent();

    if ([GPUImageOpenGLESContext supportsFastTextureUpload])
    {
        CVPixelBufferLockBaseAddress(movieFrame, 0);

        [GPUImageOpenGLESContext useImageProcessingContext];
        CVOpenGLESTextureRef texture = NULL;
        CVReturn err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, coreVideoTextureCache, movieFrame, NULL, GL_TEXTURE_2D, GL_RGBA, bufferWidth, bufferHeight, GL_BGRA, GL_UNSIGNED_BYTE, 0, &texture);

        if (!texture || err)
        {
            NSLog(@"Movie CVOpenGLESTextureCacheCreateTextureFromImage failed (error: %d)", err);
            CVPixelBufferUnlockBaseAddress(movieFrame, 0);
            return;
        }
        outputTexture = CVOpenGLESTextureGetName(texture);
        // glBindTexture(CVOpenGLESTextureGetTarget(texture), outputTexture);
        glBindTexture(GL_TEXTURE_2D, outputTexture);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

        for (id<GPUImageInput> currentTarget in targets)
        {
            NSInteger indexOfObject = [targets indexOfObject:currentTarget];
            NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue];

            [currentTarget setInputSize:CGSizeMake(bufferWidth, bufferHeight) atIndex:targetTextureIndex];
            [currentTarget setInputTexture:outputTexture atIndex:targetTextureIndex];

            [currentTarget newFrameReadyAtTime:currentSampleTime];
        }

        CVPixelBufferUnlockBaseAddress(movieFrame, 0);

        // Flush the CVOpenGLESTexture cache and release the texture
        CVOpenGLESTextureCacheFlush(coreVideoTextureCache, 0);
        CFRelease(texture);
        outputTexture = 0;
    }
    else
    {
        // Upload to texture
        CVPixelBufferLockBaseAddress(movieFrame, 0);

        glBindTexture(GL_TEXTURE_2D, outputTexture);
        // Using BGRA extension to pull in video frame data directly
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bufferWidth, bufferHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(movieFrame));

        CGSize currentSize = CGSizeMake(bufferWidth, bufferHeight);

        for (id<GPUImageInput> currentTarget in targets)
        {
            NSInteger indexOfObject = [targets indexOfObject:currentTarget];
            NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue];

            [currentTarget setInputSize:currentSize atIndex:targetTextureIndex];
            [currentTarget newFrameReadyAtTime:currentSampleTime];
        }

        CVPixelBufferUnlockBaseAddress(movieFrame, 0);
    }

    if (_runBenchmark)
    {
        CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime);
        NSLog(@"Current frame time : %f ms", 1000.0 * currentFrameTime);
    }
}