0

我正在开发一个 iPad3 应用程序,我必须在其中录制视频和音频。

在 iPad3 上工作时,我遇到了如下问题:

AudioOutPutBuffer got missed while CGContextDrawImage.

缩放级别设置为 0.55 时发生错误

我的代码如下:

// Shared AVCaptureVideoDataOutput / AVCaptureAudioDataOutput delegate callback.
// Crops each video frame (simulating a zoom of `zoomScale`) and appends the
// result — and any audio samples — to the asset writer.
// NOTE(review): this runs on the capture output's dispatch queue, NOT the main
// queue; all Core Graphics work here blocks frame/audio delivery on that queue.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    if (!CMSampleBufferDataIsReady(sampleBuffer))
    {
        NSLog( @"sample buffer is not ready. Skipping sample" );
        return;
    }

    if (!_cameraBar.recording) {
        return;
    }

    _lastTimestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    [self calculateFramerateAtTimestamp:_lastTimestamp];

    if (_videoWriter.status != AVAssetWriterStatusWriting)
    {
        // First frame: start the writer and anchor the session clock to it.
        [_videoWriter startWriting];
        [_videoWriter startSessionAtSourceTime:_lastTimestamp];
        _startTime = CMTimeGetSeconds(_lastTimestamp);
    }
    else if (captureOutput == _videoDataOutput) {

        NSLog( @"-------------Movie-----------" );

        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        if (imageBuffer == NULL) {
            // Not a pixel-buffer-backed sample; nothing to crop.
            return;
        }

        // Lock the source pixel buffer for CPU access while we wrap it in a CGImage.
        CVPixelBufferLockBaseAddress(imageBuffer, 0);
        size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
        size_t width = CVPixelBufferGetWidth(imageBuffer);
        size_t height = CVPixelBufferGetHeight(imageBuffer);
        void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
        size_t bufferSize = CVPixelBufferGetDataSize(imageBuffer);

        // Wrap the raw BGRA bytes in a CGImage without copying them.
        // The image is only valid while imageBuffer stays locked.
        CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, baseAddress, bufferSize, NULL);
        CGImageRef cgImage = CGImageCreate(width, height, 8, 32, bytesPerRow,
                      _colorSpace, kCGImageAlphaNoneSkipFirst |
                      kCGBitmapByteOrder32Little,
                      dataProvider, NULL, true, kCGRenderingIntentDefault);
        CGDataProviderRelease(dataProvider);

        if (cgImage == NULL) {
            CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
            return;
        }

        // Crop to the centered sub-rect, removing the "porthole" effect.
        // zoomScale is defined elsewhere (reported to be ~0.55).
        size_t croppedWidth = (size_t)((float)width * zoomScale);
        size_t croppedHeight = (size_t)((float)height * zoomScale);

        CGImageRef croppedImage = CGImageCreateWithImageInRect(cgImage,
                                      CGRectMake((width - croppedWidth) / 2,
                                                 (height - croppedHeight) / 2,
                                                 croppedWidth, croppedHeight));
        CGImageRelease(cgImage);
        if (croppedImage == NULL) {
            CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
            return;
        }

        NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                                 [NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
                                 [NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey, nil];

        // Allocate a destination pixel buffer and render the crop into it.
        // BUG FIX: the original never released this buffer, leaking one
        // CVPixelBuffer per frame — that starves the capture pools and is the
        // most likely cause of the dropped audio buffers reported above.
        CVPixelBufferRef croppedBuffer = NULL;
        CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault,
                                              croppedWidth,
                                              croppedHeight,
                                              CVPixelBufferGetPixelFormatType(imageBuffer),
                                              (__bridge CFDictionaryRef)options,
                                              &croppedBuffer);
        if (status != kCVReturnSuccess || croppedBuffer == NULL) {
            CGImageRelease(croppedImage);
            CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
            return;
        }

        CVPixelBufferLockBaseAddress(croppedBuffer, 0);
        void *croppedAddress = CVPixelBufferGetBaseAddress(croppedBuffer);
        CGContextRef croppedContext = CGBitmapContextCreate(croppedAddress,
                                                            croppedWidth,
                                                            croppedHeight,
                                                            8,
                                                            CVPixelBufferGetBytesPerRow(croppedBuffer),
                                                            _colorSpace,
                                                            kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little);
        if (croppedContext != NULL) {
            // Draw synchronously on this queue: the source bytes are only valid
            // while imageBuffer is locked, so this must NOT be dispatched async.
            CGContextDrawImage(croppedContext, CGRectMake(0, 0, croppedWidth, croppedHeight), croppedImage);
            CGContextRelease(croppedContext);
        }

        CGImageRelease(croppedImage);
        CVPixelBufferUnlockBaseAddress(croppedBuffer, 0);
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

        // Append the cropped frame, then balance CVPixelBufferCreate.
        // Assumes -newVideoSampleWithBuffer:time: follows the standard Cocoa
        // convention of not consuming its argument — TODO confirm.
        [self newVideoSampleWithBuffer:croppedBuffer time:_lastTimestamp];
        CVPixelBufferRelease(croppedBuffer);
    }
    else if (captureOutput == _audioDataOutput)
    {
        NSLog( @"~~~~~~~~~~~~~~~Audio~~~~~~~~~~~~~~~" );
        // Audio needs no transformation; append the sample buffer directly.
        [self newAudioSample:sampleBuffer];
    }
}

经过长时间的尝试,我知道了,

CGContextDrawImage(croppedContext, CGRectMake(0, 0, croppedWidth, croppedHeight), croppedImage); 

这个方法需要放到其他线程上执行,但我不知道该怎么做。当我尝试把它放进如下的调度代码中时:

dispatch_async(dispatch_get_main_queue(), ^{
    CGContextDrawImage(croppedContext, CGRectMake(0, 0, croppedWidth, croppedHeight), croppedImage);
});

然后就崩溃了。

所以任何人都有任何解决方案,请帮助。

先感谢您。

4

0 回答 0