
I am trying to apply a simple Core Image filter to the live camera input. I think my code is fine, but calling drawImage:inRect:fromRect: in the captureOutput method causes either an EXC_BAD_ACCESS or a `[__NSCFNumber drawImage:inRect:fromRect:]: unrecognized selector` error, which makes me think my context has already been deallocated by the time I call drawImage on it. That doesn't make sense to me, because my CIContext is an instance variable of the class.

The problem doesn't seem to come from OpenGL, because I tried with a simple context (not created from an EAGLContext) and I get the same problem.
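
By "simple context" I mean a plain CIContext that is not backed by OpenGL (that is what the unused ciTestContext ivar in the header below was for). Roughly the following, as a sketch only, since the exact options may have differed:

    // Plain Core Image context, not backed by an EAGLContext,
    // created only to check whether OpenGL is the cause of the crash.
    ciTestContext = [CIContext contextWithOptions:nil];

    // Used in captureOutput: in place of coreImageContext; the crash is the same.
    // CGImageRef cgFrame = [ciTestContext createCGImage:result fromRect:[result extent]];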

I am testing on an iPhone 5 running iOS 6, since the camera doesn't work in the simulator.

Can you help me? Thank you very much for your time.

Here is my .h file:

<!-- language: objective-c -->

    //  CameraController.h

    #import <UIKit/UIKit.h>
    #import <OpenGLES/EAGL.h>
    #import <AVFoundation/AVFoundation.h>
    #import <GLKit/GLKit.h>
    #import <CoreMedia/CoreMedia.h>
    #import <CoreVideo/CoreVideo.h>
    #import <QuartzCore/QuartzCore.h>
    #import <CoreImage/CoreImage.h>
    #import <ImageIO/ImageIO.h>

    @interface CameraController : GLKViewController <AVCaptureVideoDataOutputSampleBufferDelegate>{

        AVCaptureSession *avCaptureSession;
        CIContext *coreImageContext;
        CIContext *ciTestContext;
        GLuint _renderBuffer;
        EAGLContext *glContext;
    }

    @end

And my .m file:

<!-- language: objective-c -->

    //  CameraController.m

    #import "CameraController.h"

    @interface CameraController ()

    @end

    @implementation CameraController

    - (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil
    {
        self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil];
        if (self) {

        }
        return self;
    }

    - (void)viewDidLoad
    {
        [super viewDidLoad];

        // Initialize Open GL ES2 Context
        glContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
        if (!glContext) {
            NSLog(@"Failed to create ES context");
        }
        [EAGLContext setCurrentContext:nil];

        // Gets the GL View and sets the depth format to 24 bits, and the context of the view to be the Open GL context created above
        GLKView *view = (GLKView *)self.view;
        view.context = glContext;
        view.drawableDepthFormat = GLKViewDrawableDepthFormat24;

        // Creates CI Context from  EAGLContext
        NSMutableDictionary *options = [[NSMutableDictionary alloc] init];
        [options setObject: [NSNull null] forKey: kCIContextWorkingColorSpace];
        coreImageContext = [CIContext contextWithEAGLContext:glContext options:options];

        glGenRenderbuffers(1, &_renderBuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, _renderBuffer);

        // Initialize Video Capture Device
        NSError *error;
        AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
        AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:&error];

        // Initialize Video Output object and set output settings
        AVCaptureVideoDataOutput *dataOutput = [[AVCaptureVideoDataOutput alloc] init];

        [dataOutput setAlwaysDiscardsLateVideoFrames:YES];
        [dataOutput setVideoSettings:[NSDictionary  dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA]
                                                                  forKey:(id)kCVPixelBufferPixelFormatTypeKey]];


        // Delegates the SampleBuffer to the current object which implements the AVCaptureVideoDataOutputSampleBufferDelegate interface via the captureOutput method
        [dataOutput setSampleBufferDelegate:self queue:dispatch_get_main_queue()];

        // Initialize the capture session, add input, output, start running
        avCaptureSession = [[AVCaptureSession alloc] init];
        [avCaptureSession beginConfiguration];
        [avCaptureSession setSessionPreset:AVCaptureSessionPreset1280x720];
        [avCaptureSession addInput:input];
        [avCaptureSession addOutput:dataOutput];
        [avCaptureSession commitConfiguration];
        [avCaptureSession startRunning];


    }

    -(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {

        // Creates a CIImage from the sample buffer of the camera frame
        CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
        CIImage *inputImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];

        // Creates the relevant filter
        CIFilter *filter = [CIFilter filterWithName:@"CISepiaTone"];
        [filter setValue:inputImage forKey:kCIInputImageKey];
        [filter setValue:[NSNumber numberWithFloat:0.8f] forKey:@"inputIntensity"];

        // Creates a reference to the output of the filter
        CIImage *result = [filter valueForKey:kCIOutputImageKey];

        // Draw to the context
        [coreImageContext drawImage:result inRect:[result extent] fromRect:[result extent]]; // 5

        [glContext presentRenderbuffer:GL_RENDERBUFFER];
    }

    - (void)didReceiveMemoryWarning
    {
        [super didReceiveMemoryWarning];
        // Dispose of any resources that can be recreated.
    }


    @end

1 Answer


In your viewDidLoad method, you have:

    coreImageContext = [CIContext contextWithEAGLContext:glContext options:options];

If you want to use coreImageContext later, in the captureOutput method, you need to retain it: contextWithEAGLContext:options: returns an object you do not own, so without a retain it can be deallocated and its memory reused before the first frame arrives, which would explain both the EXC_BAD_ACCESS and the unrecognized-selector message.
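
A minimal sketch of that fix, assuming the class is compiled with manual reference counting (under ARC the plain ivar would already hold a strong reference, so the symptom suggests MRC):

    // viewDidLoad: take ownership, so the context outlives the current
    // autorelease pool instead of being deallocated before the first frame arrives.
    coreImageContext = [[CIContext contextWithEAGLContext:glContext options:options] retain];

    // Balance the retain when the controller is destroyed.
    - (void)dealloc {
        [coreImageContext release];
        [super dealloc];
    }

Under ARC the equivalent is simply to keep a strong reference, for example a strong property for the context, and to drop the retain/release calls.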

Answered 2013-05-30T18:11:29.930