So far I have a slider that lets me zoom the camera in and out. Now I want to add another slider to increase and decrease the brightness of the camera. From what I have found, I need to use AVCaptureVideoDataOutput to capture individual frames; its delegate method then hands me each frame so I can process it and do whatever I want with it. I'm posting my code below and explaining where I'm stuck.
AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if (videoDevice) {
    NSError *error;
    AVCaptureDeviceInput *videoIn = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:&error];
    if (!error) {
        if ([[self captureSession] canAddInput:videoIn])
            [[self captureSession] addInput:videoIn];
        else
            NSLog(@"Couldn't add video input");
    }
    else
        NSLog(@"Couldn't create video input");
}
else
    NSLog(@"Couldn't create video capture device");

// AVCaptureVideoDataOutput
AVCaptureVideoDataOutput *videoOut = [[AVCaptureVideoDataOutput alloc] init];
/*
 RosyWriter prefers to discard late video frames early in the capture pipeline, since its
 processing can take longer than real-time on some platforms (such as iPhone 3GS).
 Clients whose image processing is faster than real-time should consider setting AVCaptureVideoDataOutput's
 alwaysDiscardsLateVideoFrames property to NO.
 */
[videoOut setAlwaysDiscardsLateVideoFrames:YES];
[videoOut setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]];
dispatch_queue_t videoCaptureQueue = dispatch_queue_create("Video Capture Queue", DISPATCH_QUEUE_SERIAL);
[videoOut setSampleBufferDelegate:self queue:videoCaptureQueue];
dispatch_release(videoCaptureQueue);
if ([captureSession canAddOutput:videoOut])
    [captureSession addOutput:videoOut];
}
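For the slider itself I was thinking of something like this, so the value is visible to the capture queue when the delegate below fires (brightnessValue and ciContext are just names I made up, not from any sample code):

// Assumed declarations in the class extension (my own naming):
// @property (assign) float brightnessValue;   // roughly -1.0 ... 1.0, 0 = unchanged
// @property (retain) CIContext *ciContext;    // created lazily, reused for every frame

- (IBAction)brightnessSliderChanged:(UISlider *)slider
{
    // Atomic by default, so the video capture queue can read it in the delegate callback.
    self.brightnessValue = slider.value;
}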
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // This is the delegate method where I get each frame. I have read that I need to
    // process the frame and then send it back to the session, but I don't know how.
    // What should I do here so I can increase and decrease the brightness of the
    // camera? Please help me if you know.
}
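From what I have read, the processing could be something like the sketch below: wrap the pixel buffer in a CIImage, push it through Core Image's CIColorControls filter using kCIInputBrightnessKey, and render the result back into the buffer. This is only my guess, based on the brightnessValue / ciContext assumptions above, so please tell me if this is the wrong direction:

// (assumes #import <CoreImage/CoreImage.h> and linking the CoreImage framework)
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    if (pixelBuffer == NULL)
        return;

    if (!self.ciContext)
        self.ciContext = [CIContext contextWithOptions:nil]; // create once, reuse for every frame

    // Wrap the BGRA pixel buffer in a CIImage and apply the brightness value from the slider.
    CIImage *inputImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
    CIFilter *brightnessFilter = [CIFilter filterWithName:@"CIColorControls"];
    [brightnessFilter setValue:inputImage forKey:kCIInputImageKey];
    [brightnessFilter setValue:[NSNumber numberWithFloat:self.brightnessValue] forKey:kCIInputBrightnessKey];
    CIImage *outputImage = [brightnessFilter outputImage];

    // Render the adjusted image back into the pixel buffer.
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    [self.ciContext render:outputImage toCVPixelBuffer:pixelBuffer];
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    // The processed frame still has to be drawn somewhere (for example into a GLKView),
    // because AVCaptureVideoPreviewLayer shows the raw camera feed and will not reflect
    // anything done here.
}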