3

我正在尝试为我目前正在开发的应用程序设计条形码扫描仪。我希望扫描仪预览充满设备的整个屏幕并提供一个较小的框架来指向条形码。一切都在按我想要的方式工作,但我无法让感兴趣的框架工作。

下面是条码扫描器的实现:

#import "GEScannerViewController.h"
@import AVFoundation;

@interface GEScannerViewController () <AVCaptureMetadataOutputObjectsDelegate> {
    AVCaptureSession *_session;
    AVCaptureDevice *_device;
    AVCaptureDeviceInput *_input;
    AVCaptureMetadataOutput *_output;
    AVCaptureVideoPreviewLayer *_prevLayer;

    UIView *_greyView;
    UIView *_highlightView;
    UIView *_scopeView;
    UILabel *_label;
}
@end

@implementation GEScannerViewController

- (void)viewDidLoad {
    [super viewDidLoad];

    _label = [[UILabel alloc] init];
    _label.frame = CGRectMake(0, self.view.bounds.size.height - 40, self.view.bounds.size.width, 40);
    _label.autoresizingMask = UIViewAutoresizingFlexibleTopMargin;
    _label.backgroundColor = [UIColor colorWithWhite:0.15 alpha:0.65];
    _label.textColor = [UIColor whiteColor];
    _label.textAlignment = NSTextAlignmentCenter;
    _label.text = @"(none)";
    [self.view addSubview:_label];

    NSError *error = nil;

    _session = [[AVCaptureSession alloc] init];
    _device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];

    [_device lockForConfiguration:&error];

    if (error) {
        NSLog(@"Error: %@", error);
    }

    _device.focusPointOfInterest = CGPointMake(self.view.frame.size.width / 2, (self.view.frame.size.height / 2) - 80);

    _input = [AVCaptureDeviceInput deviceInputWithDevice:_device error:&error];
    if (_input) {
        [_session addInput:_input];
    } else {
        NSLog(@"Error: %@", error);
    }

    _output = [[AVCaptureMetadataOutput alloc] init];
    [_output setMetadataObjectsDelegate:self queue:dispatch_get_main_queue()];
    _output.rectOfInterest = CGRectMake((self.view.frame.size.width / 2) - 160, (self.view.frame.size.height / 2) - 160, 320, 160);
    [_session addOutput:_output];

    _output.metadataObjectTypes = [_output availableMetadataObjectTypes];

    _prevLayer = [AVCaptureVideoPreviewLayer layerWithSession:_session];
    _prevLayer.frame = self.view.bounds;
    _prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    [self.view.layer addSublayer:_prevLayer];

    _greyView = [[UIView alloc] initWithFrame:self.view.frame];
    _greyView.bounds = self.view.bounds;
    _greyView.backgroundColor = [UIColor colorWithWhite:0.15 alpha:0.65];
    [self.view.layer addSublayer:_greyView.layer];

    _scopeView = [[UIView alloc] initWithFrame:CGRectMake((self.view.frame.size.width / 2) - 160, (self.view.frame.size.height / 2) - 160, 320, 160)];
    _scopeView.backgroundColor = [UIColor clearColor];
    _scopeView.layer.borderColor = [UIColor greenColor].CGColor;
    _scopeView.layer.borderWidth = 1;
    _scopeView.clipsToBounds = YES;
    [self.view addSubview:_scopeView];

    _highlightView = [[UIView alloc] init];
    _highlightView.autoresizingMask = UIViewAutoresizingFlexibleTopMargin|UIViewAutoresizingFlexibleLeftMargin|UIViewAutoresizingFlexibleRightMargin|UIViewAutoresizingFlexibleBottomMargin;
    _highlightView.layer.borderColor = [UIColor greenColor].CGColor;
    _highlightView.layer.borderWidth = 3;
    [_scopeView addSubview:_highlightView];

    [_session startRunning];

    [self.view bringSubviewToFront:_highlightView];
    [self.view bringSubviewToFront:_label];
}

我正在使用 _output.rectOfInterest 将框架指定为与 _scopeView 的框架相同。不幸的是,这不起作用。如果我这样做,则不再识别条形码。

4

1 回答 1

7

刚拿到手,就很清楚了:

AVCaptureMetadataOutput 由像素定义,因此要将显示器的坐标映射到输出中的坐标,我必须使用 metadataOutputRectOfInterestForRect:

From AVCaptureOutput.h:


/*!
@method metadataOutputRectOfInterestForRect:
@abstract
Converts a rectangle in the receiver's coordinate space to a rectangle of interest in the coordinate space of an AVCaptureMetadataOutput
whose capture device is providing input to the receiver.

@param rectInOutputCoordinates
A CGRect in the receiver's coordinates.

@result
A CGRect in the coordinate space of the metadata output whose capture device is providing input to the receiver.

@discussion
AVCaptureMetadataOutput rectOfInterest is expressed as a CGRect where {0,0} represents the top left of the picture area,
and {1,1} represents the bottom right on an unrotated picture.  This convenience method converts a rectangle in
the coordinate space of the receiver to a rectangle of interest in the coordinate space of an AVCaptureMetadataOutput
whose AVCaptureDevice is providing input to the receiver.  The conversion takes orientation, mirroring, and scaling into
consideration.  See -transformedMetadataObjectForMetadataObject:connection: for a full discussion of how orientation and mirroring
are applied to sample buffers passing through the output.
*/

- (CGRect)metadataOutputRectOfInterestForRect:(CGRect)rectInOutputCoordinates NS_AVAILABLE_IOS(7_0);

使用它来设置 rectOfInterest 后它起作用了。

于 2014-10-31T12:24:18.867 回答