I am trying to use the new Apple Vision API to detect a barcode from an image and return its details. I have successfully detected QR codes and read their contents using CIDetector. However, I cannot get this to work for one-dimensional barcodes. Here is an example:

import UIKit
import Vision

class BarcodeDetector {

    func recognizeBarcode(for source: UIImage,
                            complete: @escaping (UIImage) -> Void) {
        var resultImage = source
        let detectBarcodeRequest = VNDetectBarcodesRequest { (request, error) in
            if error == nil {
                if let results = request.results as? [VNBarcodeObservation] {
                    print("Number of Barcodes found: \(results.count)")
                    if results.count == 0 { print("\r") }

                    var barcodeBoundingRects = [CGRect]()
                    for barcode in results {
                        barcodeBoundingRects.append(barcode.boundingBox)
                        // symbology.rawValue is already a String such as "VNBarcodeSymbologyQR"
                        let barcodeType = barcode.symbology.rawValue.replacingOccurrences(of: "VNBarcodeSymbology", with: "")
                        print("-Barcode Type: \(barcodeType)")

                        if barcodeType == "QR", let image = CIImage(image: source) {
                            // boundingBox is normalized (0...1); convert it to pixel coordinates before cropping
                            let rect = VNImageRectForNormalizedRect(barcode.boundingBox, Int(image.extent.width), Int(image.extent.height))
                            self.qrCodeDescriptor(qrCode: barcode, qrCodeImage: image.cropped(to: rect))
                        }
                    }
                    resultImage = self.drawOnImage(source: resultImage, barcodeBoundingRects: barcodeBoundingRects)
                }
            } else {
                print(error!.localizedDescription)
            }
            complete(resultImage)
        }
        let vnImage = VNImageRequestHandler(cgImage: source.cgImage!, options: [:])
        try? vnImage.perform([detectBarcodeRequest])
    }

    private func qrCodeDescriptor(qrCode: VNBarcodeObservation, qrCodeImage: CIImage) {
        if let description = qrCode.barcodeDescriptor as? CIQRCodeDescriptor {
            readQRCode(qrCodeImage: qrCodeImage)
            print(" -Payload: \(description.errorCorrectedPayload)")
            print(" -Mask Pattern: \(description.maskPattern)")
            print(" -Symbol Version: \(description.symbolVersion)\n")
        }
    }

    private func readQRCode(qrCodeImage: CIImage) {
        let detector: CIDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!
        var qrCodeLink = ""

        let features = detector.features(in: qrCodeImage)
        for feature in features as! [CIQRCodeFeature] {
            if let messageString = feature.messageString {
                qrCodeLink += messageString
            }
        }

        if qrCodeLink == "" {
            print(" -No Code Message")
        } else {
            print(" -Code Message: \(qrCodeLink)")
        }
    }

    // drawOnImage(source:barcodeBoundingRects:) is not shown here
}

How can I convert the image to an AVMetadataObject and read it from there? Or is there a better approach?

1 Answer


Swift 4.1, using the Vision framework (no 3rd-party libraries or Pods).

Try this. It works for QR codes as well as other types (Code39 in this example):

func startDetection() {
   let request = VNDetectBarcodesRequest(completionHandler: self.detectHandler)
   request.symbologies = [VNBarcodeSymbology.code39] // or use .QR, etc
   self.requests = [request]
}

func detectHandler(request: VNRequest, error: Error?) {
    guard let observations = request.results else {
        //print("no result")
        return
    }
    let results = observations.compactMap { $0 as? VNBarcodeObservation }
    for result in results {
        // payloadStringValue is nil for symbologies that carry no string payload
        if let payload = result.payloadStringValue {
            print(payload)
        }
    }
}
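
Since the original question is about a still UIImage rather than a live camera feed, the same request can also be run once against a CGImage. The following is only a minimal sketch of that variant; the detectBarcodes(in:completion:) helper name and the symbology list are my own assumptions, not part of the answer:

import UIKit
import Vision

// Hypothetical helper: run VNDetectBarcodesRequest once on a still image,
// restricted to a couple of common 1D symbologies (extend the list as needed).
func detectBarcodes(in image: UIImage, completion: @escaping ([VNBarcodeObservation]) -> Void) {
    guard let cgImage = image.cgImage else { completion([]); return }

    let request = VNDetectBarcodesRequest { request, _ in
        completion((request.results as? [VNBarcodeObservation]) ?? [])
    }
    request.symbologies = [.code39, .code128] // add EAN/UPC symbologies here if you need them

    let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
    DispatchQueue.global(qos: .userInitiated).async {
        do {
            try handler.perform([request])
        } catch {
            print(error)
            completion([])
        }
    }
}

Each returned VNBarcodeObservation then exposes payloadStringValue, symbology and boundingBox exactly as in the camera-based handler above.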

Then, in the AVCaptureVideoDataOutputSampleBufferDelegate callback:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    var requestOptions: [VNImageOption: Any] = [:]
    if let camData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
        requestOptions = [.cameraIntrinsics: camData]
    }
    // rawValue 6 is .right, the usual orientation for back-camera frames with the device in portrait
    let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: CGImagePropertyOrientation(rawValue: 6)!, options: requestOptions)
    do {
        try imageRequestHandler.perform(self.requests)
    } catch {
        print(error)
    }
}
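
The hard-coded CGImagePropertyOrientation(rawValue: 6)! is .right, which suits a back camera with the device held in portrait. If your UI rotates, a small mapping helper may read better. This is only a sketch of one commonly used mapping for the back camera (not from the answer); verify it against the camera position you actually use:

import UIKit
import ImageIO

// Hypothetical helper: map the current device orientation to the image
// orientation Vision expects for frames from the back camera.
func visionOrientation(for deviceOrientation: UIDeviceOrientation) -> CGImagePropertyOrientation {
    switch deviceOrientation {
    case .portrait:           return .right   // same as rawValue 6 above
    case .portraitUpsideDown: return .left
    case .landscapeLeft:      return .up
    case .landscapeRight:     return .down
    default:                  return .right
    }
}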

The rest of the implementation is the usual AVCaptureDevice and AVCaptureSession setup. You also need to conform to AVCaptureVideoDataOutputSampleBufferDelegate; a minimal skeleton showing where that conformance lives follows the code below.

import AVFoundation
import Vision

var captureDevice: AVCaptureDevice!
var session = AVCaptureSession()
var requests = [VNRequest]()

override func viewDidLoad() {
    super.viewDidLoad()
    self.setupVideo()
    self.startDetection()
}

func setupVideo() {
    session.sessionPreset = AVCaptureSession.Preset.photo
    captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
    let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
    let deviceOutput = AVCaptureVideoDataOutput()
    deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    session.addInput(deviceInput)
    session.addOutput(deviceOutput)
    let imageLayer = AVCaptureVideoPreviewLayer(session: session)
    imageLayer.frame = imageView.bounds   // imageView is a view (e.g. an outlet) on your view controller
    imageView.layer.addSublayer(imageLayer)
    session.startRunning()
}
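
For completeness, here is roughly how these pieces can hang together. The ScannerViewController class name and the imageView outlet are assumptions for illustration; only the stored properties and the delegate conformance come from the answer:

import UIKit
import AVFoundation
import Vision

// Hypothetical skeleton: a view controller that owns the capture session,
// conforms to the sample-buffer delegate, and hosts the methods shown above.
class ScannerViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var imageView: UIImageView!   // hosts the preview layer

    var captureDevice: AVCaptureDevice!
    var session = AVCaptureSession()
    var requests = [VNRequest]()

    override func viewDidLoad() {
        super.viewDidLoad()
        // setupVideo() and startDetection() from the answer go here
    }

    // setupVideo(), startDetection(), detectHandler(request:error:) and
    // captureOutput(_:didOutput:from:) are the methods shown earlier.
}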
Answered 2018-07-10T23:16:45.097