I'm fairly new to Swift and have found Stack Overflow very helpful for troubleshooting so far. In my current project, I'm trying to add a confidence label, similar to the one in the attached screenshot, on top of the capture functionality that already exists:

(Screenshot: confidence label)

When I run my code, the camera view and the capture functionality work, but the object recognition (Inception V3) doesn't seem to receive any data from the AVCaptureOutput (and it never prints results to the console).

I'm not getting any error messages, so I don't know what I'm doing wrong. Any feedback would be greatly appreciated.

Thanks!
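
For completeness, the setup functions below are invoked in sequence from viewDidLoad; the exact call site isn't included in my snippet, so this is a sketch of the assumed order:

override func viewDidLoad() {
    super.viewDidLoad()
    // Assumed call order (the call site is not part of the original snippet).
    setupCaptureSession()
    setupDevice()
    setupInputOutput()
    setupPreviewLayer()
    setupRunningCaptureSession()
}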

func setupCaptureSession() {
    // Use the high-quality photo preset for the session.
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
}


func setupDevice() {
    // Discover the built-in wide-angle cameras and keep references to the front and back devices.
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
    let devices = deviceDiscoverySession.devices

    for device in devices {
        if device.position == AVCaptureDevice.Position.back {
            backCamera = device
        } else if device.position == AVCaptureDevice.Position.front {
            frontCamera = device
        }
    }

    currentCamera = backCamera
}

func setupInputOutput() {
    do {
        // Attach the selected camera as input and a photo output for still capture.
        let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
        captureSession.addInput(captureDeviceInput)
        photoOutput = AVCapturePhotoOutput()
        photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
        captureSession.addOutput(photoOutput!)
    } catch {
        print(error)
    }
}

func setupPreviewLayer() {
    // Insert a full-screen, portrait-oriented preview layer behind the other views.
    camerapreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    camerapreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    camerapreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
    camerapreviewLayer?.frame = self.view.frame
    self.view.layer.insertSublayer(camerapreviewLayer!, at: 0)
}

func setupRunningCaptureSession() {
    captureSession.startRunning()
}


func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // print("Camera was able to capture a frame:", Date())

    guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // Wrap the Inception V3 Core ML model for use with Vision.
    guard let model = try? VNCoreMLModel(for: Inceptionv3().model) else { return }
    let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
        // perhaps check the err

        // print(finishedReq.results)

        guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
        guard let firstObservation = results.first else { return }

        print(firstObservation.identifier, firstObservation.confidence)

        // Update the confidence label on the main thread.
        DispatchQueue.main.async {
            self.confidenceLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
        }
    }

    // Run the classification request on the current frame.
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
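
For reference, captureOutput(_:didOutput:from:) is the AVCaptureVideoDataOutputSampleBufferDelegate callback, so as I understand it, it should only fire once an AVCaptureVideoDataOutput with a sample-buffer delegate has been added to the session. A minimal sketch of that wiring, assuming the view controller itself is the delegate and using a placeholder queue label, would look like this:

// Sketch only: assumes the containing class conforms to
// AVCaptureVideoDataOutputSampleBufferDelegate, and that AVFoundation
// is imported at file scope (as with the rest of the snippet).
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue")) // "videoQueue" is a placeholder label
if captureSession.canAddOutput(videoDataOutput) {
    captureSession.addOutput(videoDataOutput)
}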