
I want to build an iOS app that records and saves RGB + depth data. I can already capture data from the dual camera and preview it on screen in real time. Now I want to save it to the library as two sequences (one RGB sequence and one depth-map sequence).

So my question is: how can I save the depth information, together with the RGB information, to the iPhone photo library as videos or sequences, so that the depth can be processed later?

I'm using Xcode 10.2, Swift 5, and an iPhone XS.

import UIKit
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var previewView: UIImageView!
    @IBOutlet weak var previewModeControl: UISegmentedControl!


    var previewMode = PreviewMode.original //Original(RGB) or Depth
    let session = AVCaptureSession()
    let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)

    var background: CIImage?
    var depthMap: CIImage?
    var scale: CGFloat = 0.0

    override func viewDidLoad() {
        super.viewDidLoad()

        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
        configureCaptureSession()
        session.startRunning()

    }

    override var shouldAutorotate: Bool {
        return false
    }

    func configureCaptureSession() {

        session.beginConfiguration()

        //Add input to the session
        guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .unspecified) else {
            fatalError("No depth video camera available")
        }

        session.sessionPreset = .photo

        do{
            let cameraInput = try AVCaptureDeviceInput(device: camera)
            if session.canAddInput(cameraInput){
                session.addInput(cameraInput)
            }else{
                fatalError("Error adding input device to session")
            }
        }catch{
            fatalError(error.localizedDescription)
        }

        //Add output to the session
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
        if session.canAddOutput(videoOutput){
            session.addOutput(videoOutput)
        }else{
            fatalError("Error adding output to session")
        }

        let videoConnection = videoOutput.connection(with: .video)
        videoConnection?.videoOrientation = .portrait

        //Add depth data output to the session
        let depthOutput = AVCaptureDepthDataOutput()
        //Set the current view controller as the delegate for the new object
        depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
        depthOutput.isFilteringEnabled = true //interpolate the depth data to fill any holes
        if session.canAddOutput(depthOutput){
            session.addOutput(depthOutput)
        }else{
            fatalError("Error adding output to session")
        }
        let depthConnection = depthOutput.connection(with: .depthData)
        depthConnection?.videoOrientation = .portrait

        let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
        let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
        do{
            try camera.lockForConfiguration()
            if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
                camera.activeVideoMinFrameDuration = frameDuration
            }
            camera.unlockForConfiguration()
        }catch{
            fatalError(error.localizedDescription)
        }
        session.commitConfiguration()
    }

    @IBAction func previewModeChanged(_ sender: UISegmentedControl) {

        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original

    }
}

extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let image = CIImage(cvPixelBuffer: pixelBuffer)

        let previewImage: CIImage

        switch previewMode {
        case .original:
            previewImage = image
        case .depth:
            previewImage = depthMap ?? image
        //default:
            //previewImage = image
        }

        let displayImage = UIImage(ciImage: previewImage)
        DispatchQueue.main.async {
            [weak self] in self?.previewView.image = displayImage
        }
    }
}

extension ViewController: AVCaptureDepthDataOutputDelegate{
    func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {

        if previewMode == .original{
            return
        }

        var convertedDepth: AVDepthData
        if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32{
            convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
        }else{
            convertedDepth = depthData
        }
        let pixelBuffer = convertedDepth.depthDataMap
        pixelBuffer.clamp()
        let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
        DispatchQueue.main.async {
            [weak self] in self?.depthMap = depthMap
        }   
    }
}

Right now the only result is a real-time on-screen preview of whichever CIImage is selected in the UI (the RGB image or the depth map); nothing is saved yet.
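The direction I've been considering is to feed the two callbacks into two separate AVAssetWriter instances (one .mov for the BGRA frames, one for the depth). This is only a rough sketch of the idea, the class and property names are placeholders and I haven't gotten it working yet:

import AVFoundation

//Sketch only: one writer per stream (RGB and depth), fed from the capture callbacks.
//All names here are placeholders, this is not tested code.
final class StreamRecorder {

    private let writer: AVAssetWriter
    private let input: AVAssetWriterInput
    private let adaptor: AVAssetWriterInputPixelBufferAdaptor
    private var sessionStarted = false

    init(outputURL: URL, width: Int, height: Int) throws {
        writer = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
        let settings: [String: Any] = [AVVideoCodecKey: AVVideoCodecType.h264,
                                       AVVideoWidthKey: width,
                                       AVVideoHeightKey: height]
        input = AVAssetWriterInput(mediaType: .video, outputSettings: settings)
        input.expectsMediaDataInRealTime = true
        adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input, sourcePixelBufferAttributes: nil)
        writer.add(input)
    }

    //Append one frame; the first call starts the writing session at that timestamp
    func append(_ pixelBuffer: CVPixelBuffer, at time: CMTime) {
        if !sessionStarted {
            guard writer.startWriting() else { return }
            writer.startSession(atSourceTime: time)
            sessionStarted = true
        }
        if input.isReadyForMoreMediaData {
            let appended = adaptor.append(pixelBuffer, withPresentationTime: time)
            if !appended { print("Dropped a frame at \(time.seconds)") }
        }
    }

    func finish(completion: @escaping () -> Void) {
        input.markAsFinished()
        writer.finishWriting(completionHandler: completion)
    }
}

In captureOutput(_:didOutput:from:) I would then call something like rgbRecorder.append(pixelBuffer, at: CMSampleBufferGetPresentationTimeStamp(sampleBuffer)), and do the same in the depth callback using its timestamp parameter. What I don't know is whether an H.264 .mov is a reasonable container for the depth stream at all, since the disparity map is Float32 and encoding it as 8-bit video throws away precision.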

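For the depth side, the only approach I've come up with so far is to render the clamped disparity CIImage into a plain BGRA pixel buffer so that the writer's adaptor can accept it. Again just a sketch with made-up names:

import CoreImage
import CoreVideo

let ciContext = CIContext()

//Sketch: render the clamped (0...1) disparity CIImage into a BGRA CVPixelBuffer
//so that an AVAssetWriterInputPixelBufferAdaptor can append it.
//This quantizes the Float32 disparity to 8 bits, which may not be acceptable.
func makePixelBuffer(from depthImage: CIImage, width: Int, height: Int) -> CVPixelBuffer? {
    let attributes: [String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true,
                                     kCVPixelBufferCGBitmapContextCompatibilityKey as String: true]
    var buffer: CVPixelBuffer?
    let status = CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                                     kCVPixelFormatType_32BGRA, attributes as CFDictionary, &buffer)
    guard status == kCVReturnSuccess, let output = buffer else { return nil }
    ciContext.render(depthImage, to: output)
    return output
}

Once both .mov files are written, I assume I could add them to the photo library with PHPhotoLibrary.shared().performChanges and PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL:). But is there a recommended way to store the depth data without losing the Float32 precision, for example as a per-frame image sequence instead of a compressed video?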