I am doing object detection and I am using UIViewControllerRepresentable to add my view controller. The problem is that I cannot pass data from my ViewController to my SwiftUI view, even though I can print it.

Can anyone help me? Here is my code:


import SwiftUI
import AVKit
import UIKit
import Vision
let SVWidth = UIScreen.main.bounds.width

struct MaskDetectionView: View {
    let hasMaskColor = Color.green
    let noMaskColor = Color.red
    let shadowColor = Color.gray
    
    var body: some View {
        VStack(alignment: .center) {
            VStack(alignment: .center) {
                Text("Please place your head inside the bounded box.")
                    .font(.system(size: 15, weight: .regular, design: .default))
                Text("For better result, show your entire face.")
                    .font(.system(size: 15, weight: .regular, design: .default))
            }.padding(.top, 10)
            
            VStack(alignment: .center) {
                SwiftUIViewController()
                    .frame(width: SVWidth - 30, height: SVWidth + 30, alignment: .center)
                    .background(Color.white)
                    .cornerRadius(25)
                    .shadow(color: hasMaskColor, radius: 7, x: 0, y: 0)
                    .padding(.top, 30)
                Spacer()

///      VALUE HERE
            }

        }.padding()
    }
}

struct MaskDetectionView_Previews: PreviewProvider {
    static var previews: some View {
        MaskDetectionView()
        
    }
}


class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    
    var result = String()
    //ALL THE OBJECTS
    override func viewDidLoad() {
        super.viewDidLoad()
        
        // 1 - start session
        let capture_session = AVCaptureSession()
        //capture_session.sessionPreset = .vga640x480
        
        // 2 - set the device front & add input
        guard let capture_device = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera, for: .video, position: .front) else {return}
        
        guard let input = try? AVCaptureDeviceInput(device: capture_device) else { return }
        capture_session.addInput(input)
        
        // 3 - the layer on screen that shows the picture
        let previewLayer = AVCaptureVideoPreviewLayer(session: capture_session)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame.size = CGSize(width: SVWidth, height: SVWidth + 40)
        previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        
        // 4 - run the session
        capture_session.startRunning()
        
        // 5 - the produced output aka image or video
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        capture_session.addOutput(dataOutput)
    }
    
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection){
        // our model
        // note: building a VNCoreMLModel on every frame is expensive; ideally
        // create it once (e.g. as a property) and reuse it here
        guard let model = try? VNCoreMLModel(for: SqueezeNet(configuration: MLModelConfiguration()).model) else { return }
        // request for our model
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
            if let error = err {
                print("failed to detect faces:", error)
                return
                
            }
            //result
            guard let results = finishedReq.results as? [VNClassificationObservation] else {return}
            guard let first_observation = results.first else {return}
            
            self.result = first_observation.identifier
            print(self.result)
            
        }
        
        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }

}






struct SwiftUIViewController: UIViewControllerRepresentable {
    
    func makeUIViewController(context: Context) -> ViewController{
        return ViewController()
    }
    
    func updateUIViewController(_ uiViewController: ViewController, context: Context) {
        
    }
    
}





3 Answers

In short, you will need a Binding instance that you loop through your UI hierarchy (this includes both the SwiftUI and the UIKit code). The Binding transparently updates the data on all views it is connected to, no matter who made the change.

The data flow diagram might look like this: [diagram omitted]
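
As a minimal, self-contained sketch of that mechanism (independent of the mask-detection code), both views below read and write the same String through a Binding:

import SwiftUI

struct ParentView: View {
    @State private var label = "initial"

    var body: some View {
        VStack {
            Text("Parent sees: \(label)")  // updates automatically
            ChildView(label: $label)       // pass the binding down
        }
    }
}

struct ChildView: View {
    @Binding var label: String

    var body: some View {
        Button("Change from child") {
            label = "changed by child"     // writes back through the binding
        }
    }
}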

First of all, you need a @State in your SwiftUI view to store the classification identifier, so that it can be connected both to your view controller and to the other UI element that will display it:

struct MaskDetectionView: View {
    @State var clasificationIdentifier: String = ""

Next, you need to pass it to the view controller and to some UI element:

var body: some View {
    ...
    SwiftUIViewController(identifier: $clasificationIdentifier)
    ...
    // this is the "VALUE HERE" from your question
    Text("Clasification identifier: \(clasificationIdentifier)")

Now that the binding is injected correctly, let's update the UIKit side of the code so it can receive it.

Update your representable view so that it looks like this:

struct SwiftUIViewController: UIViewControllerRepresentable {
    
    // this is the binding that we receive from the SwiftUI side
    let identifier: Binding<String>
    
    // this will be the delegate of the view controller; its role is to allow
    // the data transfer from UIKit to SwiftUI
    class Coordinator: ViewControllerDelegate {
        let identifierBinding: Binding<String>
        
        init(identifierBinding: Binding<String>) {
            self.identifierBinding = identifierBinding
        }
        
        func clasificationOccured(_ viewController: ViewController, identifier: String) {
            // whenever the view controller notifies its delegate about a new identifier,
            // the code below propagates the change up to SwiftUI; hop onto the main
            // queue because the capture delegate fires on a background video queue
            DispatchQueue.main.async {
                self.identifierBinding.wrappedValue = identifier
            }
        }
    }
    
    func makeUIViewController(context: Context) -> ViewController{
        let vc = ViewController()
        vc.delegate = context.coordinator
        return vc
    }
    
    func updateUIViewController(_ uiViewController: ViewController, context: Context) {
        // update the controller data, if needed
    }
    
    // this is very important, this coordinator will be used in `makeUIViewController`
    func makeCoordinator() -> Coordinator {
        Coordinator(identifierBinding: identifier)
    }
}

The last piece of the puzzle is to write the view controller delegate, and the code that uses it:

protocol ViewControllerDelegate: AnyObject {
    func clasificationOccured(_ viewController: ViewController, identifier: String)
}

class ViewController: UIViewController {
    
    weak var delegate: ViewControllerDelegate?

    ...

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        ...
  
        print(self.result)

        // let's tell the delegate we found a new clasification
        // the delegate, aka the Coordinator will then update the Binding
        // the Binding will update the State, and this change will be
        // propagated to the Text() element in the SwiftUI view
        delegate?.clasificationOccured(self, identifier: self.result)
    }
answered 2020-11-23T07:38:32.847

Swift has several ways of letting you pass data back and forth between views and objects, for example delegates, Key-Value Observing, or, specifically for SwiftUI, the property wrappers @State, @Binding, @ObservableObject and @ObservedObject. For displaying data in a SwiftUI view, however, you will want the property wrappers.
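
For illustration, a minimal sketch of the observable-object route (the DetectionModel name and its result property are made up for this example, not taken from the question):

import SwiftUI
import Combine

// the view controller writes into this model, SwiftUI reads from it
class DetectionModel: ObservableObject {
    @Published var result = ""
}

struct ResultView: View {
    @ObservedObject var model: DetectionModel

    var body: some View {
        // re-renders automatically whenever model.result changes
        Text("Result: \(model.result)")
    }
}

// UIKit side: keep a reference to the same model and write to it
// on the main thread, e.g. model.result = "mask"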

If you want to do it the SwiftUI way, have a look at the @State and @Binding property wrappers, and at how to use a coordinator in your UIViewControllerRepresentable struct. Add a @State property to your SwiftUI view and pass it as a binding to your UIViewControllerRepresentable:

// Declare a new property in struct MaskDetectionView and pass it to SwiftUIViewController as a binding
@State var result: String = ""
...
SwiftUIViewController(resultText: $result)

// Add your new binding as a property in the SwiftUIViewController struct
@Binding var resultText: String

This way you expose part of your SwiftUI view (e.g. the result string, which you can use in a Text inside your view) to the UIViewControllerRepresentable. From there you can pass it further down to the ViewController, and/or have a look at the following article about coordinators: https://www.hackingwithswift.com/books/ios-swiftui/using-coordinators-to-manage-swiftui-view-controllers

In my opinion, wrapping your camera work in a separate ViewController class is redundant and can be handled by the coordinator instead. The following steps should get your view controller up and running (a sketch follows the list):

  1. Create the view controller code in makeUIViewController, including the setup of the AVKit objects
  2. Make sure context.coordinator is set as the delegate instead of self
  3. Create a nested Coordinator class inside SwiftUIViewController and declare it as your AVCaptureVideoDataOutputSampleBufferDelegate
  4. Add a property to the coordinator that holds a reference to its parent, and implement the initializer and the makeCoordinator function so that the coordinator stores this reference
  5. If everything is set up correctly so far, you can now implement your AVCaptureVideoDataOutputSampleBufferDelegate methods in the coordinator class and update the binding whenever something is detected and a result is returned
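
A minimal sketch of those steps might look like this (the CameraView name is hypothetical, the Vision code from the question is reduced to a placeholder, and error handling is omitted):

import SwiftUI
import AVFoundation

struct CameraView: UIViewControllerRepresentable {
    @Binding var result: String

    func makeUIViewController(context: Context) -> UIViewController {
        // step 1: set up the AVKit objects here instead of in a separate ViewController
        let vc = UIViewController()
        let session = AVCaptureSession()

        if let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front),
           let input = try? AVCaptureDeviceInput(device: device) {
            session.addInput(input)
        }

        let previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.frame = vc.view.bounds
        vc.view.layer.addSublayer(previewLayer)

        let output = AVCaptureVideoDataOutput()
        // step 2: the coordinator, not self, is the sample buffer delegate
        output.setSampleBufferDelegate(context.coordinator, queue: DispatchQueue(label: "videoQueue"))
        session.addOutput(output)
        session.startRunning()
        return vc
    }

    func updateUIViewController(_ uiViewController: UIViewController, context: Context) {}

    func makeCoordinator() -> Coordinator { Coordinator(parent: self) }

    // step 3: the nested coordinator is the AVCapture delegate
    class Coordinator: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
        let parent: CameraView // step 4: reference back to the representable

        init(parent: CameraView) { self.parent = parent }

        func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            // step 5: run the Vision request here (as in the question) and
            // push the classification into the binding on the main thread
            DispatchQueue.main.async {
                self.parent.result = "<classification>"
            }
        }
    }
}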
answered 2020-11-23T01:17:44.023

Protocols (interfaces in other languages) make life easy in use cases like this, and they are also very easy to use:

1 - Define a protocol in a suitable place

2 - Implement it in the desired view (class or struct)

3 - Pass a reference to the implementing object to the caller class or struct

Example below:

// Protocol
protocol MyDataReceiverDelegate {
    func dataReceived(data: String) // any type of data you need; String is used here
}

struct MaskDetectionView: View, MyDataReceiverDelegate { // implementing struct
    func dataReceived(data: String) {
        // write your code here to process the received data
        print(data)
    }

    var body: some View {
        // your views come here
        VStack(alignment: .center) {
            SwiftUIViewController(parent: self)
        }
    }
}

// custom view
struct SwiftUIViewController: UIViewControllerRepresentable {
    let parent: MaskDetectionView

    func makeUIViewController(context: Context) -> ViewController {
        return ViewController(delegate: parent)
    }

    func updateUIViewController(_ uiViewController: ViewController, context: Context) {
    }
}


// caller class
// the rest of your code is omitted for simplicity
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    let delegate: MyDataReceiverDelegate

    init(delegate d: MyDataReceiverDelegate) {
        self.delegate = d
        super.init(nibName: nil, bundle: nil)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        // your setup code comes here
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // the rest of your code comes here
        delegate.dataReceived(data: "Data you want to pass to the parent view")
    }
}
answered 2020-11-23T06:26:59.153