I'm doing object detection and I'm using UIViewControllerRepresentable to add my view controller. The problem is that I can't pass data from my ViewController to my SwiftUI view, even though I can print it from inside the ViewController.
Can anyone help me? Here is my code, followed by a sketch of what I think might work:
import SwiftUI
import AVKit
import UIKit
import Vision
// screen width, used to size the camera preview
let SVWidth = UIScreen.main.bounds.width
struct MaskDetectionView: View {
    let hasMaskColor = Color.green
    let noMaskColor = Color.red
    let shadowColor = Color.gray

    var body: some View {
        VStack(alignment: .center) {
            VStack(alignment: .center) {
                Text("Please place your head inside the bounded box.")
                    .font(.system(size: 15, weight: .regular, design: .default))
                Text("For better results, show your entire face.")
                    .font(.system(size: 15, weight: .regular, design: .default))
            }.padding(.top, 10)

            VStack(alignment: .center) {
                SwiftUIViewController()
                    .frame(width: SVWidth - 30, height: SVWidth + 30, alignment: .center)
                    .background(Color.white)
                    .cornerRadius(25)
                    .shadow(color: hasMaskColor, radius: 7, x: 0, y: 0)
                    .padding(.top, 30)
                Spacer()
                /// VALUE HERE: this is where I want to display the classification result
            }
        }.padding()
    }
}
struct MaskDetectionView_Previews: PreviewProvider {
    static var previews: some View {
        MaskDetectionView()
    }
}
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    var result = String()

    override func viewDidLoad() {
        super.viewDidLoad()

        // 1 - create the capture session
        let capture_session = AVCaptureSession()
        //capture_session.sessionPreset = .vga640x480

        // 2 - pick the front camera & add it as input
        guard let capture_device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) else { return }
        guard let input = try? AVCaptureDeviceInput(device: capture_device) else { return }
        capture_session.addInput(input)

        // 3 - the layer on screen that shows the picture
        let previewLayer = AVCaptureVideoPreviewLayer(session: capture_session)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame.size = CGSize(width: SVWidth, height: SVWidth + 40)
        previewLayer.videoGravity = .resizeAspectFill

        // 4 - the produced output, i.e. the sample buffer for each video frame
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        capture_session.addOutput(dataOutput)

        // 5 - run the session once input and output are attached
        capture_session.startRunning()
    }
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // our model (I know building it on every frame is wasteful; it could be a stored property)
        guard let model = try? VNCoreMLModel(for: SqueezeNet(configuration: MLModelConfiguration()).model) else { return }

        // classification request for our model
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
            if let error = err {
                print("failed to run the classification:", error)
                return
            }
            // grab the top classification result
            guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
            guard let first_observation = results.first else { return }
            self.result = first_observation.identifier
            print(self.result)  // this prints fine, but I can't get it into my SwiftUI view
        }

        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }
}
struct SwiftUIViewController: UIViewControllerRepresentable {
    func makeUIViewController(context: Context) -> ViewController {
        return ViewController()
    }

    func updateUIViewController(_ uiViewController: ViewController, context: Context) {
    }
}
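Based on examples I've seen, I think the way to get the value out might be a callback from the ViewController into a @Binding on the representable, something like the sketch below. The onResult property and the detectionResult state are names I made up (they are not in my code above), and I'm not sure this is the right pattern:

// Sketch only: my guess at wiring the result back to SwiftUI.

// In ViewController, a hypothetical callback property (my addition):
//     var onResult: ((String) -> Void)?
// ...and inside the VNCoreMLRequest completion handler, after setting self.result:
//     self.onResult?(first_observation.identifier)

// Then the representable would take a Binding and update it via the callback,
// replacing my SwiftUIViewController above:
struct SwiftUIViewController: UIViewControllerRepresentable {
    @Binding var result: String

    func makeUIViewController(context: Context) -> ViewController {
        let vc = ViewController()
        vc.onResult = { value in
            // hop back to the main thread before touching SwiftUI state
            DispatchQueue.main.async { self.result = value }
        }
        return vc
    }

    func updateUIViewController(_ uiViewController: ViewController, context: Context) {}
}

// And in MaskDetectionView:
//     @State private var detectionResult = ""
//     ...
//     SwiftUIViewController(result: $detectionResult)
//     ...
//     Text(detectionResult)   // at the /// VALUE HERE spot

Is this the correct way to connect the two, or is there a better mechanism (Coordinator, ObservableObject, etc.) for this?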