I'm following this Apple sample code and would like to know how to clear the input buffer (that is, result in this case) so that dictation starts over after a word is spoken.

For example:

As the user speaks, the words are appended to result.bestTranscription.formattedString. So if I say "white", "purple", "yellow", result.bestTranscription.formattedString reads "white purple yellow", and words keep getting appended until the buffer stops (apparently after about a minute). I perform an action whenever a given word is spoken, so if you say "blue", I check whether the buffer contains "blue" (or "Blue"), and since it does, I move on to the next activity and reset the buffer.
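Since SFSpeechAudioBufferRecognitionRequest doesn't appear to offer a way to clear its accumulated transcription, one workaround I've been considering is to track how many transcription segments have already been handled and only react to new ones. A minimal sketch (lastSegmentCount and handleNewWords are hypothetical names, not part of my code below):

private var lastSegmentCount = 0  // hypothetical property, for illustration only

private func handleNewWords(in transcription: SFTranscription) {
    let segments = transcription.segments
    guard segments.count > lastSegmentCount else { return }
    // Act only on the words added since the last callback.
    for segment in segments[lastSegmentCount...] {
        print("New word: \(segment.substring)")
    }
    lastSegmentCount = segments.count
}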

However, when I stop recognition and try to restart it, I get this error:

2020-09-09 18:25:44.247575-0400 TestApp[28460:1337362] [Utility] +[AFAggregator logDictationFailedWithError:] Error Domain=kAFAssistantErrorDomain Code=209 "(null)"

Stopping the audio detection when it hears "blue" works fine, but as soon as I try to re-initialize the speech recognition code, it chokes. Here is my recognition task:

// Create a recognition task for the speech recognition session.
// Keep a reference to the task so that it can be canceled.
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
    var isFinal = false
    
    if let result = result {
        // Update the text view with the results.
        self.speechInput = result.bestTranscription.formattedString
        isFinal = result.isFinal
        print("Text \(result.bestTranscription.formattedString)")
    }
    
    if result?.bestTranscription.formattedString.range(of: "blue", options: .caseInsensitive) != nil {
        self.ColorView.backgroundColor = .random()
        isFinal = true
    }
    
    if error != nil || isFinal {
        // Stop recognizing speech if there is a problem.
        self.audioEngine.stop()
        self.recognitionRequest?.endAudio() // Necessary
        inputNode.removeTap(onBus: 0)

        self.recognitionRequest = nil
        self.recognitionTask = nil
    }
    
}
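My suspicion is that calling start() again from inside the result handler (as in the full code below, where self.start() chokes) is part of the problem: the new request is created while the old task is still delivering callbacks. One variation I've considered is deferring the restart until after the teardown has completed, something like this sketch:

if error != nil || isFinal {
    // Tear down the old session first.
    self.audioEngine.stop()
    self.recognitionRequest?.endAudio()
    inputNode.removeTap(onBus: 0)

    self.recognitionRequest = nil
    self.recognitionTask = nil

    // Defer the restart so the old task's callbacks can finish.
    DispatchQueue.main.async {
        self.start()
    }
}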

Here is the full code:

import UIKit
import Speech

extension CGFloat {
    static func random() -> CGFloat {
        
        return CGFloat(arc4random()) / CGFloat(UInt32.max)
    }
}

extension UIColor {
    static func random() -> UIColor {
        return UIColor(
           red:   .random(),
           green: .random(),
           blue:  .random(),
           alpha: 1.0
        )
    }
}

class ViewController: UIViewController, SFSpeechRecognizerDelegate  {
    @IBOutlet var ColorView: UIView!
    @IBOutlet weak var StartButton: UIButton!
    
    private let speechRecognizer = SFSpeechRecognizer()!
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()
    
    private var speechInput: String = ""
    
    override func viewDidLoad() {
        super.viewDidLoad()
        
        // Create and configure the speech recognition request.
        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        guard let recognitionRequest = recognitionRequest else { fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") }
        recognitionRequest.shouldReportPartialResults = true
        
        // Do any additional setup after loading the view.
        
    }
    
    func start() {
        // Cancel the previous task if it's running.
        recognitionTask?.cancel()
        self.recognitionTask = nil
        
        // Configure the audio session for the app.
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
        } catch {}
        do {
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
            
        } catch {}
        let inputNode = audioEngine.inputNode

        // Create and configure the speech recognition request.
        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        guard let recognitionRequest = recognitionRequest else { fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") }
        recognitionRequest.shouldReportPartialResults = true
        
        // Allow server-based recognition; on-device-only recognition is not required.
        if #available(iOS 13, *) {
            recognitionRequest.requiresOnDeviceRecognition = false
        }
        
        // Create a recognition task for the speech recognition session.
        // Keep a reference to the task so that it can be canceled.
        recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
            var isFinal = false
            
            if let result = result {
                // Update the text view with the results.
                self.speechInput = result.bestTranscription.formattedString
                isFinal = result.isFinal
                print("Text \(result.bestTranscription.formattedString)")
            }
            
            if result?.bestTranscription.formattedString.range(of: "blue", options: .caseInsensitive) != nil {
                self.ColorView.backgroundColor = .random()
                isFinal = true
            }
            
            if error != nil || isFinal {
                // Stop recognizing speech if there is a problem.
                self.audioEngine.stop()
                self.recognitionRequest?.endAudio() // Necessary
                inputNode.removeTap(onBus: 0)

                self.recognitionRequest = nil
                self.recognitionTask = nil
                
                self.start() //This chokes it
            }
            
        }

        // Configure the microphone input.
        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            self.recognitionRequest?.append(buffer)
        }
        
        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {}
        
        // Let the user know to start talking.
        //textView.text = "(Go ahead, I'm listening)"
    }
    
    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        // Configure the SFSpeechRecognizer object already
        // stored in a local member variable.
        speechRecognizer.delegate = self
        
        // Asynchronously make the authorization request.
        SFSpeechRecognizer.requestAuthorization { authStatus in

            // Divert to the app's main thread so that the UI
            // can be updated.
            OperationQueue.main.addOperation {
                switch authStatus {
                case .authorized:
                    self.ColorView.backgroundColor = .green
                    
                case .denied:
                    self.ColorView.backgroundColor = .red
                    
                case .restricted:
                    self.ColorView.backgroundColor = .orange
                    
                case .notDetermined:
                    self.ColorView.backgroundColor = .gray
                    
                @unknown default:
                    self.ColorView.backgroundColor = .random()
                }
            }
        }
    }
    
    @IBAction func start(_ sender: Any) {
        start()
    }
    
    public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        if available {
            //recordButton.isEnabled = true
            //recordButton.setTitle("Start Recording", for: [])
        } else {
            //recordButton.isEnabled = false
            //recordButton.setTitle("Recognition Not Available", for: .disabled)
        }
    }
}
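For completeness, here is a fuller teardown I've sketched out (tearDownRecognition is a hypothetical helper, not part of the code above), in case leftover audio-session state is what chokes the second run:

private func tearDownRecognition() {
    audioEngine.stop()
    audioEngine.inputNode.removeTap(onBus: 0)
    recognitionRequest?.endAudio()
    recognitionTask?.cancel()
    recognitionRequest = nil
    recognitionTask = nil
    // Deactivate the session so the next start() begins from a clean state.
    try? AVAudioSession.sharedInstance().setActive(false, options: .notifyOthersOnDeactivation)
}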

I'm sure I'm missing something simple. Any suggestions?
