1

我已经在 iOS Alexa 应用程序上工作了一段时间,但我正在努力将麦克风音频作为流发送到 AVS API。

我成功地预先录制了一个音频样本并将其作为一个整体发送并得到响应。

我只想知道如何使用 NSURLSession http/2 连接将数据流式传输到 AVS。

这是我现在正在做的代码片段:

/// Sends one SpeechRecognizer.Recognize event (JSON metadata part + captured
/// audio part) to AVS as a multipart POST and forwards the parsed directive
/// parts to `successHandler` / failures to `errorHandler`.
///
/// NOTE(review): for true streaming, the metadata part must be sent once per
/// dialog and the same boundary/HTTP2 stream reused across audio chunks —
/// calling this per 320-byte chunk sends a complete new request each time.
func sendData() {
        let request = NSMutableURLRequest(URL: NSURL(string: "https://avs-alexa-na.amazon.com/v20160207/events")!)
        request.setValue("Bearer \(Settings.Credentials.TOKEN)", forHTTPHeaderField: "authorization")
        request.HTTPMethod = "POST"

        let boundry = NSUUID().UUIDString
        let contentType = "multipart/form-data; boundary=\(boundry)"
        request.setValue(contentType, forHTTPHeaderField: "content-type")

        let bodyData = NSMutableData()

        // Context + SpeechRecognizer.Recognize event envelope required by AVS.
        let jsonData = "{\"context\":[{\"header\":{\"namespace\":\"Alerts\",\"name\":\"AlertsState\"},\"payload\":{\"allAlerts\":[],\"activeAlerts\":[]}},{\"header\":{\"namespace\":\"AudioPlayer\",\"name\":\"PlaybackState\"},\"payload\":{\"token\":\"\",\"offsetInMilliseconds\":0,\"playerActivity\":\"IDLE\"}},{\"header\":{\"namespace\":\"Speaker\",\"name\":\"VolumeState\"},\"payload\":{\"volume\":25,\"muted\":false}},{\"header\":{\"namespace\":\"SpeechSynthesizer\",\"name\":\"SpeechState\"},\"payload\":{\"token\":\"\",\"offsetInMilliseconds\":0,\"playerActivity\":\"FINISHED\"}}],\"event\":{\"header\":{\"namespace\":\"SpeechRecognizer\",\"name\":\"Recognize\",\"messageId\":\"messageId-123\",\"dialogRequestId\":\"dialogRequestId-321\"},\"payload\":{\"profile\":\"CLOSE_TALK\",\"format\":\"AUDIO_L16_RATE_16000_CHANNELS_1\"}}}"

        // Part 1: JSON metadata.
        bodyData.appendData("--\(boundry)\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)
        bodyData.appendData("Content-Disposition: form-data; name=\"metadata\"\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)
        bodyData.appendData("Content-Type: application/json; charset=UTF-8\r\n\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)
        bodyData.appendData(jsonData.dataUsingEncoding(NSUTF8StringEncoding)!)
        bodyData.appendData("\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)

        // Part 2: raw audio bytes.
        bodyData.appendData("--\(boundry)\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)
        bodyData.appendData("Content-Disposition: form-data; name=\"audio\"\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)
        bodyData.appendData("Content-Type: application/octet-stream\r\n\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)
        bodyData.appendData(audioData!)
        bodyData.appendData("\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)

        // BUG FIX: the terminating delimiter of a multipart body must end with
        // "--" (RFC 2046 §5.1.1). The original sent "--boundary\r\n", which a
        // strict parser treats as the start of another (truncated) part.
        bodyData.appendData("--\(boundry)--\r\n".dataUsingEncoding(NSUTF8StringEncoding)!)

        // BUG FIX: mutating sharedSession().configuration has no effect — a
        // session copies its configuration at creation time. Build the session
        // from a configured NSURLSessionConfiguration instead. Timeouts are in
        // SECONDS, so the original 60000 was ~16.7 hours; 60 s is intended.
        let config = NSURLSessionConfiguration.defaultSessionConfiguration()
        config.timeoutIntervalForResource = 60
        config.timeoutIntervalForRequest = 60
        session = NSURLSession(configuration: config)

        let upload = session.uploadTaskWithRequest(request, fromData: bodyData) { (data, response, error) in
            // BUG FIX: surface transport-level failures instead of dropping them.
            if let error = error {
                self.errorHandler?(error: error)
                return
            }
            if let httpResponse = response as? NSHTTPURLResponse {
                if let responseData = data, let contentTypeHeader = httpResponse.allHeaderFields["Content-Type"] as? String {

                    var boundry: String?
                    // BUG FIX: the original pattern "boundary=.*?;" required a
                    // trailing semicolon, which is absent whenever boundary is
                    // the last Content-Type parameter (the common case), so the
                    // boundary was never found. Match up to ";" or end-of-string.
                    let headerNSS = contentTypeHeader as NSString
                    let ctbRange = headerNSS.rangeOfString("boundary=[^;]+", options: .RegularExpressionSearch)
                    if ctbRange.location != NSNotFound {
                        let boundryNSS = headerNSS.substringWithRange(ctbRange) as NSString
                        // Drop the 9-character "boundary=" prefix.
                        boundry = boundryNSS.substringFromIndex(9)
                    }

                    if let b = boundry {
                        // Parse once and reuse — the original parsed the body twice.
                        let parts = self.parseResponse(responseData, boundry: b)
                        self.successHandler?(data: responseData, parts: parts)
                    } else {
                        self.errorHandler?(error: NSError(domain: Settings.Error.ErrorDomain, code: Settings.Error.AVSResponseBorderParseErrorCode, userInfo: [NSLocalizedDescriptionKey : "Could not find boundry in AVS response"]))
                    }
                }
            }
        }

        upload.resume()
    }

此函数每 320 字节的音频数据调用一次,因为这是亚马逊推荐的流式传输大小:)

问候!

4

2 回答 2

0

您应该只在对话请求开始时(例如,麦克风打开并开始录制的那一刻)发送一次 JSON 元数据标头。

每次为同一流调用 sendData 方法时,您还需要使用相同的边界值,并对整个请求复用同一个 HTTP/2 流——这意味着您需要“从里到外”重构您的 sendData 方法以适应这种情况。您可能需要改用 uploadTask(withStreamedRequest:),参考它的使用示例会有所帮助。

我不熟悉 Swift HTTP/2 API,所以我不知道是否会为您处理延续帧,或者您是否需要自己管理,所以需要注意。祝你好运,希望这会有所帮助。

于 2017-08-26T20:41:14.970 回答
0

像这样:

/// Builds the multipart POST against the AVS /events endpoint for `event`
/// and starts a streamed upload task, all on the serial work queue.
public func send(event: AlexaEvent?) {
        self.queue.async { [weak self] in
            guard let self = self else { return }

            // Compose the request: endpoint, method, content type, auth.
            let endpoint = self.host.appending(AlexaConstant.ServiceUrl.eventsURL)
            var request = URLRequest(url: URL(string: endpoint)!)
            request.httpMethod = "POST"
            let contentType = "multipart/form-data; boundary=\(AlexaConstant.HttpBodyData.boundary)"
            request.setValue(contentType, forHTTPHeaderField: "Content-Type")
            self.addAuthHeader(request: &request)

            // Nothing to send without a prepared body.
            guard let payload = event?.HTTPBodyData else { return }
            request.httpBody = payload
            // The body stream (bound pair) feeds the streamed upload task.
            request.httpBodyStream = self.bodyStream

            guard let task = self.session?.uploadTask(withStreamedRequest: request) else { return }
            self.state = .started(.init(task: task))
            task.resume()
        }
    }

然后在 URLSessionTaskDelegate 的代理方法 `urlSession(_:task:needNewBodyStream:)` 中绑定输入输出流:

/// URLSessionTaskDelegate callback: supplies the request body as an
/// InputStream by creating a bound stream pair, handing the read end to the
/// session via `completionHandler`, and writing the multipart prologue plus
/// one dequeued audio chunk into the write end.
func urlSession(_ session: URLSession, task: URLSessionTask, needNewBodyStream completionHandler: @escaping (InputStream?) -> Void) {
        // 1-second repeating timer; NOTE(review): never added to a run loop
        // (the RunLoop.add call below is commented out), so it does not fire.
        let sendTimer = Timer(timeInterval: TimeInterval(1), target: self, selector: #selector(didFire(sendTimer:)), userInfo: nil, repeats: true)

        let streamingState = StreamingState(task: task, sendTimer: sendTimer)
        self.state = .streaming(streamingState)
        var bodyData = Data()
        // Round-trip the metadata through JSONSerialization; acts as a
        // validity check on the JSON before it is placed in the body.
        let data = AlexaHttpBodyData.jsonContent()
        guard let jsonObj = try? JSONSerialization.jsonObject(with: data, options: []) else { return }
        guard let valueData = try? JSONSerialization.data(withJSONObject: jsonObj, options: []) else { return }
        // Multipart prologue: boundary + JSON part, then boundary + audio headers.
        bodyData.append(AlexaHttpBodyData.boundaryBegin)
        bodyData.append(AlexaHttpBodyData.jsonHeaders)
        bodyData.append(AlexaHttpBodyData.jsonContent(data: valueData))
        bodyData.append(AlexaHttpBodyData.boundaryBegin)
        bodyData.append(AlexaHttpBodyData.AudioHeaders)
//        bodyStream = InputStream(data: bodyData)
        // Bound pair: bytes written to outputStream become readable on
        // bodyStream, which is what the session consumes.
        let streams = Stream.boundPair(bufferSize: BufferSize, inputStream: bodyStream)
        self.bodyStream = streams.inputStream
        self.outputStream = streams.outputStream
//        RunLoop.current.add(streamingState.sendTimer, forMode: .default)
        outputStream?.delegate = self
        outputStream?.schedule(in: .current, forMode: .default)
        outputStream?.open()
        // Hand the read end to URLSession before writing, so the session can
        // start draining the bound pair's buffer.
        completionHandler(self.bodyStream)
        // NOTE(review): these writes ignore the returned byte counts; a write
        // larger than the bound pair's free buffer space is silently
        // truncated — TODO confirm bodyData.count <= BufferSize here.
        outputStream?.write(Array(bodyData), maxLength: bodyData.count)
//        while (true) {
        // One audio chunk per callback; the commented-out loop suggests a
        // continuous-streaming variant was abandoned.
        outputStream?.write(Array(self.audioQueue.dequeue()), maxLength: BufferSize)
        let rndata = "\r\n".data(using: .utf8)
        outputStream?.write(Array(rndata!), maxLength: rndata!.count)
        // Closing multipart delimiter ends the request body.
        outputStream?.write(Array(AlexaHttpBodyData.boundaryEnd), maxLength: AlexaHttpBodyData.boundaryEnd.count)

//        }
        stop(error: nil)
    }
extension Stream {
    /// Creates a bound input/output stream pair whose shared transfer buffer
    /// holds `bufferSize` bytes: data written to the returned output stream
    /// becomes readable on the returned input stream.
    /// - Note: `getBoundStreams` overwrites both in-out arguments, so the
    ///   `inputStream` parameter only supplies an initial value that is
    ///   immediately replaced.
    static func boundPair(bufferSize: Int, inputStream: InputStream?) -> (inputStream: InputStream?, outputStream: OutputStream?) {
        var reader: InputStream? = inputStream
        var writer: OutputStream? = nil
        Stream.getBoundStreams(withBufferSize: bufferSize, inputStream: &reader, outputStream: &writer)
        return (inputStream: reader, outputStream: writer)
    }
}
于 2019-11-04T03:42:52.500 回答