
I am using the Google Cloud Speech-to-Text API with a NodeJS back end. The app needs to be able to listen for voice commands and transmit them to the back end as a buffer. To do this, I need to send a buffer of the preceding audio whenever silence is detected.

Any help would be greatly appreciated. The JS code I have so far is below.

 if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia || navigator.msGetUserMedia;

if (navigator.getUserMedia) {
    navigator.getUserMedia({audio: true}, success, function (e) {
        alert('Error capturing audio.');
    });
} else alert('getUserMedia not supported in this browser.');

var recording = false;

window.startRecording = function () {
    recording = true;
};

window.stopRecording = function () {
    recording = false;
    // window.Stream.end();
};

function success(e) {
    var audioContext = window.AudioContext || window.webkitAudioContext;
    var context = new audioContext();

    // the sample rate is in context.sampleRate
    var audioInput = context.createMediaStreamSource(e);

    var bufferSize = 4096;
    var recorder = context.createScriptProcessor(bufferSize, 1, 1);

    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        console.log('recording');
        var left = e.inputBuffer.getChannelData(0);
        // convertoFloat32ToInt16 is not defined in this snippet; see the sketch below
        console.log(convertoFloat32ToInt16(left));
    };

    audioInput.connect(recorder);
    recorder.connect(context.destination);
}
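The convertoFloat32ToInt16 helper called above is never defined in the question. A minimal sketch of what such a conversion typically looks like, assuming the back end expects 16-bit PCM (as Google's LINEAR16 encoding does):

// Hypothetical implementation of the undefined helper used above:
// converts Web Audio Float32 samples in [-1, 1] to 16-bit signed PCM.
function convertoFloat32ToInt16(buffer) {
    var int16 = new Int16Array(buffer.length);
    for (var i = 0; i < buffer.length; i++) {
        // clamp to [-1, 1], then scale into the Int16 range
        var s = Math.max(-1, Math.min(1, buffer[i]));
        int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return int16.buffer; // ArrayBuffer, ready to send over the wire
}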

3 Answers


I'm not too sure what exactly is being asked in the question, so this answer just provides a way to detect silences in an AudioStream.


To detect silence in an AudioStream, you can use an AnalyserNode, on which you call the getByteFrequencyData method at regular intervals and check whether any sound was above your expected level for a given duration.

You can set the threshold level directly with the minDecibels property of the AnalyserNode.

function detectSilence(
  stream,
  onSoundEnd = _=>{},
  onSoundStart = _=>{},
  silence_delay = 500,
  min_decibels = -80
  ) {
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  const streamNode = ctx.createMediaStreamSource(stream);
  streamNode.connect(analyser);
  analyser.minDecibels = min_decibels;

  const data = new Uint8Array(analyser.frequencyBinCount); // will hold our data
  let silence_start = performance.now();
  let triggered = false; // trigger only once per silence event

  function loop(time) {
    requestAnimationFrame(loop); // we'll loop every 60th of a second to check
    analyser.getByteFrequencyData(data); // get current data
    if (data.some(v => v)) { // if there is data above the given db limit
      if (triggered) {
        triggered = false;
        onSoundStart();
      }
      silence_start = time; // set it to now
    }
    if (!triggered && time - silence_start > silence_delay) {
      onSoundEnd();
      triggered = true;
    }
  }
  loop();
}

function onSilence() {
  console.log('silence');
}
function onSpeak() {
  console.log('speaking');
}

navigator.mediaDevices.getUserMedia({
    audio: true
  })
  .then(stream => {
    detectSilence(stream, onSilence, onSpeak);
    // do something else with the stream
  })
  .catch(console.error);

And as a fiddle, since Stack Snippets may block gUM.
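To tie this back to the question: a sketch of my own (under assumptions, not part of the answer above) pairing detectSilence with a MediaRecorder, so that the audio preceding each silence is POSTed to a hypothetical /upload endpoint on the Node back end:

navigator.mediaDevices.getUserMedia({ audio: true })
  .then(stream => {
    const recorder = new MediaRecorder(stream);
    const chunks = [];
    recorder.ondataavailable = e => chunks.push(e.data);
    recorder.onstop = () => {
      // everything recorded since speech last started
      const blob = new Blob(chunks, { type: recorder.mimeType });
      fetch("/upload", { method: "POST", body: blob }).catch(console.error);
    };

    detectSilence(stream, function onSilence() {
      if (recorder.state === "recording") {
        recorder.stop(); // flushes a final dataavailable, then onstop fires
      }
    }, function onSpeak() {
      if (recorder.state === "inactive") {
        chunks.length = 0; // drop the previous utterance
        recorder.start();
      }
    });
  })
  .catch(console.error);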

answered 2017-10-17T03:24:02.470

The simplest approach would be to use the .pause(), .resume(), and .stop() methods of MediaRecorder() to let the user start, pause, and stop recording the audio captured with navigator.mediaDevices.getUserMedia(), then convert the resulting Blob to an ArrayBuffer to POST to the server.

<!DOCTYPE html>
<html>

<head>
  <title>User Media Recording</title>
</head>

<body>
  <input type="button" value="Start/resume recording audio" id="start">
  <input type="button" value="Pause recording audio" id="pause">
  <input type="button" value="Stop recording audio" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => {
        const recorder = new MediaRecorder(stream);

        recorder.ondataavailable = async (e) => {
          if (stream.active) {
            try {
              const blobURL = URL.createObjectURL(e.data);
              const request = await fetch(blobURL);
              const ab = await request.arrayBuffer();
              // do stuff with `ArrayBuffer` of recorded audio
              console.log(blobURL, ab);
              // we do not need the `Blob URL`, we can revoke the object
              // URL.revokeObjectURL(blobURL);
            } catch (err) {
              throw err
            }
          }
        }
        recorder.onpause = e => {
          console.log("recorder " + recorder.state);
          recorder.requestData();
        }

        stream.oninactive = () => {
          console.log("stream ended");
        }

        document.getElementById("start")
          .onclick = () => {

            if (recorder.state === "inactive") {
              recorder.start();
            } else {
              recorder.resume();
            }
            console.log("recorder.state:", recorder.state);
          }

        document.getElementById("pause")
          .onclick = () => {

            if (recorder.state === "recording") {
              recorder.pause();
            }
            console.log("recorder.state:", recorder.state);
          }

        document.getElementById("stop")
          .onclick = () => {

            if (recorder.state === "recording" || recorder.state === "paused") {
              recorder.stop();
            }

            for (let track of stream.getTracks()) {
              track.stop();
            }

            document.getElementById("start").onclick = null;
            document.getElementById("pause").onclick = null;
            console.log("recorder.state:", recorder.state
            , "stream.active", stream.active);
          }

      })
      .catch(err => {
        console.error(err)
      });
  </script>
</body>

</html>

plnkr https://plnkr.co/edit/7caWYMsvub90G6pwDdQp?p=preview
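For completeness, a rough sketch of what "do stuff with ArrayBuffer of recorded audio" could look like once the bytes reach the NodeJS back end. The following assumptions are mine, not the answer's: an Express server, the @google-cloud/speech client, a /upload route, and Chrome's default audio/webm;codecs=opus recording:

// Hypothetical endpoint: receive the recorded audio as a raw body and
// forward it to Google Cloud Speech-to-Text.
const express = require('express');
const speech = require('@google-cloud/speech');

const app = express();
const client = new speech.SpeechClient();

app.post('/upload', express.raw({ type: '*/*', limit: '10mb' }), async (req, res) => {
  try {
    const [response] = await client.recognize({
      config: {
        encoding: 'WEBM_OPUS',  // matches Chrome's default MediaRecorder output
        sampleRateHertz: 48000, // adjust to the actual recording rate
        languageCode: 'en-US'
      },
      audio: { content: req.body.toString('base64') }
    });
    const transcript = response.results
      .map(r => r.alternatives[0].transcript)
      .join('\n');
    res.json({ transcript });
  } catch (err) {
    res.status(500).send(err.message);
  }
});

app.listen(3000);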

answered 2017-10-17T01:34:50.663

You can use the SpeechRecognition result event to determine when a word or phrase has been recognized, for example ls, cd, pwd, or other commands; pass the .transcript of the SpeechRecognitionAlternative to speechSynthesis.speak(); at the attached start and end events of the SpeechSynthesisUtterance, call .start() or .resume() on the MediaRecorder object to which the MediaStream was passed; then convert the Blob at the dataavailable event to an ArrayBuffer using FileReader or Response.arrayBuffer().

We could alternatively use the audiostart or soundstart and audioend or soundend events of SpeechRecognition to record the user's actual voice, though those ends may not fire consistently relative to the actual start and end of the audio captured by a standard system microphone.

<!DOCTYPE html>
<html>

<head>
  <title>Speech Recognition Recording</title>
</head>

<body>
  <input type="button" value="Stop speech command recognition" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => {
        const recorder = new MediaRecorder(stream);
        const recognition = new webkitSpeechRecognition();
        const synthesis = new SpeechSynthesisUtterance();
        const handleResult = e => {
          recognition.onresult = null;
          console.log(e.results);
          const result = e.results[e.results.length - 1];

          if (result.isFinal) {
            const [{transcript}] = result;
            console.log(transcript);
            synthesis.text = transcript;
            window.speechSynthesis.speak(synthesis);
          }
        }
        synthesis.onstart = () => {
          if (recorder.state === "inactive") {
            recorder.start()
          } else {
            if (recorder.state === "paused") {
              recorder.resume();
            }
          }
        }
        synthesis.onend = () => {
          recorder.pause();
          recorder.requestData();
        }
        recorder.ondataavailable = async (e) => {
          if (stream.active) {
            try {
              const blobURL = URL.createObjectURL(e.data);
              const request = await fetch(blobURL);
              const ab = await request.arrayBuffer();
              console.log(blobURL, ab);
              recognition.onresult = handleResult;
              // URL.revokeObjectURL(blobURL);
            } catch (err) {
              throw err
            }
          }
        }
        recorder.onpause = e => {
          console.log("recorder " + recorder.state);
        }
        recognition.continuous = true;
        recognition.interimResults = false;
        recognition.maxAlternatives = 1;
        recognition.start();
        recognition.onend = e => {
          console.log("recognition ended, stream.active", stream.active);

          if (stream.active) {
            console.log(e);
            // the service disconnects after a period of time
            recognition.start();
          }
        }
        recognition.onresult = handleResult;

        stream.oninactive = () => {
          console.log("stream ended");
        }

        document.getElementById("stop")
          .onclick = () => {
            console.log("stream.active:", stream.active);
            if (stream && stream.active && recognition) {
              recognition.abort();
              recorder.stop();
              for (let track of stream.getTracks()) {
                track.stop();
              }
              console.log("stream.active:", stream.active);
            }
          }

      })
      .catch(err => {
        console.error(err)
      });
  </script>
</body>

</html>

plnkr https://plnkr.co/edit/4DVEg6mhFRR94M5gdaIp?p=preview
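As a side note, both answers convert the Blob by creating an object URL and fetching it; the FileReader and Response alternatives mentioned in this answer's first paragraph would look roughly like this (given the recorder from the snippet above):

recorder.ondataavailable = e => {
  // via Response, no object URL needed:
  new Response(e.data).arrayBuffer().then(ab => console.log(ab));

  // or via FileReader:
  const reader = new FileReader();
  reader.onload = () => console.log(reader.result); // an ArrayBuffer
  reader.readAsArrayBuffer(e.data);
};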

answered 2017-10-15T02:09:17.527