3

We have built a web application. The application's core is to arrange meetings/sessions on the web. So User A (the meeting coordinator) will arrange a meeting/session, and all other participants (B, C, D, etc.) will join the meeting/session. I have used a Twilio group video call to achieve this.

I have the following use case: we want to pitch-shift User A's (the meeting coordinator's) voice so that all other participants receive the pitch-shifted voice in the group video call. We have analyzed AWS Polly together with Twilio, but it does not match our use case.

So please advise: is there any service in Twilio to achieve this scenario?
(or) Would it be possible to intercept the Twilio group call and pass the pitch-shifted voice to the other participants?

Sample Code Used

// Entry point: build the analysers and request microphone access.
initAudio();

/**
 * Creates the analyser nodes and requests microphone access.
 *
 * On success the raw stream is handed to gotStream(), which builds the
 * processing graph.  Prefers the modern promise-based
 * navigator.mediaDevices.getUserMedia and falls back to the deprecated,
 * vendor-prefixed callback form for old browsers.
 *
 * NOTE(review): relies on globals `audioContext`, `analyser1`, `analyser2`,
 * `gotStream` and `gotSources` declared elsewhere in the file.
 */
function initAudio() {

    analyser1 = audioContext.createAnalyser();
    analyser1.fftSize = 1024;
    analyser2 = audioContext.createAnalyser();
    analyser2.fftSize = 1024;

    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        // Modern, promise-based capture API.
        navigator.mediaDevices.getUserMedia({ audio: true })
            .then(gotStream)
            .catch(function () { console.log('Error getting Microphone stream'); });
    } else {
        // Legacy fallback: deprecated callback-based, vendor-prefixed API.
        if (!navigator.getUserMedia)
            navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

        if (!navigator.getUserMedia)
            return(alert("Error: getUserMedia not supported!"));

        navigator.getUserMedia({ audio: true }, function(stream){
            gotStream(stream);
        }, function(){ console.log('Error getting Microphone stream'); });
    }

    // NOTE(review): MediaStreamTrack.getSources() has been removed from
    // modern browsers; navigator.mediaDevices.enumerateDevices() is the
    // replacement, but it delivers a different device-record shape, so the
    // call is left as-is until gotSources (not visible here) is confirmed.
    if ((typeof MediaStreamTrack === 'undefined')||(!MediaStreamTrack.getSources)){
        console.log("This browser does not support MediaStreamTrack, so doesn't support selecting sources.\n\nTry Chrome Canary.");
    } else {
        MediaStreamTrack.getSources(gotSources);
    }
}
/**
 * Builds the Web Audio processing graph for the microphone stream:
 *
 *   audioInput -> dryGain ------------------\
 *   audioInput -> effectInput   wetGain -----+-> outputMix -> audioOutput
 *                                                       \--> analyser2
 *
 * All nodes are stored in globals so crossfade()/changeEffect() can adjust
 * and re-wire them later; the wet path into wetGain is completed by the
 * pitch-shift effect created in changeEffect().
 * @param {MediaStream} stream - microphone stream from getUserMedia.
 */
function gotStream (stream) {
audioInput = audioContext.createMediaStreamSource(stream);
outputMix = audioContext.createGain();
dryGain = audioContext.createGain();     // level of the unprocessed signal
wetGain = audioContext.createGain();     // level of the effected signal
effectInput = audioContext.createGain(); // tap point feeding the effect chain
audioInput.connect(dryGain);
audioInput.connect(effectInput);
dryGain.connect(outputMix);
wetGain.connect(outputMix);
// MediaStreamDestination exposes the mix as audioOutput.stream, which is
// what gets published to Twilio instead of the raw microphone stream.
audioOutput = audioContext.createMediaStreamDestination();
outputMix.connect(audioOutput);
outputMix.connect(analyser2);
crossfade(1.0); // start fully wet (effect only, dry muted)
changeEffect();
}
    /**
     * Equal-power crossfade between the dry and wet signal paths.
     * @param {number} value - mix position in [0, 1]; 0 = fully dry, 1 = fully wet.
     */
    function crossfade (value) {
        const dryLevel = Math.cos(value * 0.5 * Math.PI);
        const wetLevel = Math.cos((1.0 - value) * 0.5 * Math.PI);

        dryGain.gain.value = dryLevel;
        wetGain.gain.value = wetLevel;
    }

/**
 * Instantiates a Jungle pitch-shift effect, configures its pitch offset,
 * wires its output into the wet gain node, and hands back its input node
 * so the caller can connect the source into it.  The instance is stored in
 * the global `effect`.
 * @returns {AudioNode} the effect's input node.
 */
function createPitchShifter () {
    effect = new Jungle(audioContext);
    effect.setPitchOffset(1);
    effect.output.connect(wetGain);
    return effect.input;
}

/**
 * Rebuilds the wet (effect) signal path: tears down any previously wired
 * effect node and the effectInput tap, creates a fresh pitch-shifter, and
 * routes the microphone source into it.
 */
function changeEffect () {
    // Disconnect the old wiring before installing the new effect.
    if (currentEffectNode) {
        currentEffectNode.disconnect();
    }
    if (effectInput) {
        effectInput.disconnect();
    }

    // Only one effect is supported today; kept as a named selector so more
    // effects can be added later.
    var selectedEffect = 'pitch';
    if (selectedEffect === 'pitch') {
        currentEffectNode = createPitchShifter();
    }

    audioInput.connect(currentEffectNode);
}

Facing the following error while adding the LocalAudioTrack to a room:

// NOTE(review): Twilio's LocalAudioTrack constructor expects a single
// MediaStreamTrack, but audioOutput.stream is a whole MediaStream -- that
// mismatch is what triggers the "parameter 1 is not of type
// 'MediaStreamTrack'" error quoted below.  Passing
// audioOutput.stream.getAudioTracks()[0] would satisfy the constructor.
var mediaStream = new Twilio.Video.LocalAudioTrack(audioOutput.stream);

room.localParticipant.publishTrack(mediaStream, {
    name: 'adminaudio'
});

ERROR: Uncaught (in promise) TypeError: Failed to execute 'addTrack' on 'MediaStream': parameter 1 is not of type 'MediaStreamTrack'.

4

2 回答 2

1

Twilio 开发人员布道者在这里。

Twilio 本身没有任何东西可以改变声音。

如果您在浏览器中构建它,那么您可以使用 Web Audio API 从用户的麦克风获取输入并对其进行音高转换,然后将生成的音频流而不是原始麦克风流提供给 Video API。

于 2018-09-27T05:50:12.507 回答
0

上述答案中的评论非常有帮助!我已经研究了几个星期,发布到 Twilio-video.js 无济于事,最后只是正确的措辞在 SO 上提出了这个问题!

但总结并添加我发现的工作,因为很难遵循所有 27 个问题/评论/代码摘录:

连接到 Twilio 时:

// Connect to the Twilio room.  The redundant `.then((room) => room)` of the
// original was dropped: `await` already yields the resolved room, and
// chaining a pass-through .then onto an awaited expression is a no-op.
const room = await Video.connect(twilioToken, {
  name: roomName,
  tracks: localTracks,
  audio: false, // if you don't want to hear the normal voice at all, you can hide this and add the shifted track upon participant connections 
  video: true,
  logLevel: "debug",
});

在新的(远程)参与者连接上:

        // Wrap the remote participant's Twilio audio track in a plain
        // MediaStream so it can feed a Web Audio processing graph.
        const stream = new MediaStream([audioTrack.mediaStreamTrack]);
        const audioContext = new AudioContext(); 
        const audioInput = audioContext.createMediaStreamSource(stream);

// NOTE(review): `source`, `audioOutput`, `pitchVal`, `freqVal` and `gainVal`
// are not defined in this excerpt -- presumably declared elsewhere; verify
// before reuse.
source.disconnect(audioOutput);
          console.log("using PitchShift.js");
          // PitchShift comes from the third-party PitchShift.js library.
          var pitchShift = PitchShift(audioContext);

          if (isFinite(pitchVal)) {
            pitchShift.transpose = pitchVal;
            console.log("gain is " + pitchVal);
          }
          // Mostly-wet mix: full shifted signal with half the dry signal.
          pitchShift.wet.value = 1;
          pitchShift.dry.value = 0.5;

          try {
            // Best effort: ask the browser to clean up the processed track.
            audioOutput.stream.getAudioTracks()[0]?.applyConstraints({
              echoCancellation: true,
              noiseSuppression: true,
            });
          } catch (e) {
            console.log("tried to constrain audio track " + e);
          }

          var biquadFilter = audioContext.createBiquadFilter();
          // Create a compressor node
          var compressor = audioContext.createDynamicsCompressor();
          compressor.threshold.setValueAtTime(-50, audioContext.currentTime);
          compressor.knee.setValueAtTime(40, audioContext.currentTime);
          compressor.ratio.setValueAtTime(12, audioContext.currentTime);
          compressor.attack.setValueAtTime(0, audioContext.currentTime);
          compressor.release.setValueAtTime(0.25, audioContext.currentTime);
          //biquadFilter.type = "lowpass";
          if (isFinite(freqVal)) {
            biquadFilter.frequency.value = freqVal;
            console.log("gain is " + freqVal);
          }
          if (isFinite(gainVal)) {
            biquadFilter.gain.value = gainVal;
            console.log("gain is " + gainVal);
          }
          // Graph: source -> compressor -> biquad -> pitchShift -> audioOutput.
          source.connect(compressor);
          compressor.connect(biquadFilter);
          biquadFilter.connect(pitchShift);
          pitchShift.connect(audioOutput);
   
        // Publish a MediaStreamTrack (not the whole MediaStream) to Twilio --
        // this is the fix for the error quoted in the question.
        const localAudioWarpedTracks = new Video.LocalAudioTrack(audioOutput.stream.getAudioTracks()[0]);

        const audioElement2 = document.createElement("audio");
        document.getElementById("audio_div").appendChild(audioElement2);

        // NOTE(review): attach() with no argument returns a brand-new media
        // element; audioElement2 created above is never passed in -- confirm
        // whether attach(audioElement2) was intended.
        localAudioWarpedTracks.attach();

于 2021-04-02T19:53:30.490 回答