我正在尝试让 Wavesurfer 使用一个单独的上下文/脚本处理器来替代其默认设置,这样我就可以独立于播放速率来操纵音频的音高。但当我将该上下文/脚本处理器作为参数传入并播放音频时,我听不到任何声音。
我的波形组件:
// The playback engine owns the AudioContext and ScriptProcessorNode so that
// pitch can be manipulated independently of the playback rate.
const playbackEngine = new PlaybackEngine({ emitter, pitch });

// Hand the engine's context/processor to wavesurfer so both sides share a
// single audio graph instead of wavesurfer creating its own.
const waveformOptions = {
  audioContext: playbackEngine.context,
  audioScriptProcessor: playbackEngine.scriptProcessor,
  container: '#audio-spectrum',
  barWidth: 1,
  cursorWidth: 1,
  pixelRatio: 1,
  height: 100,
  normalize: true,
  responsive: true,
  progressColor: '#03a9f4',
  waveColor: '#ccc',
  cursorColor: '#4a74a5',
};
const Waveform = WaveSurfer.create(waveformOptions);
// called in ComponentDidMount()
// called in ComponentDidMount()
/**
 * Fetches an audio file, decodes it once through the playback engine's own
 * AudioContext, and hands the SAME decoded AudioBuffer to both the waveform
 * renderer and the playback engine.
 * @param {string} url - location of the audio resource
 */
function loadMediaUrl(url) {
  const request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  request.onerror = () => {
    console.error(`Network error while loading audio from ${url}`);
  };
  // Decode asynchronously once the raw bytes arrive.
  request.onload = async function() {
    try {
      // PlaybackEngine.decodeAudioData takes only the data and returns a
      // Promise — the old second-argument error callback was silently
      // ignored, so decode failures went unhandled. Use try/catch instead.
      const audioBuff = await playbackEngine.decodeAudioData(request.response);
      // sets audioBuffer for Wavesurfer to render the waveform (where I
      // believe the problem begins)
      Waveform.loadDecodedBuffer(audioBuff);
      // sets audioBuffer for Playback Engine to play back audio
      playbackEngine.setBuffer(audioBuff);
    } catch (error) {
      console.error(`Error decoding audio:`, error);
    }
  };
  request.send();
}
回放.js
const {SimpleFilter, SoundTouch} = require('./soundtouch');
const BUFFER_SIZE = 4096;
class PlaybackEngine {
  /**
   * Wraps a SoundTouch pitch-shifting pipeline behind a ScriptProcessorNode
   * so pitch can be changed independently of playback rate.
   * @param {Object} options
   * @param {Object} options.emitter - event emitter; receives 'time',
   *   'duration' and 'stop' events from this engine
   * @param {number} options.pitch - initial pitch ratio (1.0 = unchanged)
   */
  constructor({emitter, pitch}) {
    this.emitter = emitter;
    this.context = new (window.AudioContext || window.webkitAudioContext)();
    this.scriptProcessor = this.context.createScriptProcessor(BUFFER_SIZE, 2, 2);
    this.scriptProcessor.onaudioprocess = e => {
      const left = e.outputBuffer.getChannelData(0);
      const right = e.outputBuffer.getChannelData(1);
      // Guard: play() may be called before setBuffer(); emit silence
      // instead of throwing on the missing filter.
      if (this.simpleFilter === undefined) {
        left.fill(0);
        right.fill(0);
        return;
      }
      const framesExtracted = this.simpleFilter.extract(this.samples, BUFFER_SIZE);
      if (framesExtracted === 0) {
        this.emitter.emit('stop');
      }
      for (let i = 0; i < framesExtracted; i++) {
        left[i] = this.samples[i * 2];
        right[i] = this.samples[i * 2 + 1];
      }
      // Zero-fill the tail so a short extract doesn't replay stale samples
      // left over from the previous callback.
      for (let i = framesExtracted; i < BUFFER_SIZE; i++) {
        left[i] = 0;
        right[i] = 0;
      }
    };
    this.soundTouch = new SoundTouch();
    this.soundTouch.pitch = pitch;
    this.duration = undefined;
  }

  /** @returns {number} current pitch ratio (1.0 = unchanged) */
  get pitch() {
    return this.soundTouch.pitch;
  }

  /** @param {number} pitch - new pitch ratio */
  set pitch(pitch) {
    this.soundTouch.pitch = pitch;
  }

  /**
   * Decodes raw audio bytes using this engine's AudioContext.
   * Note: takes ONLY the data; returns a Promise<AudioBuffer>. Callers must
   * handle rejection (no error-callback parameter is accepted).
   * @param {ArrayBuffer} data - encoded audio bytes
   * @returns {Promise<AudioBuffer>} the decoded buffer
   */
  decodeAudioData(data) {
    return this.context.decodeAudioData(data);
  }

  /**
   * Installs a decoded AudioBuffer as the playback source for the
   * SoundTouch pipeline.
   * @param {AudioBuffer} buffer - decoded audio to play
   */
  setBuffer(buffer) {
    this.samples = new Float32Array(BUFFER_SIZE * 2);
    const left = buffer.getChannelData(0);
    // Mono sources have no channel 1 — reuse the left channel rather than
    // letting getChannelData(1) throw.
    const right = buffer.numberOfChannels > 1 ? buffer.getChannelData(1) : left;
    this.source = {
      extract: (target, numFrames, position) => {
        this.emitter.emit('time', position / this.context.sampleRate);
        // Clamp BEFORE copying so we never read past the end of the buffer
        // (out-of-range reads yield undefined, which becomes NaN audio),
        // and never return a negative frame count.
        const frames = Math.max(0, Math.min(numFrames, left.length - position));
        for (let i = 0; i < frames; i++) {
          target[i * 2] = left[i + position];
          target[i * 2 + 1] = right[i + position];
        }
        return frames;
      },
    };
    this.simpleFilter = new SimpleFilter(this.source, this.soundTouch);
    this.duration = buffer.duration;
    this.emitter.emit('duration', buffer.duration);
  }

  /** Starts audible output by connecting the processor to the speakers. */
  play() {
    this.scriptProcessor.connect(this.context.destination);
  }

  /** Stops audible output; playback position is retained. */
  pause() {
    this.scriptProcessor.disconnect(this.context.destination);
  }

  /**
   * Jumps to a position expressed as a percentage of the total duration.
   * @param {number} percent - 0..100
   */
  seekPercent(percent) {
    if (this.simpleFilter !== undefined) {
      this.simpleFilter.sourcePosition = Math.round(
        percent / 100 * this.duration * this.context.sampleRate
      );
    }
  }
}
export default PlaybackEngine;
在此设置中,调用 Waveform.play() 我可以从 wavesurfer 实例中播放,但不能操纵音高;同样,调用 playbackEngine.play() 我可以操纵音高,但会失去所有 Wavesurfer 功能。
虽然我很确定问题源于 Wavesurfer 和我的播放引擎使用两个单独的 AudioBuffers,但我需要在播放上下文中设置缓冲区,并使用 wavesurfer 渲染波形。
我想看看是否有人可以说明如何用播放引擎的上下文、脚本处理器和 AudioBuffer 来驱动 Wavesurfer 实例(即让 Waveform.play() 播放来自播放引擎的音频,并同步更新 Wavesurfer 用户界面)。
感谢所有帮助。