I'm trying to use a separate context/script processor in place of Wavesurfer's defaults so that I can manipulate the pitch of the audio independently of the playback rate. When I supply the context/script processor as parameters and play the audio, I hear no sound.

My waveform component:

const playbackEngine = new PlaybackEngine({
    emitter: emitter,
    pitch: pitch,
});

const Waveform = WaveSurfer.create({
    audioContext: playbackEngine.context,
    audioScriptProcessor: playbackEngine.scriptProcessor,
    barWidth: 1,
    cursorWidth: 1,
    pixelRatio: 1,
    container: '#audio-spectrum',
    progressColor: '#03a9f4',
    height: 100,
    normalize: true,
    responsive: true,
    waveColor: '#ccc',
    cursorColor: '#4a74a5'
});

// called in componentDidMount()

function loadMediaUrl(url) {
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';

    // Decode asynchronously
    request.onload = async function() {
        let buffer = request.response;

        // decode the response; the engine's decodeAudioData returns a
        // promise (it takes no error callback), so handle failures with .catch
        const audioBuff = await playbackEngine.decodeAudioData(buffer)
            .catch((error) => console.error('Error decoding audio:', error));

        // sets audioBuffer for Wavesurfer to render Waveform (where I believe the problem 
        // begins)
        Waveform.loadDecodedBuffer(audioBuff);

        // sets audioBuffer for Playback Engine to playback audio
        playbackEngine.setBuffer(audioBuff);
    };
    request.send();
}
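
The same flow also works with fetch; a minimal sketch, assuming the playbackEngine and Waveform objects above (loadMediaUrlFetch is a hypothetical name):

async function loadMediaUrlFetch(url) {
    // fetch the raw bytes, then follow the same decode → render → set-buffer flow
    const response = await fetch(url);
    const arrayBuffer = await response.arrayBuffer();

    const audioBuff = await playbackEngine.decodeAudioData(arrayBuffer);
    Waveform.loadDecodedBuffer(audioBuff);  // render the waveform
    playbackEngine.setBuffer(audioBuff);    // feed the playback engine
}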

Playback.js


const {SimpleFilter, SoundTouch} = require('./soundtouch');

const BUFFER_SIZE = 4096;

class PlaybackEngine {
    constructor({emitter, pitch}) {
        this.emitter = emitter;
        this.context = new (window.AudioContext || window.webkitAudioContext);
        this.scriptProcessor = this.context.createScriptProcessor(BUFFER_SIZE, 2, 2);

        // Pull pitch-shifted frames from SoundTouch and de-interleave
        // them into the left and right output channels.
        this.scriptProcessor.onaudioprocess = e => {
            const l = e.outputBuffer.getChannelData(0);
            const r = e.outputBuffer.getChannelData(1);
            const framesExtracted = this.simpleFilter.extract(this.samples, BUFFER_SIZE);
            if (framesExtracted === 0) {
                this.emitter.emit('stop');
            }
            for (let i = 0; i < framesExtracted; i++) {
                l[i] = this.samples[i * 2];
                r[i] = this.samples[i * 2 + 1];
            }
        };

        this.soundTouch = new SoundTouch();
        this.soundTouch.pitch = pitch;

        this.duration = undefined;
    }

    get pitch() {
        return this.soundTouch.pitch;
    }
    set pitch(pitch) {
        this.soundTouch.pitch = pitch;
    }

    decodeAudioData(data) {
        return this.context.decodeAudioData(data);
    }

    setBuffer(buffer) {
        const bufferSource = this.context.createBufferSource();
        bufferSource.buffer = buffer; // note: never connected or started; audio flows through the script processor
        // Interleaved scratch buffer that SoundTouch fills on each extract call
        this.samples = new Float32Array(BUFFER_SIZE * 2);
        // Source adapter that SimpleFilter pulls raw stereo frames from
        this.source = {
            extract: (target, numFrames, position) => {
                this.emitter.emit('time', (position / this.context.sampleRate));
                const l = buffer.getChannelData(0);
                const r = buffer.getChannelData(1);
                for (let i = 0; i < numFrames; i++) {
                    target[i * 2] = l[i + position];
                    target[i * 2 + 1] = r[i + position];
                }
                return Math.min(numFrames, l.length - position);
            },
        };
        this.simpleFilter = new SimpleFilter(this.source, this.soundTouch);

        this.duration = buffer.duration;
        this.emitter.emit('duration', buffer.duration);
    }

    // Audible output starts and stops by (dis)connecting the
    // script processor from the context's destination.
    play() {
        this.scriptProcessor.connect(this.context.destination);
    }

    pause() {
        this.scriptProcessor.disconnect(this.context.destination);
    }

    seekPercent(percent) {
        if (this.simpleFilter !== undefined) {
            this.simpleFilter.sourcePosition = Math.round(
                percent / 100 * this.duration * this.context.sampleRate
            );
        }
    }
}

export default PlaybackEngine;
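
For context, a minimal usage sketch of the engine; the EventEmitter wiring is an assumption (the post never shows how emitter is constructed):

import { EventEmitter } from 'events'; // assumes a bundler that shims Node's events module
import PlaybackEngine from './Playback';

const emitter = new EventEmitter();
emitter.on('time', (seconds) => console.log('position (s):', seconds));
emitter.on('stop', () => console.log('reached end of buffer'));

const engine = new PlaybackEngine({ emitter, pitch: 1.0 });
// after decoding: engine.setBuffer(audioBuffer);
engine.pitch = 1.25; // raise pitch by a factor of 1.25 without changing tempo
engine.play();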


With this setup, I can play from the wavesurfer instance with Waveform.play(), but I can't manipulate the pitch. Likewise, with playbackEngine.play() I can manipulate the pitch but lose all of the Wavesurfer functionality.

While I'm fairly certain the problem stems from Wavesurfer and my playback engine using two separate AudioBuffers, I need both to set the buffer in the playback engine and to render the waveform with Wavesurfer.

I'd like to see if anyone can confirm how to control the Wavesurfer instance using the playback engine's context, script processor, and AudioBuffer (i.e. have Waveform.play() play the audio from the playback engine, and also update the Wavesurfer UI). One possible wiring is sketched below.
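
For illustration, one possible wiring (a sketch, not a confirmed solution — 'play', 'pause', and 'seek' are wavesurfer.js's documented events, and setVolume(0) silences Wavesurfer's own output so only the engine is audible):

Waveform.setVolume(0); // keep Wavesurfer's own playback path silent

Waveform.on('play', () => playbackEngine.play());
Waveform.on('pause', () => playbackEngine.pause());
Waveform.on('seek', (progress) => {
    // wavesurfer reports seek progress as 0..1; the engine expects a percent
    playbackEngine.seekPercent(progress * 100);
});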

Any help is appreciated.

1 Answer

So I ended up manually removing

audioScriptProcessor: playbackEngine.scriptProcessor,

from the Wavesurfer initialization, and instead attached the playbackEngine's script processor to the destination node manually. I had tried this setup before and heard annoying popping during playback. It turned out the annoying sample/buffer errors actually came from an EventEmitter instance I was constantly broadcasting the time with across files. Removing it solved my noise problem (ツ)
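
Put as code, the working setup looks roughly like this (a sketch; the trimmed options are the same visual options as in the question):

// Pass only the shared AudioContext to Wavesurfer — no audioScriptProcessor.
const Waveform = WaveSurfer.create({
    audioContext: playbackEngine.context,
    container: '#audio-spectrum',
    // ...same visual options as before
});

// Attach the engine's script processor to the destination manually
// (PlaybackEngine.play() above already does exactly this).
playbackEngine.scriptProcessor.connect(playbackEngine.context.destination);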

Answered 2020-06-04T05:04:22.037