My program is replacing ScriptProcessorNode with AudioWorklet. Unfortunately, I don't have a server to test it on, but thanks to the Google Chrome Labs Audio Worklet Polyfill I managed to get it running. Anyway, the goal of this idea is to recreate functionality similar to the original ScriptProcessorNode (since it is now deprecated) using its new replacement, AudioWorklet.
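
For reference, here is a minimal sketch of the deprecated usage I am trying to recreate (the source node is just a placeholder for whatever feeds the processor):

var ctx = new AudioContext();
//createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels)
var node = ctx.createScriptProcessor(2048, 2, 2);
node.onaudioprocess = function(e) {
    //simple pass-through: copy each input channel straight to the output
    for (var c = 0; c < e.outputBuffer.numberOfChannels; c++) {
        e.outputBuffer.getChannelData(c).set(e.inputBuffer.getChannelData(c));
    }
};
source.connect(node); //source is whatever node feeds the processor (placeholder)
node.connect(ctx.destination);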

Here is the code:

scriptProcessingWorklet.js

class ScriptProcessingHandler extends AudioWorkletProcessor {
    constructor(options) {
        super();

        this._frameSize = 128;
        this._bufferSize = options.processorOptions.bufferSize;

        //this._frameCount is the number of 128-sample frames needed to fill one buffer;
        //all of the valid buffer sizes listed at https://developer.mozilla.org/en-US/docs/Web/API/ScriptProcessorNode
        //are divisible by 128
        this._frameCount = this._bufferSize / this._frameSize;
         
        //arrays for storing audio data frames
        this._inputData = [];
        this._outputData = [];

        this.port.onmessage = this._processMessage.bind(this);
    }
    
    _processMessage(e) {
        switch (e.data.message) {
            case "PROCESS_OUTPUT":
                this._processOutput(e.data.detail);
                break;
        }
    }

    _sendMessage(message, detail = null) {
        if (!message) {
            return;
        }
        this.port.postMessage({message, detail});
    }

    _sendInputData(frames) {
        var numChannels = 0, len = 0, f = 0;
        for (f = 0; f < frames.length; f++) {
            numChannels = Math.max(frames[f].length, numChannels);
            len += frames[f][0].length;
        }
        //merge the audio data frames into one Float32Array per channel;
        //each channel gets its own backing buffer, and each frame is written
        //at the correct offset instead of overwriting the start of the buffer
        var channels = [];
        for (var c = 0; c < numChannels; c++) {
            channels.push(new Float32Array(len));
        }
        var offset = 0;
        for (f = 0; f < frames.length; f++) {
            for (var c = 0; c < frames[f].length; c++) {
                channels[c].set(frames[f][c], offset);
            }
            offset += frames[f][0].length;
        }
        var detail = {
            numberOfChannels: numChannels,
            numberOfFrames: this._frameCount,
            length: len,
            channelData: channels,
        };
        this._sendMessage("PROCESS_DATA", detail);
    }

    _processInput(data) {
        //copy the current 128-sample frame, since the engine may reuse
        //the underlying input buffers between process() calls
        var frame = [];
        for (var c = 0; c < data.length; c++) {
            frame.push(new Float32Array(data[c]));
        }
        this._inputData.push(frame);

        //once enough frames have accumulated to fill one sample buffer,
        //extract them from the queue and merge them into a single block
        if (this._inputData.length >= this._frameCount) {
            var extract = this._inputData.splice(0, this._frameCount);
            this._sendInputData(extract);
        }
    }

    _processOutput(data) {
        //split the processed audio data back into 128-sample frames,
        //allocating a fresh Float32Array per channel of every frame and
        //advancing the read offset for each frame
        var frames = [];
        var offset = 0;
        for (var f = 0; f < data.numberOfFrames; f++) {
            var frame = [];
            for (var c = 0; c < data.numberOfChannels; c++) {
                frame.push(data.channelData[c].slice(offset, offset + this._frameSize));
            }
            frames.push(frame);
            offset += this._frameSize;
        }
        this._outputData = this._outputData.concat(frames);
    }

    //process() is called by the engine with 128 samples per channel on each call
    process(inputs, outputs, params) {
        if (!(inputs[0][0] instanceof Float32Array)) {
            return true;
        }
        this._processInput(inputs[0]);
        if (this._outputData.length > 0) {
            var output = outputs[0];
            var outData = this._outputData.shift();
            for (var c = 0; c < output.length; c++) {
                output[c].set(outData[c]);
            }
        } 
        return true;
    }
}

registerProcessor("script-processing-handler", ScriptProcessingHandler);
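
To make the frame math concrete, a quick sanity check with the default settings (plain numbers, nothing audio-specific):

var frameSize = 128;                  //fixed render quantum of AudioWorkletProcessor.process()
var bufferSize = 2048;                //default ScriptProcessorNode-style buffer size
var frameCount = bufferSize / frameSize;
console.log(frameCount);              //16 frames are queued up per PROCESS_DATA message
console.log(frameCount * frameSize);  //2048 samples per channel make the round trip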

ScriptProcessorNodePolyfill.js

(function(root) {
    "use strict";

    //bail out when AudioWorkletNode is unavailable, since this
    //polyfill is built on top of the AudioWorklet API
    if (typeof AudioWorkletNode === "undefined") {
        return;
    }

    //constants
    const bufferSizes = [128, 256, 512, 1024, 2048, 4096, 8192, 16384];
    const defaults = {
        bufferSize: 2048,
        numberOfInputChannels: 2,
        numberOfOutputChannels: 2,
    };

    //utilities
    const util = {
        isNumber: function(x) {
            return typeof x === "number" && isFinite(x);
        },

        assert: function(bool, msg) {
            if (!bool) {
                throw new Error(msg);
            }
        },

        isValidBufferSize: function(size) {
            util.assert(util.isNumber(size), "ScriptProcessorNodePolyfill.bufferSize is not a number");
            var index = bufferSizes.indexOf(size);
            util.assert(index !== -1, "ScriptProcessorNodePolyfill.bufferSize is not a valid buffer size");
        },

        isValidChannels: function(n, t) {
            util.assert(util.isNumber(n), "ScriptProcessorNodePolyfill.numberOf" + t + "Channels is not a number");
            util.assert(n > 0, "ScriptProcessorNodePolyfill.numberOf" + t + "Channels must be at least 1");
        }
    };

    //Don't worry about the Events part of this code
    //AudioProcessingEvent Definition 
    //defined in https://webaudio.github.io/web-audio-api/#audioproccessingevent
    class AudioProcessingEventPolyfill extends Event {
        constructor(type, init) {
            super(type);
            this.inputBuffer = init.inputBuffer;
            this.outputBuffer = init.outputBuffer;
            this.playbackTime = init.playbackTime;
        }
    }

    //main library
    class ScriptProcessorNodePolyfill /*extends EventTarget*/ {
        constructor(context, options) {
            //super();
            var opts = Object.assign({}, defaults, options || {});
            util.isValidBufferSize(opts.bufferSize);
            util.isValidChannels(opts.numberOfInputChannels, "Input");
            util.isValidChannels(opts.numberOfOutputChannels, "Output");

            
            this._process = null;

            this._options = opts;
            this.context = context;
            this.bufferSize = opts.bufferSize;
            this._worklet = null;
            this._event = null;
            
            //initializing AudioWorklet
            context.audioWorklet.addModule("scriptProcessingWorklet.js").then(() => {
                //the properties of AudioWorkletNode and options are set to the properties based on
                //ScriptProcessorNode defined in https://webaudio.github.io/web-audio-api/#scriptprocessornode
                this._worklet = new AudioWorkletNode(context, "script-processing-handler", {
                    numberOfInputs: 1,
                    numberOfOutputs: 1,
                    outputChannelCount: [opts.numberOfOutputChannels],
                    processorOptions: {
                        bufferSize: opts.bufferSize,
                    }
                });
                this._worklet.channelInterpretation = "speakers";
                this._worklet.channelCountMode = "explicit";
                this._worklet.channelCount = opts.numberOfInputChannels;

                this._worklet.port.onmessage = this._handleMessage.bind(this);
            }).catch((err) => {
                throw err;
            });


        }

        //this function is what triggers the onaudioprocess event.
        //I have defined a class called AudioProcessingEventPolyfill that inherits from Event for this,
        //so it can be dispatched using window.dispatchEvent(),
        //but it was easier to start off with a plain callback instead,
        //using the property this._process to store the function.
        //I will worry about Events later. One step at a time :)
        async processData(data) {
            //create input and output buffers
            var input = this.context.createBuffer(data.numberOfChannels, data.length, this.context.sampleRate);
            var output = this.context.createBuffer(this._options.numberOfOutputChannels, data.length, this.context.sampleRate);
            var playbackTime = this.context.currentTime;

            //this is where the AudioProcessingEventPolyfill will go
            this._event = {
                playbackTime: playbackTime,
                inputBuffer: input,
                outputBuffer: output,
            };
            
            //fill input buffer with data sent from worklet
            for (var c = 0; c < data.numberOfChannels; c++) {
                input.getChannelData(c).set(data.channelData[c]);
            }

            //execute function with input and output buffers with playbackTime from context.currentTime
            //using async/await to make sure the function is properly finished first before anything else happens
            //await window.dispatchEvent("audioprocess");
            await this._process(this._event);

            //create an array to fill with audio data from output buffer to be sent to worklet
            var channels = new Array(output.numberOfChannels);
            for (var c = 0; c < channels.length; c++) {
                channels[c] = output.getChannelData(c);
            }

            //send audio data to worklet
            this._worklet.port.postMessage({
                message: "PROCESS_OUTPUT",
                detail: {
                    channelData: channels,
                    length: output.length,
                    numberOfChannels: output.numberOfChannels,
                },
            });

            //drop references to the audio buffers since we don't need them anymore;
            //note that AudioWorkletGlobalScope does not define AudioBuffer, which is
            //why only the raw channel data is posted to the worklet
            input = null;
            output = null;
        }

        _handleMessage(e) {
            switch (e.data.message) {
                case "PROCESS_DATA": 
                    this.processData(e.data.detail);
                    break;
            }
        }

        connect() {
            this._worklet.connect.apply(this._worklet, arguments);
        }

        disconnect() {
            this._worklet.disconnect.apply(this._worklet, arguments);
        }

        //for now this just stores a single callback; real EventTarget
        //semantics will come with AudioProcessingEventPolyfill later
        addEventListener(type, fn) {
            this._process = fn;
        }
    }

    //exposed to global window scope
    root.ScriptProcessorNodePolyfill = ScriptProcessorNodePolyfill;
    root.AudioProcessingEventPolyfill = AudioProcessingEventPolyfill;
})(this);
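
For completeness, this is roughly how I drive the polyfill on my test page. The source node is a placeholder, the wrapper itself is not a real AudioNode (so incoming connections have to target the underlying worklet node), and the setTimeout is just a crude way to wait for addModule() to resolve before wiring up the graph:

var ctx = new AudioContext();
var node = new ScriptProcessorNodePolyfill(ctx, { bufferSize: 2048 });

//addEventListener currently just stores the callback in this._process
node.addEventListener("audioprocess", async function(e) {
    //simple pass-through: copy the input buffer to the output buffer
    for (var c = 0; c < e.outputBuffer.numberOfChannels; c++) {
        e.outputBuffer.getChannelData(c).set(e.inputBuffer.getChannelData(c));
    }
});

setTimeout(function() {
    source.connect(node._worklet); //the wrapper is not an AudioNode itself
    node.connect(ctx.destination);
}, 100);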

The problem is that I got it working, but setting a larger bufferSize does not produce a good-sounding result. When I test it, a bufferSize of 128 works fine, but anything above 128 produces a glitchy sound.

I know the underlying issue is that AudioWorkletProcessor.process() is only called with 128 samples at a time. I did consider working with the active source flag by returning true or false from process(), but after reading the spec I realized that process() is never called again once it returns false; in fact, there is no way to flip the active flag from false back to true from script (see the sketch below). Does anyone have any ideas?
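
For reference, here is the return-value behaviour I mean, stripped down to a minimal sketch (KeepAliveProcessor is just an illustrative name):

class KeepAliveProcessor extends AudioWorkletProcessor {
    process(inputs, outputs, params) {
        //returning true keeps the processor alive even while there is
        //nothing queued to play; once false is returned the processor
        //becomes eligible for garbage collection and process() is never
        //invoked again, so there is no way to "reactivate" it from script
        return true;
    }
}

registerProcessor("keep-alive-processor", KeepAliveProcessor);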
