我找到了解决方案。似乎在使用 startRendering() 时,必须在创建上下文时提供 duration 和 samplerate:
new webkitAudioContext(channels, duration * samplerate, samplerate);
这是我生成预渲染音频缓冲区并将其转换为 base64 wav 的解决方案。
// Render the input audio offline through a filter chain, then encode the
// result as a base64 WAV data URI and point the <audio id="audio"> element at it.
var audioBuffer = audio.createBuffer(arrayBuffer, false); /* <- Input Audio */
// NOTE(review): createBuffer(ArrayBuffer, mixToMono) is the long-deprecated
// synchronous decode; modern code should use audio.decodeAudioData() instead.
// One mono channel, length = duration * sampleRate frames, rendered at 44.1 kHz.
var offlineContext = new OfflineAudioContext(1, audioBuffer.duration * 44100, 44100);
offlineContext.oncomplete = function (event) {
  var renderedBuffer = event.renderedBuffer;
  var wavBytes = createWaveFileData(renderedBuffer);
  var base64 = btoa(uint8ToString(wavBytes));
  document.getElementById("audio").src = "data:audio/wav;base64," + base64;
};
var source = offlineContext.createBufferSource();
source.buffer = audioBuffer;
var gain = offlineContext.createGain(); // createGainNode() is the deprecated name
// Effects: band-pass filter (the legacy numeric constant 2 meant BANDPASS)
var filter1 = offlineContext.createBiquadFilter();
filter1.type = "bandpass";
filter1.frequency.value = 4000;
filter1.Q.value = 0.1;
// Connect: source -> filter -> gain -> destination
source.connect(filter1);
filter1.connect(gain);
gain.connect(offlineContext.destination);
source.start(0); // noteOn(0) is the deprecated name
offlineContext.startRendering();
// Serialize an AudioBuffer into a complete RIFF/WAVE file — the canonical
// 44-byte PCM header followed by interleaved 16-bit little-endian samples —
// and return it as a Uint8Array ready for base64 encoding.
function createWaveFileData(audioBuffer) {
  var frameLength = audioBuffer.length;
  var numberOfChannels = audioBuffer.numberOfChannels;
  var sampleRate = audioBuffer.sampleRate;
  var bitsPerSample = 16;
  var byteRate = sampleRate * numberOfChannels * bitsPerSample / 8;
  var blockAlign = numberOfChannels * bitsPerSample / 8;
  var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio
  var headerByteLength = 44;
  var totalLength = headerByteLength + wavDataByteLength;
  var waveFileData = new Uint8Array(totalLength);
  var subChunk1Size = 16; // for linear PCM
  var subChunk2Size = wavDataByteLength;
  var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size);
  writeString("RIFF", waveFileData, 0);
  writeInt32(chunkSize, waveFileData, 4);
  writeString("WAVE", waveFileData, 8);
  writeString("fmt ", waveFileData, 12);
  writeInt32(subChunk1Size, waveFileData, 16);    // SubChunk1Size (4 bytes)
  writeInt16(1, waveFileData, 20);                // AudioFormat: 1 = PCM (2 bytes)
  writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2 bytes)
  writeInt32(sampleRate, waveFileData, 24);       // SampleRate (4 bytes)
  writeInt32(byteRate, waveFileData, 28);         // ByteRate (4 bytes)
  writeInt16(blockAlign, waveFileData, 32);       // BlockAlign (2 bytes)
  // BUG FIX: BitsPerSample is a 2-byte field. The original used writeInt32,
  // which spilled two zero bytes into offsets 36-37 — masked only because the
  // "data" chunk id was written over them immediately afterwards.
  writeInt16(bitsPerSample, waveFileData, 34);    // BitsPerSample (2 bytes)
  writeString("data", waveFileData, 36);
  writeInt32(subChunk2Size, waveFileData, 40);    // SubChunk2Size (4 bytes)
  // Write actual audio data starting at offset 44.
  writeAudioBuffer(audioBuffer, waveFileData, 44);
  return waveFileData;
}
// Copy the character codes of string s into byte array a, starting at offset.
// Characters are assumed to be single-byte ASCII (used for RIFF chunk ids).
function writeString(s, a, offset) {
  s.split("").forEach(function (ch, idx) {
    a[offset + idx] = ch.charCodeAt(0);
  });
}
// Store n (floored to an integer) into a as a little-endian 16-bit value
// at byte positions offset and offset+1.
function writeInt16(n, a, offset) {
  var v = Math.floor(n);
  a[offset] = v & 0xff;
  a[offset + 1] = (v >> 8) & 0xff;
}
// Store n (floored to an integer) into a as a little-endian 32-bit value
// at byte positions offset .. offset+3.
function writeInt32(n, a, offset) {
  var v = Math.floor(n);
  for (var byteIndex = 0; byteIndex < 4; ++byteIndex) {
    a[offset + byteIndex] = (v >> (8 * byteIndex)) & 0xff;
  }
}
// Interleave the float samples of audioBuffer into byte array a as 16-bit PCM,
// starting at offset. Floats are scaled by 32768 and clamped to int16 range
// to avoid wrap-around distortion on over-range samples.
function writeAudioBuffer(audioBuffer, a, offset) {
  var frameCount = audioBuffer.length;
  var channelCount = audioBuffer.numberOfChannels;
  // PERF: fetch each channel's sample array once, instead of calling
  // getChannelData() inside the inner loop for every single sample as the
  // original did (frameCount * channelCount calls).
  var channelData = [];
  for (var ch = 0; ch < channelCount; ++ch) {
    channelData.push(audioBuffer.getChannelData(ch));
  }
  for (var frame = 0; frame < frameCount; ++frame) {
    for (var k = 0; k < channelCount; ++k) {
      var sample = channelData[k][frame] * 32768.0;
      // Clip samples to the limitations of 16-bit.
      if (sample < -32768) sample = -32768;
      if (sample > 32767) sample = 32767;
      writeInt16(sample, a, offset);
      offset += 2;
    }
  }
}
// Convert a byte array into a binary string (one character per byte, codes
// 0-255), suitable for passing to btoa(). Works in fixed-size chunks via
// String.fromCharCode.apply, which is far faster than per-character `+=`
// concatenation and stays under every engine's argument-count limit.
function uint8ToString(buf) {
  var CHUNK = 0x8000; // 32768 arguments per apply() call is safe everywhere
  var parts = [];
  for (var i = 0; i < buf.length; i += CHUNK) {
    // slice.call works for both plain arrays and typed arrays.
    parts.push(String.fromCharCode.apply(null, Array.prototype.slice.call(buf, i, i + CHUNK)));
  }
  return parts.join('');
}
如果有人想出更简单或更短的方法来做到这一点,那就太好了!