我有一个为用户录制音频注释的功能。它使用 HTML5,并以 Flash 作为回退方案。我可以通过 getUserMedia() 从 HTML5 版本中获取音频数据,但 Flash 回退方案将数据作为浮点数组提供。
我需要这些数据作为 wav 文件,但我不知道该怎么做。非常感谢任何帮助!
我有一个为用户录制音频注释的功能。它使用 HTML5,并以 Flash 作为回退方案。我可以通过 getUserMedia() 从 HTML5 版本中获取音频数据,但 Flash 回退方案将数据作为浮点数组提供。
我需要这些数据作为 wav 文件,但我不知道该怎么做。非常感谢任何帮助!
// Worker-side state for one two-channel recording session:
// total sample count per channel, the raw chunk lists, and the capture rate.
var recLength = 0,
    recBuffersL = [],
    recBuffersR = [],
    sampleRate;

// Route messages from the main thread to the matching command handler.
this.onmessage = function (e) {
  var command = e.data.command;
  if (command === 'init') {
    init(e.data.config);
  } else if (command === 'record') {
    record(e.data.buffer);
  } else if (command === 'exportWAV') {
    exportWAV(e.data.type);
  } else if (command === 'getBuffer') {
    getBuffer();
  } else if (command === 'clear') {
    clear();
  }
};
// Remember the capture sample rate for later WAV encoding.
function init(cfg) {
  sampleRate = cfg.sampleRate;
}
// Append one stereo chunk (an array of two Float32Arrays: [left, right])
// to the per-channel buffers and grow the running sample count.
function record(inputBuffer) {
  var left = inputBuffer[0];
  var right = inputBuffer[1];
  recBuffersL.push(left);
  recBuffersR.push(right);
  recLength += left.length;
}
// Flatten both channels, interleave them, encode as WAV, wrap the bytes
// in a Blob of the requested MIME type and post it back to the main thread.
function exportWAV(type) {
  var left = mergeBuffers(recBuffersL, recLength);
  var right = mergeBuffers(recBuffersR, recLength);
  var samples = interleave(left, right);
  var view = encodeWAV(samples);
  var audioBlob = new Blob([view], { type: type });
  this.postMessage(audioBlob);
}
// Post the raw (non-interleaved) [left, right] channel data back
// to the main thread.
function getBuffer() {
  var buffers = [
    mergeBuffers(recBuffersL, recLength),
    mergeBuffers(recBuffersR, recLength)
  ];
  this.postMessage(buffers);
}
// Drop all captured audio so a new recording starts from scratch.
function clear() {
  recBuffersL = [];
  recBuffersR = [];
  recLength = 0;
}
// Concatenate a list of Float32Array chunks into one Float32Array
// holding recLength samples, preserving chunk order.
function mergeBuffers(recBuffers, recLength) {
  var merged = new Float32Array(recLength);
  var written = 0;
  recBuffers.forEach(function (chunk) {
    merged.set(chunk, written);
    written += chunk.length;
  });
  return merged;
}
// Interleave per-channel samples into a single L,R,L,R,... Float32Array.
// Assumes both channels have the same length (as produced by record()).
function interleave(inputL, inputR) {
  var total = inputL.length + inputR.length;
  var result = new Float32Array(total);
  for (var src = 0, dst = 0; dst < total; src++) {
    result[dst++] = inputL[src];
    result[dst++] = inputR[src];
  }
  return result;
}
// Convert float samples in [-1, 1] to little-endian signed 16-bit PCM,
// clamping out-of-range values, writing into `output` starting at `offset`.
function floatTo16BitPCM(output, offset, input) {
  for (var i = 0; i < input.length; i++) {
    var sample = Math.min(1, Math.max(-1, input[i]));
    // Negative values scale to -32768, positive to 32767.
    var pcm = sample < 0 ? sample * 0x8000 : sample * 0x7FFF;
    output.setInt16(offset + i * 2, pcm, true);
  }
}
// Write each character of an ASCII string as one byte at consecutive
// offsets in the view (used for the WAV header magic strings).
function writeString(view, offset, string) {
  for (var i = 0, n = string.length; i < n; i++) {
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}
// Wrap interleaved stereo samples in a standard 44-byte RIFF/WAVE PCM
// header (16-bit little-endian) and return the result as a DataView.
function encodeWAV(samples) {
  var buffer = new ArrayBuffer(44 + samples.length * 2);
  var view = new DataView(buffer);
  // RIFF chunk descriptor. The RIFF chunk size is the total file size minus
  // the 8 bytes of the "RIFF" tag + size field itself: 36 remaining header
  // bytes + the PCM payload. (Was 32, producing a malformed header that
  // strict decoders reject.)
  writeString(view, 0, 'RIFF');
  view.setUint32(4, 36 + samples.length * 2, true);
  writeString(view, 8, 'WAVE');
  // "fmt " sub-chunk describing the PCM stream.
  writeString(view, 12, 'fmt ');
  view.setUint32(16, 16, true);             // fmt chunk length
  view.setUint16(20, 1, true);              // audio format: 1 = linear PCM
  view.setUint16(22, 2, true);              // channel count: stereo
  view.setUint32(24, sampleRate, true);     // sample rate
  view.setUint32(28, sampleRate * 4, true); // byte rate = rate * 2 ch * 2 bytes
  view.setUint16(32, 4, true);              // block align = 2 ch * 2 bytes
  view.setUint16(34, 16, true);             // bits per sample
  // "data" sub-chunk followed by the PCM payload.
  writeString(view, 36, 'data');
  view.setUint32(40, samples.length * 2, true);
  floatTo16BitPCM(view, 44, samples);
  return view;
}
用法:
// Prefer the standard AudioContext and fall back to the WebKit-prefixed
// one, so non-WebKit browsers work too (was webkit-only).
var AudioContext = win.AudioContext || win.webkitAudioContext,
recorder, audioContext;
// Begin capturing audio from the configured media stream via the
// Web Audio graph and the Recorder worker wrapper.
function recordAudio() {
  if (!config.stream) {
    alert('No audio.');
    return;
  }
  initAudioRecorder(config.audioWorkerPath);
  audioContext = new AudioContext();
  var source = audioContext.createMediaStreamSource(config.stream);
  // NOTE(review): connecting the source to the destination plays the mic
  // back through the speakers; confirm this monitoring is intentional.
  source.connect(audioContext.destination);
  recorder = new window.Recorder(source);
  if (recorder) {
    recorder.record();
  }
}
// Stop the recorder, export the captured audio as a WAV blob
// (delivered asynchronously to setBlob), then reset the recorder state.
function stopAudioRecording() {
  console.warn('Audio recording stopped'); // fixed log typo: "stopeed"
  recorder && recorder.stop();
  recorder && recorder.exportWAV(function (blob) {
    fileType = 'wav';
    setBlob(blob);
  });
  recorder && recorder.clear();
}
// File writer created by setBlob; exposed to callers via toURL().
var writer;

// Hand the recorded blob to the file writer and start a data-URL read so
// getBlob() can later return the base64 representation (set asynchronously).
function setBlob(blob) {
  blobURL = blob;
  var config = {
    blob: blobURL,
    type: 'audio/wav',
    // NOTE(review): `<< 1000` shifts by 1000 % 32 = 8 bits, so this is a
    // small random integer, not a unique name — collisions are possible.
    fileName: (Math.random() * 1000 << 1000) + '.' + fileType,
    // A Blob has no .length property; its byte count is .size
    // (was blobURL.length, which always yielded undefined).
    size: blobURL.size
  };
  writer = RecordRTCFileWriter(config);
  var reader = new win.FileReader();
  reader.readAsDataURL(blobURL);
  reader.onload = function (event) {
    blobURL2 = event.target.result;
  };
}
// Public API of the enclosing recorder module.
return {
stopAudio: stopAudioRecording,
stopVideo: stopVideoRecording,
recordVideo: recordVideo,
recordAudio: recordAudio,
save: saveToDisk,
// Latest recording as a data URL; populated asynchronously by the
// FileReader in setBlob, so it may be undefined until that load fires.
getBlob: function () {
return blobURL2;
},
// URL from the RecordRTCFileWriter instance created in setBlob.
toURL: function () {
return writer.toURL();
}
};