
Note: I am using Android Studio, and I am currently using the latest Tarsos audio library, which is supposed to be Android-compatible; in fact I have successfully added the library to my Android Studio project. I previously tried the JTransforms and Minim libraries without success. Edit, August 23, 2017: I found and fixed some bugs and re-posted the current code, but the actual question is summarized below:

Summary: I need to know how to make the commented-out line 15 of the writeAudioDataToFile method posted below work without throwing a compile error.

What I am trying to do is record from the microphone and, while recording, run the audio through the DSP BandPass filter from the Tarsos library and output the result to a .wav file. Following this tutorial, I can successfully stream the microphone to a .wav file using the android.media imports, but that approach gives me no way to add the BandPass filter; and using the Tarsos imports I cannot use the save-to-.wav method that tutorial relies on. I know I am missing something and/or doing something wrong, but I have been googling for almost a week and have not found a working solution; all I have found are links to the Java files inside the library, which does not help because I cannot find a tutorial on how to use them correctly. What am I doing wrong? Here is the relevant code for the Tarsos approach I am trying to use.

The relevant imports and "global" variables:

import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.media.AudioFormat;
import android.media.AudioTrack;

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.filters.BandPass;
import be.tarsos.dsp.io.android.AudioDispatcherFactory;

//start the class 

AudioRecord alteredRecord = null;
AudioDispatcher dispatcher;
float freqChange;
float tollerance;
private static final int RECORDER_BPP = 16;
private static final String AUDIO_RECORDER_FOLDER = "Crowd_Speech";
private static final String AUDIO_RECORDER_TEMP_FILE = "record_temp.raw";
private static final int RECORDER_SAMPLERATE = 44100;
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO;
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
private int bufferSize = 1024;
private Thread recordingThread = null;

//set the min buffer size in onCreate event
bufferSize = AudioRecord.getMinBufferSize(RECORDER_SAMPLERATE,
        RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING) * 4;

This starts the microphone recording in the onClick method; by commenting/uncommenting one of the two "running" values I can switch between the filtered and unfiltered paths (the Tarsos or Android functions) when the startRecording method is called:

if(crowdFilter && running==0 && set==0){//crowd speech mode, start talking
    Icons(2,"");
    running=4;//start recording from mic, apply bandpass filter and save as wave file using TARSOS import
    //running=5;//start recording from mic, no filter, save as wav file using android media import
    freqChange = Globals.minFr[Globals.curUser];
    tollerance = 40;
    set=1;
    startRecording();
}

The startRecording method:

private void startRecording() {

    if (running == 5) {//start recording from mic, no filter, save as wav file using android media library
        alteredRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING, bufferSize);
        alteredRecord.startRecording();
        isRecording = true;
        recordingThread = new Thread(new Runnable() {
            @Override
            public void run() {
                writeAudioDataToFile();
            }
        }, "Crowd_Speech Thread");
        recordingThread.start();
    }

    if (running == 4) {//start recording from mic, apply bandpass filter and save as wave file using TARSOS library
        dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(RECORDER_SAMPLERATE, bufferSize, 0);
        AudioProcessor p = new BandPass(freqChange, tollerance, RECORDER_SAMPLERATE);
        dispatcher.addAudioProcessor(p);
        isRecording = true;
        dispatcher.run();//NOTE: run() blocks the calling thread until the dispatcher stops (the answer below moves it onto recordingThread)
        recordingThread = new Thread(new Runnable() {
            @Override
            public void run() {
                writeAudioDataToFile();
            }
        }, "Crowd_Speech Thread");
        recordingThread.start();
    }
}

The stop-recording button in the onClick method:

if(crowdFilter && (running==4 || running==5) && set==0) {//crowd speech finished talking
    Icons(1, "");
    stopRecording();
    set = 1;
}

Up to this point both paths work. If running==4 (the Tarsos DSP filter applied), the program crashes. If I use running==5 (the android.media way, no filter), everything else works and the file is saved, but with no band-pass effect applied. If I try to swap alteredRecord = new AudioRecord... for the Tarsos dispatcher = AudioDispatcherFactory... (e.g. dispatcher = new AudioRecord...), they are incompatible and will not even compile. (That is why line 15 in the writeAudioDataToFile method below is commented out.)
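
For context, unlike android.media's AudioRecord, the Tarsos AudioDispatcher is push-based: there is no read(...) call to make in a loop; instead the dispatcher hands each buffer (after BandPass has run) to every registered AudioProcessor. Below is a minimal sketch of capturing the filtered bytes that way; it assumes an extra import of be.tarsos.dsp.AudioEvent, the rawOut name is hypothetical, and the FileNotFoundException from the stream constructor still has to be handled:

final FileOutputStream rawOut = new FileOutputStream(getTempFilename());//throws FileNotFoundException
dispatcher.addAudioProcessor(new AudioProcessor() {
    @Override
    public boolean process(AudioEvent audioEvent) {
        byte[] filtered = audioEvent.getByteBuffer();//the buffer after BandPass has been applied
        try {
            rawOut.write(filtered);//raw PCM; a WAV header still has to be added afterwards
        } catch (IOException e) {
            e.printStackTrace();
        }
        return true;//true = pass the buffer on to the next processor
    }

    @Override
    public void processingFinished() {
        try {
            rawOut.close();//the dispatcher has stopped
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
});

This is essentially what the WriterProcessor used in the answer below does, except that WriterProcessor also takes care of the WAV header.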

private void writeAudioDataToFile(){
    byte data[] = new byte[bufferSize];
    String filename = getTempFilename();
    FileOutputStream os = null;
    try {
        os = new FileOutputStream(filename);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
    int read = 0;
    if(null != os){
        while(isRecording){
            if(running==4)
            {
                //read = dispatcher.(data, 0, bufferSize);
            }
            if(running==5)
            {
                read = alteredRecord.read(data, 0, bufferSize);
            }
            if(AudioRecord.ERROR_INVALID_OPERATION != read){
                try {
                    os.write(data, 0, read);//write only the bytes actually read
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
        try {
            os.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

private void stopRecording(){
    if(null != alteredRecord) {
        isRecording = false;
        int i = alteredRecord.getState();
        if (i == 1) {
            running = 0;
            alteredRecord.stop();
            alteredRecord.release();
            alteredRecord = null;
            recordingThread = null;
        }
    }
    if (null != dispatcher) {
        isRecording = false;
        running = 0;
        dispatcher.stop();
        recordingThread = null;
    }
    copyWaveFile(getTempFilename(),getFilename());
    deleteTempFile();
}

private void deleteTempFile() {
    File file = new File(getTempFilename());
    file.delete();
}

private void copyWaveFile(String inFilename,String outFilename){
    FileInputStream in = null;
    FileOutputStream out = null;
    long totalAudioLen = 0;
    long totalDataLen = totalAudioLen + 36;
    long longSampleRate = RECORDER_SAMPLERATE;
    int channels = 1;
    long byteRate = RECORDER_BPP * RECORDER_SAMPLERATE * channels/8;
    byte[] data = new byte[bufferSize];
    try {
        in = new FileInputStream(inFilename);
        out = new FileOutputStream(outFilename);
        totalAudioLen = in.getChannel().size();
        totalDataLen = totalAudioLen + 36;
        WriteWaveFileHeader(out, totalAudioLen, totalDataLen,
                longSampleRate, channels, byteRate);
        int bytesRead;
        while ((bytesRead = in.read(data)) != -1) {
            out.write(data, 0, bytesRead);//write only the bytes actually read
        }
        in.close();
        out.close();
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

private void WriteWaveFileHeader(
        FileOutputStream out, long totalAudioLen,
        long totalDataLen, long longSampleRate, int channels,
        long byteRate) throws IOException {
    byte[] header = new byte[44];
    // RIFF/WAVE header
    header[0] = 'R'; header[1] = 'I'; header[2] = 'F'; header[3] = 'F';
    header[4] = (byte) (totalDataLen & 0xff);
    header[5] = (byte) ((totalDataLen >> 8) & 0xff);
    header[6] = (byte) ((totalDataLen >> 16) & 0xff);
    header[7] = (byte) ((totalDataLen >> 24) & 0xff);
    header[8] = 'W'; header[9] = 'A'; header[10] = 'V'; header[11] = 'E';
    // 'fmt ' chunk
    header[12] = 'f'; header[13] = 'm'; header[14] = 't'; header[15] = ' ';
    // 4 bytes: size of 'fmt ' chunk
    header[16] = 16; header[17] = 0; header[18] = 0; header[19] = 0;
    // format = 1 (PCM)
    header[20] = 1; header[21] = 0;
    header[22] = (byte) channels; header[23] = 0;
    // sample rate
    header[24] = (byte) (longSampleRate & 0xff);
    header[25] = (byte) ((longSampleRate >> 8) & 0xff);
    header[26] = (byte) ((longSampleRate >> 16) & 0xff);
    header[27] = (byte) ((longSampleRate >> 24) & 0xff);
    // byte rate
    header[28] = (byte) (byteRate & 0xff);
    header[29] = (byte) ((byteRate >> 8) & 0xff);
    header[30] = (byte) ((byteRate >> 16) & 0xff);
    header[31] = (byte) ((byteRate >> 24) & 0xff);
    // block align = channels * bits-per-sample / 8
    header[32] = (byte) (channels * RECORDER_BPP / 8); header[33] = 0;
    // bits per sample
    header[34] = RECORDER_BPP; header[35] = 0;
    // 'data' chunk
    header[36] = 'd'; header[37] = 'a'; header[38] = 't'; header[39] = 'a';
    header[40] = (byte) (totalAudioLen & 0xff);
    header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
    header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
    header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
    out.write(header, 0, 44);
}

1 Answer


Solved. Instead of using any of the methods needed to save a wav file with the android.media imports, you need to use the writer function. Here is the working code segment of the changed startRecording method:

if (running == 4) {//start recording from mic, apply bandpass filter and save as wave file using TARSOS library
    //requires: import java.io.RandomAccessFile;
    //          import be.tarsos.dsp.io.TarsosDSPAudioFormat;
    //          import be.tarsos.dsp.writer.WriterProcessor;
    dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(RECORDER_SAMPLERATE, bufferSize, 0);
    AudioProcessor p = new BandPass(freqChange, tollerance, RECORDER_SAMPLERATE);
    dispatcher.addAudioProcessor(p);
    isRecording = true;
    // Output: WriterProcessor streams the filtered audio into a WAV file
    try {
        RandomAccessFile outputFile = new RandomAccessFile(getFilename(), "rw");
        TarsosDSPAudioFormat outputFormat = new TarsosDSPAudioFormat(RECORDER_SAMPLERATE, 16, 1, true, false);
        WriterProcessor writer = new WriterProcessor(outputFormat, outputFile);
        dispatcher.addAudioProcessor(writer);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
    //run the dispatcher on its own thread; calling dispatcher.run() directly would block the UI thread
    recordingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            dispatcher.run();
        }
    }, "Crowd_Speech Thread");
    recordingThread.start();
}
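
With this version the dispatcher runs on its own thread, and WriterProcessor writes the WAV header and the filtered samples itself, so the temp-file/copyWaveFile steps are no longer needed on this path. Stopping then only requires stopping the dispatcher; a minimal sketch of the matching stopRecording branch, assuming the same fields as in the question:

if (null != dispatcher && !dispatcher.isStopped()) {
    isRecording = false;
    running = 0;
    dispatcher.stop();//processingFinished() fires on the processors and dispatcher.run() returns
    recordingThread = null;
}
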
Answered 2017-08-24T17:04:51.920