Android: I am new to audio processing. I created an AudioRecord object with a sample rate of 8000 Hz and I am using the JTransforms library to try to filter a frequency range. There are several things in the code below that I do not understand; my questions are as follows.
Q.1 Why is the buffer converted with "toTransform[i] = (double) buffer[i] / 32768.0; // signed 16 bit", i.e. why is each signed 16-bit sample divided by 32768?
Q.2 Right now audioRecord reads the data into a short array. If I read into a byte array instead, how would I combine pairs of bytes into single 16-bit values? (A sketch of what I have in mind follows this list.)
Q.3 I want to display the frequency of the sound in Hz from the double array. How do I calculate the frequency? (See my note after filterAudio() below.)
Q.4 I wrote the filter method filterAudio(), but it does not seem to filter the frequency range. Why not? (See my note at the end of the post.)
Please help me; I have a lot of questions in my head.
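For Q.1 and Q.2 together, this is what I think the byte-to-sample conversion should look like (my own untested sketch; I am assuming AudioRecord delivers ENCODING_PCM_16BIT data as little-endian byte pairs, and that dividing by 32768.0 simply normalizes the signed 16-bit range -32768..32767 to roughly -1.0..1.0 for the FFT). Please correct me if any of this is wrong:

// Sketch: convert a byte[] read from AudioRecord (ENCODING_PCM_16BIT) into
// normalized doubles for the FFT. Assumes little-endian byte order, and that
// bytesRead is the value returned by audioRecord.read(byteBuffer, 0, byteBuffer.length).
static double[] bytesToDoubles(byte[] byteBuffer, int bytesRead) {
    double[] out = new double[bytesRead / 2];
    for (int i = 0; i < out.length; i++) {
        int lo = byteBuffer[2 * i] & 0xFF;       // low byte, treated as unsigned
        int hi = byteBuffer[2 * i + 1];          // high byte, keeps the sign
        short sample = (short) ((hi << 8) | lo); // recombine into one signed 16-bit sample
        out[i] = sample / 32768.0;               // normalize to [-1.0, 1.0)
    }
    return out;
}

Is this equivalent to reading into a short[] and dividing each element by 32768.0?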
/* The code is below */
private final int[] mSampleRates = new int[] { 8000, 11025, 22050, 44100 };

final AudioRecord audioRecord = findAudioRecord();
if (audioRecord == null) {
    return null;
}
final short[] buffer = new short[blockSize];
final double[] toTransform = new double[blockSize];
audioRecord.startRecording();
while (started) {
    Thread.sleep(100);
    final int bufferReadResult = audioRecord.read(buffer, 0, blockSize);
    for (int i = 0; i < blockSize && i < bufferReadResult; i++) {
        toTransform[i] = (double) buffer[i] / 32768.0; // signed 16 bit
    }
    // Audio filter, passing the frequency mSampleRates[3]
    filterAudio(bufferReadResult, toTransform, mSampleRates[3]);
    transformer.realForward(toTransform);
    publishProgress(toTransform);
}
audioRecord.stop();
audioRecord.release();
public static void filterAudio(int bufferSize, double[] audioBuffer, float sampleRate) {
    // It is assumed that a double array audioBuffer exists with even length equal to
    // the capture size of your audio buffer.
    //float frequency = 0F;
    // The size of the FFT will be the size of your audioBuffer / 2
    int FFT_SIZE = bufferSize / 2;
    //RealDoubleFFT mFFT = new RealDoubleFFT(FFT_SIZE);
    DoubleFFT_1D mFFT = new DoubleFFT_1D(FFT_SIZE); // this is a JTransforms type
    // Take the FFT
    mFFT.realForward(audioBuffer);
    //mFFT.ft(audioBuffer);
    // The first 1/2 of audioBuffer now contains bins that represent the frequency
    // of your wave, in a way. To get the actual frequency from the bin:
    // frequency_of_bin = bin_index * sample_rate / FFT_SIZE
    // Assuming the length of audioBuffer is even, the real and imaginary parts are
    // stored as follows:
    // audioBuffer[2*k]   = Re[k], 0 <= k < n/2
    // audioBuffer[2*k+1] = Im[k], 0 <  k < n/2
    // Define the frequencies of interest
    float freqMin = 14400;
    float freqMax = 14500;
    // Loop through the FFT bins and filter frequencies
    for (int fftBin = 0; fftBin < FFT_SIZE; fftBin++) {
        // Calculate the frequency of this bin for the given sampleRate
        float frequency = (float) fftBin * sampleRate / (float) FFT_SIZE;
        // Now filter the audio; I'm assuming you wanted to keep the
        // frequencies of interest rather than discard them.
        if (frequency < freqMin || frequency > freqMax) {
            // Calculate the indices where the real and imaginary parts are stored
            int real = 2 * fftBin;
            int imaginary = 2 * fftBin + 1;
            // Zero out this frequency
            audioBuffer[real] = 0;
            audioBuffer[imaginary] = 0;
        }
    }
    // Take the inverse FFT to convert the signal from the frequency domain back to the time domain
    mFFT.realInverse(audioBuffer, false);
}
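For Q.3, my understanding (an assumption on my part, please confirm) is that after a realForward over N samples, bin k corresponds to k * sampleRate / N Hz; for example, with sampleRate = 8000 and N = 256 (hypothetical numbers), bin 32 would be 32 * 8000 / 256 = 1000 Hz. This is the little helper I sketched to pick out the loudest frequency, assuming the Re/Im packing described in the comments above (only a sketch, not tested):

// Sketch: find the dominant frequency in Hz from an array that was transformed
// in place with new DoubleFFT_1D(n).realForward(fft).
// Assumes n is even and ignores the DC bin (k = 0).
static double dominantFrequencyHz(double[] fft, int n, double sampleRate) {
    int bestBin = 1;
    double bestMagnitude = -1;
    for (int k = 1; k < n / 2; k++) {
        double re = fft[2 * k];
        double im = fft[2 * k + 1];
        double magnitude = Math.sqrt(re * re + im * im);
        if (magnitude > bestMagnitude) {
            bestMagnitude = magnitude;
            bestBin = k;
        }
    }
    return bestBin * sampleRate / n; // frequency of the strongest bin, in Hz
}

Is that the right way to turn the toTransform array into a frequency I can display?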
final AudioRecord findAudioRecord() {
    for (int rate : mSampleRates) {
        for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT }) {
            for (short channelConfig : new short[] { AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.CHANNEL_CONFIGURATION_STEREO }) {
                try {
                    bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // Check whether we can instantiate the recorder successfully
                        AudioRecord recorder = new AudioRecord(AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);
                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                            Log.d(TAG, "Attempting rate " + rate + " Hz, bits: " + audioFormat + ", channel: " + channelConfig);
                            return recorder;
                        }
                    }
                } catch (Exception e) {
                    Log.e(TAG, rate + " Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
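One more thought on Q.4 that I am not sure about: I record at 8000 Hz, but I pass mSampleRates[3] (which is 44100) into filterAudio(), and my freqMin/freqMax of 14400 to 14500 Hz are above the Nyquist limit of an 8000 Hz recording (sampleRate / 2 = 4000 Hz), so that band cannot even be present in the captured data. Could this be why the filter appears to do nothing? This is the sanity check I was thinking of adding inside filterAudio() before the bin loop (just a sketch):

// Sketch: warn if the requested band is above what this recording can contain.
float nyquist = sampleRate / 2f; // e.g. 4000 Hz when recording at 8000 Hz
if (freqMin > nyquist || freqMax > nyquist) {
    Log.w(TAG, "Filter band " + freqMin + "-" + freqMax + " Hz is above Nyquist (" + nyquist + " Hz)");
}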