
I have read the manual at this link, but it still does not solve my problem.

After mixing two wav files with the code below, I get a wav file (music.wav on the sdcard), but it plays back as noise. I really don't know how to remove the noise from music.wav. This is a hard problem for me.

Here is my source code:

    public class MainActivity extends Activity {

public static final int FREQUENCY = 44100;
private InputStream in1, in2;
private byte[] output;


@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    try {
        mixSound();
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}

private void mixSound() throws IOException {
//  AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 44100, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT, 44100, AudioTrack.MODE_STREAM);

    in1 = getResources().openRawResource(R.raw.media_b);
    in2 = getResources().openRawResource(R.raw.media_c);

    byte[] arrayMusic1 = null;
    arrayMusic1 = new byte[in1.available()];
    arrayMusic1 = createMusicArray(in1);
    in1.close();

    byte[] arrayMusic2 = null;
    arrayMusic2 = new byte[in2.available()];
    arrayMusic2 = createMusicArray(in2);
    in2.close();

    output = new byte[arrayMusic1.length];

    /** old
    audioTrack.play();  
    */

    for (int i = 0; i < output.length; i++) {
        float samplef1 = arrayMusic1[i] / 128.0f; 
        float samplef2 = arrayMusic2[i] / 128.0f;
        float mixed    = samplef1 + samplef2;

        // reduce the volume a bit:
        mixed *= 0.8;
        // hard clipping
        if (mixed > 1.0f)  mixed = 1.0f;
        if (mixed < -1.0f) mixed = -1.0f;

        byte outputSample  = (byte) (mixed * 128.0f);
        output[i]          = outputSample;  
    }

    /** old
    audioTrack.write(output, 0, output.length);
    convertByteToFile(output);
    save();  
    */
    saveToFile();
}

public static byte[] createMusicArray(InputStream is) throws IOException {

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    //10*1024 = 10KB
    byte[] byteBuffer   = new byte[10*1024]; 

    int i = Integer.MAX_VALUE;
    while ((i = is.read(byteBuffer, 0, byteBuffer.length)) > 0) {
        baos.write(byteBuffer, 0, i);
    }

    return baos.toByteArray(); 
}


/**
 * .WAV
 * more info --> https://ccrma.stanford.edu/courses/422/projects/WaveFormat/

 The canonical WAVE format starts with the RIFF header:
 | FILE OFFSET | FIELD SIZE | FIELD NAME    
        0         4   ChunkID          Contains the letters "RIFF" in ASCII form
                                       (0x52494646 big-endian form).
        4         4   ChunkSize        36 + SubChunk2Size, or more precisely:
                                       4 + (8 + SubChunk1Size) + (8 + SubChunk2Size)
                                       This is the size of the rest of the chunk 
                                       following this number.  This is the size of the 
                                       entire file in bytes minus 8 bytes for the
                                       two fields not included in this count:
                                       ChunkID and ChunkSize.
        8         4   Format           Contains the letters "WAVE"
                                       (0x57415645 big-endian form).

        The "WAVE" format consists of two subchunks: "fmt " and "data":
        The "fmt " subchunk describes the sound data's format:

        12        4   Subchunk1ID      Contains the letters "fmt "
                                       (0x666d7420 big-endian form).
        16        4   Subchunk1Size    16 for PCM.  This is the size of the
                                       rest of the Subchunk which follows this number.
        20        2   AudioFormat      PCM = 1 (i.e. Linear quantization)
                                       Values other than 1 indicate some 
                                       form of compression.
        22        2   NumChannels      Mono = 1, Stereo = 2, etc.
        24        4   SampleRate       8000, 44100, etc.
        28        4   ByteRate         == SampleRate * NumChannels * BitsPerSample/8
        32        2   BlockAlign       == NumChannels * BitsPerSample/8
                                       The number of bytes for one sample including
                                       all channels. I wonder what happens when
                                       this number isn't an integer?
        34        2   BitsPerSample    8 bits = 8, 16 bits = 16, etc.
                  2   ExtraParamSize   if PCM, then doesn't exist
                  X   ExtraParams      space for extra parameters

        The "data" subchunk contains the size of the data and the actual sound:

        36        4   Subchunk2ID      Contains the letters "data"
                                       (0x64617461 big-endian form).
        40        4   Subchunk2Size    == NumSamples * NumChannels * BitsPerSample/8
                                       This is the number of bytes in the data.
                                       You can also think of this as the size
                                       of the read of the subchunk following this 
                                       number.
        44        *   Data             The actual sound data.

 */
public void saveToFile() {
    try {
        long mySubChunk1Size = 16;
        int myBitsPerSample  = 16;
        int myFormat         = 1;
        long myChannels      = 2;
        long mySampleRate    = 44100;
        long myByteRate      = mySampleRate * myChannels * (myBitsPerSample/8);
        int myBlockAlign     = (int) (myChannels * myBitsPerSample/8);

        long myDataSize      = output.length;
        long myChunk2Size    = myDataSize * myChannels * myBitsPerSample/8;
        long myChunkSize     = 36 + myChunk2Size;

        OutputStream os = new FileOutputStream(new File(Environment.getExternalStorageDirectory().getPath()+"/videokit/mixed.wav"));
        BufferedOutputStream bos = new BufferedOutputStream(os);
        DataOutputStream outFile = new DataOutputStream(bos);

        outFile.writeBytes("RIFF");                                     // 00 - RIFF
        outFile.write(intToByteArray((int)myChunkSize), 0, 4);          // 04 - how big is the rest of this file?
        outFile.writeBytes("WAVE");                                     // 08 - WAVE
        outFile.writeBytes("fmt ");                                     // 12 - fmt 
        outFile.write(intToByteArray((int)mySubChunk1Size), 0, 4);      // 16 - size of this chunk
        outFile.write(shortToByteArray((short)myFormat), 0, 2);         // 20 - what is the audio format? 1 for PCM = Pulse Code Modulation
        outFile.write(shortToByteArray((short)myChannels), 0, 2);       // 22 - mono or stereo? 1 or 2?  (or 5 or ???)
        outFile.write(intToByteArray((int)mySampleRate), 0, 4);         // 24 - samples per second (numbers per second)
        outFile.write(intToByteArray((int)myByteRate), 0, 4);           // 28 - bytes per second
        outFile.write(shortToByteArray((short)myBlockAlign), 0, 2);     // 32 - # of bytes in one sample, for all channels
        outFile.write(shortToByteArray((short)myBitsPerSample), 0, 2);  // 34 - how many bits in a sample(number)?  usually 16 or 24
        outFile.writeBytes("data");                                     // 36 - data
        outFile.write(intToByteArray((int)myDataSize), 0, 4);           // 40 - how big is this data chunk
        outFile.write(output);                                          // 44 - the actual data itself - just a long string of numbers

        outFile.flush();
        outFile.close();

    } catch (IOException e) {
        e.printStackTrace();
    }

}


 public static byte[] getBytesFromFile(File file) throws IOException {
     InputStream is = new FileInputStream(file);

     long length = file.length();
     if (length > Integer.MAX_VALUE) {
     }

     byte[] bytes = new byte[(int)length];
     int offset   = 0;
     int numRead  = 0;
     while (offset < bytes.length && (numRead = is.read(bytes, offset, Math.min(bytes.length - offset, 512*1024))) >= 0) {
             offset += numRead;
     }

     if (offset < bytes.length) {
         Log.i("xxx", "Could not completely read file");
     }
     is.close();

     return bytes;
}


private static byte[] intToByteArray(int i) {
    byte[] b = new byte[4];
    b[0]     = (byte) (i & 0x00FF);
    b[1]     = (byte) ((i >> 8)  & 0x000000FF);
    b[2]     = (byte) ((i >> 16) & 0x000000FF);
    b[3]     = (byte) ((i >> 24) & 0x000000FF);
    return b;
}

public static byte[] shortToByteArray(short data) {
        return new byte[]{(byte)(data & 0xff),(byte)((data >>> 8) & 0xff)};
}


public static long byteArrayToLong(byte[] b) {
    int  start = 0;
    int      i = 0;
    int    len = 4;
    int    cnt = 0;
    byte[] tmp = new byte[len];
    for (i = start; i < (start + len); i++) {
        tmp[cnt] = b[i];
        cnt++;
    }
    long accum = 0;
    i = 0;
    for (int shiftBy = 0; shiftBy < 32; shiftBy += 8) {
        accum |= ((long) (tmp[i] & 0xff)) << shiftBy;
        i++;
    }
    return accum;
}

}

A spectrum analysis of mixed.wav showing the noise range: (image link omitted)

If you have run into this problem, please give me a solution. What should I do with my code? Can you help me? I look forward to your suggestions. Thank you very much.


1 Answer


You haven't provided enough code to be sure, but here is at least one problem:

In the saveToFile function you declare the samples to be 16-bit (myBitsPerSample), while in your mixing code you treat each sample as a single byte (presumably 8-bit).
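
If those resources really are 16-bit PCM, each sample spans two bytes and has to be reassembled as a little-endian short before mixing. A minimal sketch of such a loop is below; mix16BitPcm is a hypothetical helper, not part of your code, and it assumes both arrays already contain raw 16-bit little-endian sample data with any WAV header stripped off:

    /** Mixes two buffers of raw 16-bit little-endian PCM samples into one (assumed layout). */
    public static byte[] mix16BitPcm(byte[] a, byte[] b) {
        int length = Math.min(a.length, b.length) & ~1;    // whole 2-byte samples only
        byte[] out = new byte[length];
        for (int i = 0; i < length; i += 2) {
            // Rebuild each signed 16-bit sample from its little-endian byte pair.
            short s1 = (short) ((a[i] & 0xff) | (a[i + 1] << 8));
            short s2 = (short) ((b[i] & 0xff) | (b[i + 1] << 8));

            float mixed = s1 / 32768f + s2 / 32768f;
            mixed *= 0.8f;                                 // reduce the volume a bit, as in the original loop
            if (mixed > 1.0f)  mixed = 1.0f;               // hard clipping
            if (mixed < -1.0f) mixed = -1.0f;

            short outSample = (short) (mixed * 32767f);
            out[i]     = (byte) (outSample & 0xff);        // low byte first (little-endian)
            out[i + 1] = (byte) ((outSample >> 8) & 0xff); // then high byte
        }
        return out;
    }

The mixed buffer could then be handed to saveToFile as before, since that header already declares 16-bit samples.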

One of these is incorrect (I suspect it is your mixing code, but if these really are 8-bit files, keep in mind that wav files do not support signed 8-bit audio, so you would need to change the conversion).
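
If instead the files really are 8-bit, the signedness caveat above applies: WAV stores 8-bit samples as unsigned values centered on 128, so the conversion would need to recenter the data rather than treat it as signed bytes. A rough sketch under that assumption (mixUnsigned8BitPcm is a hypothetical name; the saveToFile header would also have to declare myBitsPerSample = 8):

    /** Mixes two buffers of unsigned 8-bit PCM, the only 8-bit layout WAV supports (assumed input format). */
    public static byte[] mixUnsigned8BitPcm(byte[] a, byte[] b) {
        int length = Math.min(a.length, b.length);
        byte[] out = new byte[length];
        for (int i = 0; i < length; i++) {
            // 8-bit WAV samples are unsigned with silence at 128, so recenter before mixing.
            float s1 = ((a[i] & 0xff) - 128) / 128f;
            float s2 = ((b[i] & 0xff) - 128) / 128f;

            float mixed = 0.8f * (s1 + s2);
            if (mixed > 1.0f)  mixed = 1.0f;               // hard clipping
            if (mixed < -1.0f) mixed = -1.0f;

            // Shift back into the unsigned 0..255 range expected by the WAV "data" chunk.
            out[i] = (byte) (Math.round(mixed * 127f) + 128);
        }
        return out;
    }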

answered 2013-05-29T14:11:10.010