
I have seen several Stack Overflow questions about playing MP3 streams (such as Icecast). They all say to use the MP3SPI library, which is what I am doing. MP3SPI adds support for the audio/mpeg MIME type, which is exactly what my Icecast stream is. All three jar files are correctly on my classpath, but when I use the same code given in their examples I still get an UnsupportedAudioFileException:

javax.sound.sampled.UnsupportedAudioFileException: could not get audio input stream from input URL
    at javax.sound.sampled.AudioSystem.getAudioInputStream(AudioSystem.java:1153)
    at DJUtils.testPlay(DJUtils.java:16)
    at DJ.play(DJ.java:13)
    at DJ.init(DJ.java:4)
    at Loader.main(Loader.java:69)

Here is my code:

public static void testPlay(){
    try {
        AudioInputStream in= AudioSystem.getAudioInputStream(new URL("http://localhost:8000/listen.m3u"));
        AudioInputStream din = null;
        AudioFormat baseFormat = in.getFormat();
        AudioFormat decodedFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                baseFormat.getSampleRate(),
                16,
                baseFormat.getChannels(),
                baseFormat.getChannels() * 2,
                baseFormat.getSampleRate(),
                false);
        din = AudioSystem.getAudioInputStream(decodedFormat, in);
        // Play now.
        rawplay(decodedFormat, din);
        in.close();
    } catch (Exception e){
        e.printStackTrace();
    }
}

private static void rawplay(AudioFormat targetFormat, AudioInputStream din) throws LineUnavailableException, IOException{
    try{
        byte[] data = new byte[4096];
        SourceDataLine line = getLine(targetFormat);
        if (line != null)
        {
            // Start
            line.start();
            int nBytesRead = 0, nBytesWritten = 0;
            while (nBytesRead != -1)
            {
                nBytesRead = din.read(data, 0, data.length);
                if (nBytesRead != -1) nBytesWritten = line.write(data, 0, nBytesRead);
            }
            // Stop
            line.drain();
            line.stop();
            line.close();
            din.close();
        }
    }catch(IOException e){
        e.printStackTrace();
    }
}

private static SourceDataLine getLine(AudioFormat audioFormat) throws LineUnavailableException{
    try{
        SourceDataLine res = null;
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
        res = (SourceDataLine) AudioSystem.getLine(info);
        res.open(audioFormat);
        return res;
    }catch(LineUnavailableException e){
        e.printStackTrace();
        return null;
    }
} 

The launch script for my project:

java -Dfile.encoding=Cp1252 -classpath bin;lib/libs.jar;lib/graphics.jar;lib/mp3spi/mp3spi.jar;lib/mp3spi/jl.jar;lib/mp3spi/tritonus.jar; Loader

My Icecast control panel says it is currently streaming audio/mpeg. I can reach the stream by opening the URL from my code in any media player. Can someone point out what I am doing wrong? Thanks!


1 Answer


The mp3spi library itself does not treat an m3u playlist file as a supported file.

Try the real stream URL that is listed inside the m3u file, i.e. a URL that points directly at the MP3 file or stream.
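As a rough sketch of what that looks like (plain Java, not part of mp3spi; the direct mount point and the playlist parsing below are assumptions based on the URL in the question, since an .m3u is just a text file whose non-comment lines are stream URLs):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class DirectStreamExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical: the .m3u playlist URL from the question.
        URL playlistUrl = new URL("http://localhost:8000/listen.m3u");

        // Read the playlist and take the first non-comment, non-blank line
        // as the direct stream URL (e.g. http://localhost:8000/listen).
        String streamUrl = null;
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(playlistUrl.openStream()))) {
            String line;
            while ((line = reader.readLine()) != null) {
                line = line.trim();
                if (!line.isEmpty() && !line.startsWith("#")) { // skip blanks and M3U directives
                    streamUrl = line;
                    break;
                }
            }
        }
        if (streamUrl == null) {
            throw new IllegalStateException("No stream URL found in playlist");
        }

        // Hand the direct stream URL to AudioSystem; mp3spi can identify an
        // MPEG/Shoutcast stream here, unlike the .m3u playlist itself.
        AudioInputStream in = AudioSystem.getAudioInputStream(new URL(streamUrl));
        System.out.println("Opened stream with format: " + in.getFormat());
        in.close();
    }
}

Once you have the direct stream URL, the decoding and playback code from the question should work unchanged.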

Check the function below. It comes straight from MpegAudioFileReader.java, which the mp3spi library uses to identify the format of the data stream you hand it via the URL. It does not recognize m3u files. You can review the sources at http://www.javazoom.net/mp3spi/sources.html if you want.

public AudioFileFormat getAudioFileFormat(InputStream inputStream, long mediaLength) throws UnsupportedAudioFileException, IOException
{
    if (TDebug.TraceAudioFileReader) TDebug.out(">MpegAudioFileReader.getAudioFileFormat(InputStream inputStream, long mediaLength): begin");
    HashMap aff_properties = new HashMap();
    HashMap af_properties = new HashMap();
    int mLength = (int) mediaLength;
    int size = inputStream.available();
    PushbackInputStream pis = new PushbackInputStream(inputStream, MARK_LIMIT);
    byte head[] = new byte[22];
    pis.read(head);
    if (TDebug.TraceAudioFileReader)
    {
        TDebug.out("InputStream : " + inputStream + " =>" + new String(head));
    }

    // Check for WAV, AU, and AIFF, Ogg Vorbis, Flac, MAC file formats.
    // Next check for Shoutcast (supported) and OGG (unsupported) streams.
    if ((head[0] == 'R') && (head[1] == 'I') && (head[2] == 'F') && (head[3] == 'F') && (head[8] == 'W') && (head[9] == 'A') && (head[10] == 'V') && (head[11] == 'E'))
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("RIFF/WAV stream found");
        int isPCM = ((head[21]<<8)&0x0000FF00) | ((head[20])&0x00000FF);
        if (weak == null)
        {
            if (isPCM == 1) throw new UnsupportedAudioFileException("WAV PCM stream found");
        }

    }
    else if ((head[0] == '.') && (head[1] == 's') && (head[2] == 'n') && (head[3] == 'd'))
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("AU stream found");
        if (weak == null) throw new UnsupportedAudioFileException("AU stream found");
    }
    else if ((head[0] == 'F') && (head[1] == 'O') && (head[2] == 'R') && (head[3] == 'M') && (head[8] == 'A') && (head[9] == 'I') && (head[10] == 'F') && (head[11] == 'F'))
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("AIFF stream found");
        if (weak == null) throw new UnsupportedAudioFileException("AIFF stream found");
    }
    else if (((head[0] == 'M') | (head[0] == 'm')) && ((head[1] == 'A') | (head[1] == 'a')) && ((head[2] == 'C') | (head[2] == 'c')))
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("APE stream found");
        if (weak == null) throw new UnsupportedAudioFileException("APE stream found");
    }
    else if (((head[0] == 'F') | (head[0] == 'f')) && ((head[1] == 'L') | (head[1] == 'l')) && ((head[2] == 'A') | (head[2] == 'a')) && ((head[3] == 'C') | (head[3] == 'c')))
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("FLAC stream found");
        if (weak == null) throw new UnsupportedAudioFileException("FLAC stream found");
    }
    // Shoutcast stream ?
    else if (((head[0] == 'I') | (head[0] == 'i')) && ((head[1] == 'C') | (head[1] == 'c')) && ((head[2] == 'Y') | (head[2] == 'y')))
    {
        pis.unread(head);
        // Load shoutcast meta data.
        loadShoutcastInfo(pis, aff_properties);
    }
    // Ogg stream ?
    else if (((head[0] == 'O') | (head[0] == 'o')) && ((head[1] == 'G') | (head[1] == 'g')) && ((head[2] == 'G') | (head[2] == 'g')))
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("Ogg stream found");
        if (weak == null) throw new UnsupportedAudioFileException("Ogg stream found");
    }
    // No, so pushback.
    else
    {
        pis.unread(head);
    }
    // MPEG header info.
    int nVersion = AudioSystem.NOT_SPECIFIED;
    int nLayer = AudioSystem.NOT_SPECIFIED;
    int nSFIndex = AudioSystem.NOT_SPECIFIED;
    int nMode = AudioSystem.NOT_SPECIFIED;
    int FrameSize = AudioSystem.NOT_SPECIFIED;
    int nFrameSize = AudioSystem.NOT_SPECIFIED;
    int nFrequency = AudioSystem.NOT_SPECIFIED;
    int nTotalFrames = AudioSystem.NOT_SPECIFIED;
    float FrameRate = AudioSystem.NOT_SPECIFIED;
    int BitRate = AudioSystem.NOT_SPECIFIED;
    int nChannels = AudioSystem.NOT_SPECIFIED;
    int nHeader = AudioSystem.NOT_SPECIFIED;
    int nTotalMS = AudioSystem.NOT_SPECIFIED;
    boolean nVBR = false;
    AudioFormat.Encoding encoding = null;
    try
    {
        Bitstream m_bitstream = new Bitstream(pis);
        aff_properties.put("mp3.header.pos", new Integer(m_bitstream.header_pos()));
        Header m_header = m_bitstream.readFrame();
        // nVersion = 0 => MPEG2-LSF (Including MPEG2.5), nVersion = 1 => MPEG1
        nVersion = m_header.version();
        if (nVersion == 2) aff_properties.put("mp3.version.mpeg", Float.toString(2.5f));
        else aff_properties.put("mp3.version.mpeg", Integer.toString(2 - nVersion));
        // nLayer = 1,2,3
        nLayer = m_header.layer();
        aff_properties.put("mp3.version.layer", Integer.toString(nLayer));
        nSFIndex = m_header.sample_frequency();
        nMode = m_header.mode();
        aff_properties.put("mp3.mode", new Integer(nMode));
        nChannels = nMode == 3 ? 1 : 2;
        aff_properties.put("mp3.channels", new Integer(nChannels));
        nVBR = m_header.vbr();
        af_properties.put("vbr", new Boolean(nVBR));
        aff_properties.put("mp3.vbr", new Boolean(nVBR));
        aff_properties.put("mp3.vbr.scale", new Integer(m_header.vbr_scale()));
        FrameSize = m_header.calculate_framesize();
        aff_properties.put("mp3.framesize.bytes", new Integer(FrameSize));
        if (FrameSize < 0) throw new UnsupportedAudioFileException("Invalid FrameSize : " + FrameSize);
        nFrequency = m_header.frequency();
        aff_properties.put("mp3.frequency.hz", new Integer(nFrequency));
        FrameRate = (float) ((1.0 / (m_header.ms_per_frame())) * 1000.0);
        aff_properties.put("mp3.framerate.fps", new Float(FrameRate));
        if (FrameRate < 0) throw new UnsupportedAudioFileException("Invalid FrameRate : " + FrameRate);
        if (mLength != AudioSystem.NOT_SPECIFIED)
        {
            aff_properties.put("mp3.length.bytes", new Integer(mLength));
            nTotalFrames = m_header.max_number_of_frames(mLength);
            aff_properties.put("mp3.length.frames", new Integer(nTotalFrames));
        }
        BitRate = m_header.bitrate();
        af_properties.put("bitrate", new Integer(BitRate));
        aff_properties.put("mp3.bitrate.nominal.bps", new Integer(BitRate));
        nHeader = m_header.getSyncHeader();
        encoding = sm_aEncodings[nVersion][nLayer - 1];
        aff_properties.put("mp3.version.encoding", encoding.toString());
        if (mLength != AudioSystem.NOT_SPECIFIED)
        {
            nTotalMS = Math.round(m_header.total_ms(mLength));
            aff_properties.put("duration", new Long((long) nTotalMS * 1000L));
        }
        aff_properties.put("mp3.copyright", new Boolean(m_header.copyright()));
        aff_properties.put("mp3.original", new Boolean(m_header.original()));
        aff_properties.put("mp3.crc", new Boolean(m_header.checksums()));
        aff_properties.put("mp3.padding", new Boolean(m_header.padding()));
        InputStream id3v2 = m_bitstream.getRawID3v2();
        if (id3v2 != null)
        {
            aff_properties.put("mp3.id3tag.v2", id3v2);
            parseID3v2Frames(id3v2, aff_properties);
        }
        if (TDebug.TraceAudioFileReader) TDebug.out(m_header.toString());
    }
    catch (Exception e)
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("not a MPEG stream:" + e.getMessage());
        throw new UnsupportedAudioFileException("not a MPEG stream:" + e.getMessage());
    }
    // Deeper checks ?
    int cVersion = (nHeader >> 19) & 0x3;
    if (cVersion == 1)
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("not a MPEG stream: wrong version");
        throw new UnsupportedAudioFileException("not a MPEG stream: wrong version");
    }
    int cSFIndex = (nHeader >> 10) & 0x3;
    if (cSFIndex == 3)
    {
        if (TDebug.TraceAudioFileReader) TDebug.out("not a MPEG stream: wrong sampling rate");
        throw new UnsupportedAudioFileException("not a MPEG stream: wrong sampling rate");
    }
    // Look up for ID3v1 tag
    if ((size == mediaLength) && (mediaLength != AudioSystem.NOT_SPECIFIED))
    {
        FileInputStream fis = (FileInputStream) inputStream;
        byte[] id3v1 = new byte[128];
        long bytesSkipped = fis.skip(inputStream.available() - id3v1.length);
        int read = fis.read(id3v1, 0, id3v1.length);
        if ((id3v1[0] == 'T') && (id3v1[1] == 'A') && (id3v1[2] == 'G'))
        {
            parseID3v1Frames(id3v1, aff_properties);
        }
    }
    AudioFormat format = new MpegAudioFormat(encoding, (float) nFrequency, AudioSystem.NOT_SPECIFIED // SampleSizeInBits - The size of a sample
            , nChannels // Channels - The number of channels
            , -1 // The number of bytes in each frame
            , FrameRate // FrameRate - The number of frames played or recorded per second
            , true, af_properties);
    return new MpegAudioFileFormat(MpegFileFormatType.MP3, format, nTotalFrames, mLength, aff_properties);
}