
I am trying to concatenate several mp4 audio files (each containing a single audio track, all recorded with the same MediaRecorder and the same parameters) into one, using the following function:

@TargetApi(Build.VERSION_CODES.JELLY_BEAN_MR2)
public static boolean concatenateFiles(File dst, File... sources) {
    if ((sources == null) || (sources.length == 0)) {
        return false;
    }

    boolean result;
    MediaExtractor extractor = null;
    MediaMuxer muxer = null;
    try {
        // Set up MediaMuxer for the destination.
        muxer = new MediaMuxer(dst.getPath(), MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

        // Copy the samples from MediaExtractor to MediaMuxer.
        boolean sawEOS = false;
        int bufferSize = MAX_SAMPLE_SIZE;
        int frameCount = 0;
        int offset = 100;

        ByteBuffer dstBuf = ByteBuffer.allocate(bufferSize);
        BufferInfo bufferInfo = new BufferInfo();

        long timeOffsetUs = 0;
        int dstTrackIndex = -1;

        for (int fileIndex = 0; fileIndex < sources.length; fileIndex++) {
            int numberOfSamplesInSource = getNumberOfSamples(sources[fileIndex]);
            if (VERBOSE) {
                Log.d(TAG, String.format("Source file: %s", sources[fileIndex].getPath()));
            }

            // Set up MediaExtractor to read from the source.
            extractor = new MediaExtractor();
            extractor.setDataSource(sources[fileIndex].getPath());

            // Set up the tracks.
            SparseIntArray indexMap = new SparseIntArray(extractor.getTrackCount());
            for (int i = 0; i < extractor.getTrackCount(); i++) {
                extractor.selectTrack(i);
                MediaFormat format = extractor.getTrackFormat(i);
                if (dstTrackIndex < 0) {
                    dstTrackIndex = muxer.addTrack(format);
                    muxer.start();
                }
                indexMap.put(i, dstTrackIndex);
            }

            long lastPresentationTimeUs = 0;
            int currentSample = 0;

            while (!sawEOS) {
                bufferInfo.offset = offset;
                bufferInfo.size = extractor.readSampleData(dstBuf, offset);

                if (bufferInfo.size < 0) {
                    sawEOS = true;
                    bufferInfo.size = 0;
                    timeOffsetUs += (lastPresentationTimeUs + APPEND_DELAY);
                }
                else {
                    lastPresentationTimeUs = extractor.getSampleTime();
                    bufferInfo.presentationTimeUs = extractor.getSampleTime() + timeOffsetUs;
                    bufferInfo.flags = extractor.getSampleFlags();
                    int trackIndex = extractor.getSampleTrackIndex();

                    if ((currentSample < numberOfSamplesInSource) || (fileIndex == sources.length - 1)) {
                        muxer.writeSampleData(indexMap.get(trackIndex), dstBuf, bufferInfo);
                    }
                    extractor.advance();

                    frameCount++;
                    currentSample++;
                    if (VERBOSE) {
                        Log.d(TAG, "Frame (" + frameCount + ") " +
                                "PresentationTimeUs:" + bufferInfo.presentationTimeUs +
                                " Flags:" + bufferInfo.flags +
                                " TrackIndex:" + trackIndex +
                                " Size(KB) " + bufferInfo.size / 1024);
                    }
                }
            }
            extractor.release();
            extractor = null;
        }

        result = true;
    }
    catch (IOException e) {
        result = false;
    }
    finally {
        if (extractor != null) {
            extractor.release();
        }
        if (muxer != null) {
            muxer.stop();
            muxer.release();
        }
    }
    return result;
}

@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
public static int getNumberOfSamples(File src) {
    MediaExtractor extractor = new MediaExtractor();
    int result;
    try {
        extractor.setDataSource(src.getPath());
        extractor.selectTrack(0);

        result = 0;
        while (extractor.advance()) {
            result ++;
        }
    }
    catch(IOException e) {
        result = -1;
    }
    finally {
        extractor.release();
    }
    return result;
}

The code compiles and runs, but when I play back the resulting file I only hear the contents of the first file. I can't see what I'm doing wrong.

However, after Marlon pointed me in that direction, I noticed that the messages I get from MediaMuxer are somewhat odd. Here they are:

05-04 15:30:01.869: D/MediaMuxerTest(5455): Source file: /storage/emulated/0/Android/data/de.absprojects.catalogizer/files/copy.mp4
05-04 15:30:01.889: D/QCUtils(5455): extended extractor not needed, return default
05-04 15:30:01.889: I/MPEG4Writer(5455): limits: 2147483647/0 bytes/us, bit rate: -1 bps and the estimated moov size 3072 bytes
05-04 15:30:01.889: I/MPEG4Writer(5455): setStartTimestampUs: 0
05-04 15:30:01.889: I/MPEG4Writer(5455): Earliest track starting time: 0
05-04 15:30:01.889: D/MediaMuxerTest(5455): Frame (1) PresentationTimeUs:0 Flags:1 TrackIndex:0 Size(KB) 0
05-04 15:30:01.889: D/MediaMuxerTest(5455): Frame (2) PresentationTimeUs:23219 Flags:1 TrackIndex:0 Size(KB) 0
05-04 15:30:01.889: D/MediaMuxerTest(5455): Frame (3) PresentationTimeUs:46439 Flags:1 TrackIndex:0 Size(KB) 0
[...]
05-04 15:30:01.959: D/MediaMuxerTest(5455): Frame (117) PresentationTimeUs:2693401 Flags:1 TrackIndex:0 Size(KB) 0
05-04 15:30:01.959: D/MediaMuxerTest(5455): Frame (118) PresentationTimeUs:2716621 Flags:1 TrackIndex:0 Size(KB) 0
05-04 15:30:01.959: D/MediaMuxerTest(5455): Frame (119) PresentationTimeUs:2739841 Flags:1 TrackIndex:0 Size(KB) 0
05-04 15:30:01.959: D/MediaMuxerTest(5455): Frame (120) PresentationTimeUs:2763061 Flags:1 TrackIndex:0 Size(KB) 0
05-04 15:30:01.979: D/QCUtils(5455): extended extractor not needed, return default
05-04 15:30:01.979: D/MediaMuxerTest(5455): Source file: /storage/emulated/0/Android/data/de.absprojects.catalogizer/files/temp.mp4
05-04 15:30:01.979: I/MPEG4Writer(5455): Received total/0-length (120/0) buffers and encoded 120 frames. - audio
05-04 15:30:01.979: I/MPEG4Writer(5455): Audio track drift time: 0 us
05-04 15:30:01.979: D/MPEG4Writer(5455): Setting Audio track to done
05-04 15:30:01.979: D/MPEG4Writer(5455): Stopping Audio track
05-04 15:30:01.979: D/MPEG4Writer(5455): Stopping Audio track source
05-04 15:30:01.979: D/MPEG4Writer(5455): Audio track stopped
05-04 15:30:01.979: D/MPEG4Writer(5455): Stopping writer thread
05-04 15:30:01.979: D/MPEG4Writer(5455): 0 chunks are written in the last batch
05-04 15:30:01.979: D/MPEG4Writer(5455): Writer thread stopped
05-04 15:30:01.979: D/MPEG4Writer(5455): Stopping Audio track
05-04 15:30:01.979: E/MPEG4Writer(5455): Stop() called but track is not started
05-04 15:30:01.999: D/QCUtils(5455): extended extractor not needed, return default
05-04 15:30:01.999: D/copyOriginalFile()(5455): 120 samples in original file
05-04 15:30:02.009: D/QCUtils(5455): extended extractor not needed, return default
05-04 15:30:02.019: D/copyOriginalFile()(5455): 120 samples in copied file
05-04 15:30:02.019: W/MediaRecorder(5455): mediarecorder went away with unhandled events
05-04 15:30:02.099: I/dalvikvm(5455): Jit: resizing JitTable from 4096 to 8192

It seems that after the data from the first file has been copied, MPEG4Writer (why not MediaMuxer?) stops the track and writes no further data. How can I prevent this? Do I have to manipulate the headers directly, and if so, how?

Any help would be greatly appreciated.

Regards,

Christian


3 Answers


Strictly speaking, you cannot join two encoded audio tracks: each track may be encoded with different parameters, and those parameters are stored in its header. If both files were created by the same encoder/muxer with identical encoding parameters, so that the two headers are equal, it can work, but that is a fairly strict limitation. As far as I can see, you set the audio format (which contains the header) of the muxer's track from the first file's format. So if the second file's audio format differs, it may cause various kinds of errors that leave the second file's audio broken.

Try putting the same source file into the dst file twice, as both the first and the second source. If that works, the problem is in the headers; if not, it is somewhere else, I think.
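A minimal sketch of that sanity check, in Java to match the question's code (the helper names here are illustrative, not from the answer): read the first audio track format of each source and compare the fields that must agree before a plain stream copy can work.

static boolean haveMatchingAudioFormats(File first, File second) throws IOException {
    MediaFormat a = firstAudioFormat(first);
    MediaFormat b = firstAudioFormat(second);
    // The MIME type, sample rate and channel count must match for a stream copy.
    return a != null && b != null
            && a.getString(MediaFormat.KEY_MIME).equals(b.getString(MediaFormat.KEY_MIME))
            && a.getInteger(MediaFormat.KEY_SAMPLE_RATE) == b.getInteger(MediaFormat.KEY_SAMPLE_RATE)
            && a.getInteger(MediaFormat.KEY_CHANNEL_COUNT) == b.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
}

static MediaFormat firstAudioFormat(File src) throws IOException {
    MediaExtractor extractor = new MediaExtractor();
    try {
        extractor.setDataSource(src.getPath());
        for (int i = 0; i < extractor.getTrackCount(); i++) {
            MediaFormat format = extractor.getTrackFormat(i);
            String mime = format.getString(MediaFormat.KEY_MIME);
            if (mime != null && mime.startsWith("audio/")) {
                return format;
            }
        }
        return null;
    } finally {
        extractor.release();
    }
}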

answered 2014-04-30T19:07:13.553

I wanted to do the same thing, thought about it a lot, and it just doesn't work that way. I wish it did, because I need it too. It's like trying to squeeze two bottles together and expecting them to become one bigger bottle. You need to drink the... beer? out of each one (decode the audio in each file) and then pour it into a new bottle (encode the audio again, feeding it from the second file once the first is done). Once a bottle has been capped, you can't add any more beer to it.
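To make the bottle analogy concrete, here is a rough sketch (not from the answer) of the decode half, assuming API 21+ and a single audio track at index 0, written in Java like the question's code. It pulls compressed samples out of one source with MediaExtractor and decodes them to PCM with MediaCodec; feeding the PCM into a shared AAC encoder and MediaMuxer is only indicated in a comment.

static void decodeToPcm(File src) throws IOException {
    MediaExtractor extractor = new MediaExtractor();
    extractor.setDataSource(src.getPath());
    extractor.selectTrack(0); // assumes the only track is the audio track

    MediaFormat format = extractor.getTrackFormat(0);
    MediaCodec decoder = MediaCodec.createDecoderByType(format.getString(MediaFormat.KEY_MIME));
    decoder.configure(format, null, null, 0);
    decoder.start();

    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    boolean inputDone = false;
    boolean outputDone = false;
    while (!outputDone) {
        if (!inputDone) {
            int inIndex = decoder.dequeueInputBuffer(10_000);
            if (inIndex >= 0) {
                ByteBuffer inBuf = decoder.getInputBuffer(inIndex); // API 21+
                int size = extractor.readSampleData(inBuf, 0);
                if (size < 0) {
                    // No more samples in this file: signal end of stream to the decoder.
                    decoder.queueInputBuffer(inIndex, 0, 0, 0,
                            MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                    inputDone = true;
                } else {
                    decoder.queueInputBuffer(inIndex, 0, size, extractor.getSampleTime(), 0);
                    extractor.advance();
                }
            }
        }
        int outIndex = decoder.dequeueOutputBuffer(info, 10_000);
        if (outIndex >= 0) {
            ByteBuffer pcm = decoder.getOutputBuffer(outIndex); // raw PCM samples
            // ... queue "pcm" into a single AAC encoder shared across all source
            // files here, and mux the encoder's output with one MediaMuxer ...
            decoder.releaseOutputBuffer(outIndex, false);
            if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                outputDone = true;
            }
        }
    }
    decoder.stop();
    decoder.release();
    extractor.release();
}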

answered 2017-09-27T20:34:35.900

This code works if both video files have the same video resolution, video codec, fps, audio sample rate, and audio codec.

import android.media.MediaCodec
import android.media.MediaCodec.BufferInfo
import android.media.MediaExtractor
import android.media.MediaFormat
import android.media.MediaMetadataRetriever
import android.media.MediaMuxer
import java.io.File
import java.io.IOException
import java.nio.ByteBuffer

private const val MAX_SAMPLE_SIZE = 256 * 1024
private const val VERBOSE = false

fun concatenateFiles(dst: File, sources: ArrayList<File>): Boolean {

    println("---------------------")
    println("concatenateFiles")
    println("---------------------")

    if (sources.isEmpty()) {

        return false

    }

    var result : Boolean
    var muxer : MediaMuxer? = null

    try {

        // Set up MediaMuxer for the destination.

        muxer = MediaMuxer(dst.path, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4)

        // Copy the samples from MediaExtractor to MediaMuxer.

        var videoFormat : MediaFormat? = null
        var audioFormat : MediaFormat? = null

        var idx = 0

        var muxerStarted : Boolean = false

        var videoTrackIndex = -1
        var audioTrackIndex = -1

        var totalDuration = 0

        for (file in sources) {

            println("-------------------")
            println("file: $idx")
            println("-------------------")

            // new

            // MediaMetadataRetriever

            val m = MediaMetadataRetriever()
            m.setDataSource(file.absolutePath)

            var trackDuration : Int = 0

            try {

                trackDuration = m.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION)!!.toInt()

            } catch (e: java.lang.Exception) {

                // Leave trackDuration at 0 if the duration metadata is missing or unparsable.

            }

            // extractorVideo

            var extractorVideo = MediaExtractor()

            extractorVideo.setDataSource(file.path)

            val tracks = extractorVideo.trackCount

            for (i in 0 until tracks) {

                val mf = extractorVideo.getTrackFormat(i)

                val mime = mf.getString(MediaFormat.KEY_MIME)

                println("mime: $mime")

                if (mime!!.startsWith("video/")) {

                    extractorVideo.selectTrack(i)
                    videoFormat = extractorVideo.getTrackFormat(i)

                    break

                }

            }

            // extractorAudio

            var extractorAudio = MediaExtractor()

            extractorAudio.setDataSource(file.path)

            for (i in 0 until tracks) {

                val mf = extractorAudio.getTrackFormat(i)

                val mime = mf.getString(MediaFormat.KEY_MIME)

                if (mime!!.startsWith("audio/")) {

                    extractorAudio.selectTrack(i)
                    audioFormat = extractorAudio.getTrackFormat(i)

                    break

                }

            }

            // audioTracks

            val audioTracks = extractorAudio.trackCount

            println("audioTracks: $audioTracks")

            // videoTrackIndex

            if (videoTrackIndex == -1) {

                videoTrackIndex = muxer.addTrack(videoFormat!!)

            }

            // audioTrackIndex

            if (audioTrackIndex == -1) {

                audioTrackIndex = muxer.addTrack(audioFormat!!)

            }

            var sawEOS = false
            var sawAudioEOS = false
            val bufferSize = MAX_SAMPLE_SIZE
            val dstBuf = ByteBuffer.allocate(bufferSize)
            val offset = 0
            val bufferInfo = BufferInfo()

            // start muxer

            println("muxer.start()")

            if (!muxerStarted) {

                muxer.start()

                muxerStarted = true

            }

            // write video

            println("write video")

            while (!sawEOS) {

                bufferInfo.offset = offset
                bufferInfo.size = extractorVideo.readSampleData(dstBuf, offset)

                if (bufferInfo.size < 0) {

                    //println("videoBufferInfo.size < 0")

                    sawEOS = true
                    bufferInfo.size = 0

                } else {

                    bufferInfo.presentationTimeUs = extractorVideo.sampleTime + totalDuration
                    bufferInfo.flags = MediaCodec.BUFFER_FLAG_KEY_FRAME
                    muxer.writeSampleData(videoTrackIndex, dstBuf, bufferInfo)
                    extractorVideo.advance()

                }

            }

            // write audio

            println("write audio")

            val audioBuf = ByteBuffer.allocate(bufferSize)

            while (!sawAudioEOS) {

                bufferInfo.offset = offset
                bufferInfo.size = extractorAudio.readSampleData(audioBuf, offset)

                if (bufferInfo.size < 0) {

                    //println("audioBufferInfo.size < 0")

                    sawAudioEOS = true
                    bufferInfo.size = 0

                } else {

                    bufferInfo.presentationTimeUs = extractorAudio.sampleTime + totalDuration
                    bufferInfo.flags = MediaCodec.BUFFER_FLAG_KEY_FRAME
                    muxer.writeSampleData(audioTrackIndex, audioBuf, bufferInfo)
                    extractorAudio.advance()

                }

            }

            extractorVideo.release()
            extractorAudio.release()

            // Accumulate this file's duration, converting ms (MediaMetadataRetriever) to µs.

            totalDuration += (trackDuration * 1_000)

            if (VERBOSE) {
                println("PresentationTimeUs:" + bufferInfo.presentationTimeUs)
                println("totalDuration: $totalDuration")
            }

            // increment file index

            idx += 1

        }

        result = true

    } catch (e: IOException) {

        result = false

    } finally {

        if (muxer != null) {
            muxer.stop()
            muxer.release()
        }

    }

    return result

}
answered 2021-12-01T20:27:55.077