
I am working on an iOS app using Xcode 7.3.

I receive H.264 data from an IP camera over UDP; the data decodes and displays correctly (decoded with FFmpeg). Now I want to use FFmpeg to mux the raw H.264 data into an MP4 file (some users may want to record what they are watching on their phone). The code runs without any errors, and the resulting file plays fine in QuickTime on my computer. However, when it is played on an iPhone with the default video player, it does not play correctly. Here is my code.

I hope someone can tell me what to do. Thanks!

Init

AVFormatContext *formatContext;
AVOutputFormat *outputFormat;
AVStream *video_st;
int STREAM_FRAME_RATE = 15;
unsigned long video_PTS;
int initRecorder(char *fileName, int width, int height) {
    video_st = NULL;
    video_PTS = 0;

    av_register_all();

    outputFormat = av_guess_format(NULL, fileName, NULL);
    if (!outputFormat) {
        zj_printf("av_guess_format -> fail\n");
        return -1;
    }
    outputFormat->video_codec = AV_CODEC_ID_H264;

    avformat_alloc_output_context2(&formatContext, NULL, NULL, fileName);
    if (!formatContext) {
        zj_printf("avformat_alloc_context -> fail\n");
        return -2;
    }
    formatContext->oformat = outputFormat;
    strcpy(formatContext->filename, fileName);

    video_st = add_video_stream(formatContext, outputFormat, width, height);
    if (!video_st || open_video(formatContext, video_st)) {
        zj_printf("Could not open video codec\n");
        return -3;
    }

    av_dump_format(formatContext, 0, fileName, 1);
    if (!(outputFormat->flags & AVFMT_NOFILE)) {
        if (avio_open(&formatContext->pb, fileName, AVIO_FLAG_READ_WRITE) < 0) {
            zj_printf("could not open file: %s\n", fileName);
            return -7;
        }
    }

    /* write the stream header, if any */
    if (avformat_write_header(formatContext, NULL)) {
        zj_printf("avformat_write_header -> fail\n");
    }

    return 0;
}

Add the video stream and open it

static AVStream * add_video_stream(AVFormatContext *pFormatContext, AVOutputFormat *pOutputFormat, int wight, int height) {

    AVStream *stream = avformat_new_stream(pFormatContext, NULL);
    if (!stream) {
        zj_fprintf(stderr, "Could not alloc stream\n");
        return NULL;
    }
    stream->id = 0;

    AVCodecContext *codecContext = stream->codec;
    codecContext->codec_id = pOutputFormat->video_codec;
    codecContext->codec_type = AVMEDIA_TYPE_VIDEO;

    /* resolution must be a multiple of two */
    codecContext->width = wight;
    codecContext->height = height;
    /* time base: this is the fundamental unit of time (in seconds) in terms
     of which frame timestamps are represented. for fixed-fps content,
     timebase should be 1/framerate and timestamp increments should be
     identically 1. */
    if (wight==1280 && height == 720) {
        codecContext->bit_rate = 512000;
        STREAM_FRAME_RATE = 15;
    } else {
        codecContext->bit_rate = 384000;
        STREAM_FRAME_RATE = 20;
    }
    codecContext->time_base = (AVRational){1,STREAM_FRAME_RATE};
    stream->time_base = (AVRational){1,STREAM_FRAME_RATE};
    codecContext->max_b_frames = 0;
    codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
    // these are the encoding params, here we do not need them
    // codecContext->gop_size = 12;   //10
    // codecContext->me_range = 16;
    // codecContext->max_qdiff = 4;
    // codecContext->qmin = 10;
    // codecContext->qmax = 31;

    if (pFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
        codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return stream;
}

static int open_video(AVFormatContext *pFormatContext, AVStream *pStream) {
    /* find the video encoder */
    AVCodec *codec = avcodec_find_encoder(pStream->codec->codec_id);
    if (!codec) {
        return -1;
    }

    /* open the codec */
    if (avcodec_open2(pStream->codec, codec, NULL)) {
        return -2;
    }

    return 0;
}

Write a video frame

static int write_video_frame(char *buffer, int size) {
    int ret = 0;

    if (size > 0) {
        AVPacket mAVPacket;
        av_init_packet(&mAVPacket);
        mAVPacket.flags = isIFrame(buffer, size);
        mAVPacket.stream_index = video_st->index;

        mAVPacket.data = buffer;
        mAVPacket.size = size;
        mAVPacket.pts = video_PTS;
        mAVPacket.dts = video_PTS;
        video_PTS += 1;

        mAVPacket.pts = av_rescale_q_rnd(mAVPacket.pts, video_st->codec->time_base, video_st->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        mAVPacket.dts = av_rescale_q_rnd(mAVPacket.dts, video_st->codec->time_base, video_st->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        mAVPacket.duration = 0;
        mAVPacket.pos = -1;

        ret = av_interleaved_write_frame(formatContext, &mAVPacket);

        av_packet_unref(&mAVPacket);
    } else {
        ret = -2;
    }

    if (ret != 0) {
        zj_printf("av_write_frame error:%d\n", ret);
    }

    return ret;
}

Set the extradata on the codec context

unsigned char sps_pps[23] = {0x00, 0x00, 0x00, 0x01, 0x67, 0x64, 0x00, 0x29, 0xac, 0x1b, 0x1a, 0xc1, 0xe0, 0x51, 0x90, 0x00, 0x00, 0x00, 0x01, 0x68, 0xea, 0x43, 0xcb};
codecContext->extradata_size = 23;
codecContext->extradata = av_malloc(23 + AV_INPUT_BUFFER_PADDING_SIZE);
if (codecContext->extradata == NULL) {
    printf("could not av_malloc the video params extradata!\n");
    return -1;
}
memcpy(codecContext->extradata, sps_pps, 23);

1 Answer


Your bitstream is in Annex B format. You must convert it to the MP4 (AVCC) format by replacing the start codes with NAL-unit length values. You must also populate the extradata in the AVCodecContext. See: Possible Locations for Sequence/Picture Parameter Set(s) for H.264 Stream
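To make those two steps concrete, here is a minimal sketch in C. It is not code from the question's project: next_start_code, annexb_to_avcc and build_avcc are hypothetical helpers, and the avcC builder assumes exactly one SPS and one PPS NAL unit (passed without start codes). Under those assumptions, the converted buffer is what write_video_frame would put into mAVPacket.data, and the avcC record is what codecContext->extradata should hold (instead of the Annex B SPS/PPS blob shown in the question) before avformat_write_header is called.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Find the next Annex B start code (00 00 01 or 00 00 00 01) at or after
 * offset `pos`. Returns its offset, or `size` if none is found; *sc_len
 * receives the start-code length (3 or 4). */
static size_t next_start_code(const uint8_t *buf, size_t size, size_t pos, int *sc_len) {
    for (size_t i = pos; i + 3 <= size; i++) {
        if (buf[i] == 0 && buf[i + 1] == 0) {
            if (buf[i + 2] == 1) { *sc_len = 3; return i; }
            if (i + 4 <= size && buf[i + 2] == 0 && buf[i + 3] == 1) { *sc_len = 4; return i; }
        }
    }
    *sc_len = 0;
    return size;
}

/* Convert one Annex B access unit to AVCC/MP4 form: each NAL unit gets a
 * 4-byte big-endian length prefix instead of a start code.
 * Returns a malloc'ed buffer (caller frees) and sets *out_size, or NULL. */
static uint8_t *annexb_to_avcc(const uint8_t *in, size_t in_size, size_t *out_size) {
    /* Worst case: every 3-byte start code grows into a 4-byte length field. */
    uint8_t *out = malloc(in_size + in_size / 3 + 4);
    if (!out) return NULL;
    size_t o = 0;
    int sc_len = 0;
    size_t nal_start = next_start_code(in, in_size, 0, &sc_len);
    while (nal_start < in_size) {
        nal_start += sc_len;                     /* skip the start code        */
        int next_sc_len = 0;
        size_t nal_end = next_start_code(in, in_size, nal_start, &next_sc_len);
        size_t nal_size = nal_end - nal_start;
        out[o++] = (uint8_t)(nal_size >> 24);    /* 4-byte big-endian length   */
        out[o++] = (uint8_t)(nal_size >> 16);
        out[o++] = (uint8_t)(nal_size >> 8);
        out[o++] = (uint8_t)(nal_size);
        memcpy(out + o, in + nal_start, nal_size);
        o += nal_size;
        nal_start = nal_end;
        sc_len = next_sc_len;
    }
    *out_size = o;
    return out;
}

/* Build a minimal AVCDecoderConfigurationRecord ("avcC") from one raw SPS and
 * one raw PPS NAL unit (no start codes). This is the form the MP4 muxer
 * expects in extradata. Returns a malloc'ed buffer and sets *out_size. */
static uint8_t *build_avcc(const uint8_t *sps, size_t sps_size,
                           const uint8_t *pps, size_t pps_size, size_t *out_size) {
    if (sps_size < 4) return NULL;
    uint8_t *avcc = malloc(11 + sps_size + pps_size);
    if (!avcc) return NULL;
    size_t o = 0;
    avcc[o++] = 1;                          /* configurationVersion              */
    avcc[o++] = sps[1];                     /* AVCProfileIndication              */
    avcc[o++] = sps[2];                     /* profile_compatibility             */
    avcc[o++] = sps[3];                     /* AVCLevelIndication                */
    avcc[o++] = 0xFF;                       /* reserved + lengthSizeMinusOne = 3 */
    avcc[o++] = 0xE1;                       /* reserved + 1 SPS                  */
    avcc[o++] = (uint8_t)(sps_size >> 8);
    avcc[o++] = (uint8_t)(sps_size);
    memcpy(avcc + o, sps, sps_size); o += sps_size;
    avcc[o++] = 1;                          /* 1 PPS                             */
    avcc[o++] = (uint8_t)(pps_size >> 8);
    avcc[o++] = (uint8_t)(pps_size);
    memcpy(avcc + o, pps, pps_size); o += pps_size;
    *out_size = o;
    return avcc;
}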

Answered on 2016-08-18T14:25:13.503