
I have been trying to produce an "flv" video file with the following sequence of steps:

av_register_all();

// Open video file
if (avformat_open_input(&pFormatCtx, "6.mp4", NULL, NULL) != 0)
    return -1; // Couldn't open file

// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    return -1; // Couldn't find stream information

// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, "input_file.mp4", 0);

// Find the first video stream
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        videoStream = i;
        break;
    }
if (videoStream == -1)
    return -1; // Didn't find a video stream

// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;

// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
}
// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    return -1; // Could not open codec

// Allocate video frame
pFrame = avcodec_alloc_frame();

// Allocate an AVFrame structure
pFrameYUV420 = avcodec_alloc_frame();
if (pFrameYUV420 == NULL)
    return -1;

// Determine required buffer size and allocate buffer
numBytes = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

// Assign appropriate parts of buffer to image planes in pFrameYUV420
// Note that pFrameYUV420 is an AVFrame, but AVFrame is a superset of AVPicture
avpicture_fill((AVPicture *) pFrameYUV420, buffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

// Setup scaler
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, SWS_BILINEAR, 0, 0, 0);
if (img_convert_ctx == NULL) {
    fprintf(stderr, "Cannot initialize the conversion context!\n");
    exit(1);
}

// Setup encoder/muxing now
filename = "output_file.flv";
fmt = av_guess_format("flv", filename, NULL);
if (fmt == NULL) {
    printf("Could not guess format.\n");
    return -1;
}
/* allocate the output media context */
oc = avformat_alloc_context();
if (oc == NULL) {
    printf("could not allocate context.\n");
    return -1;
}
oc->oformat = fmt;
snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

video_st = NULL;
if (fmt->video_codec != AV_CODEC_ID_NONE) {
    video_st = add_stream(oc, &video_codec, fmt->video_codec);
}

// Let's see some information about our format
av_dump_format(oc, 0, filename, 1);

/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
    ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
        return 1;
    }
}
/* Write the stream header, if any. */
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
    fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
    return 1;
}

// Setup x264 params
x264_param_t param;
x264_param_default_preset(&param, "veryfast", "zerolatency");
param.i_threads = 1;
param.i_width = video_st->codec->width;
param.i_height = video_st->codec->height;
param.i_fps_num = STREAM_FRAME_RATE; // 30 fps, same as video
param.i_fps_den = 1;
// Intra refres:
param.i_keyint_max = STREAM_FRAME_RATE;
param.b_intra_refresh = 1;
// Rate control:
param.rc.i_rc_method = X264_RC_CRF;
param.rc.f_rf_constant = 25;
param.rc.f_rf_constant_max = 35;
// For streaming:
param.b_repeat_headers = 1;
param.b_annexb = 1;
x264_param_apply_profile(&param, "baseline");

x264_t* encoder = x264_encoder_open(&param);
x264_picture_t pic_in, pic_out;
x264_picture_alloc(&pic_in, X264_CSP_I420, video_st->codec->width, video_st->codec->height);

x264_nal_t* nals;
int i_nals;

// The loop:
// 1. Read frames
// 2. Decode the frame
// 3. Attempt to re-encode using x264
// 4. Write the x264 encoded frame using av_interleaved_write_frame
while (av_read_frame(pFormatCtx, &packet) >= 0) {
    // Is this a packet from the video stream?
    if (packet.stream_index == videoStream) {
        // Decode video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

        // Did we get a video frame?
        if (frameFinished) {
            sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pic_in.img.plane, pic_in.img.i_stride);
            int frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);

            if (frame_size >= 0) {
                if (i_nals < 0)
                    printf("invalid frame size: %d\n", i_nals);
                // write out NALs
                for (i = 0; i < i_nals; i++) {
                    // initialize a packet
                    AVPacket p;
                    av_init_packet(&p);
                    p.data = nals[i].p_payload;
                    p.size = nals[i].i_payload;
                    p.stream_index = video_st->index;
                    p.flags = AV_PKT_FLAG_KEY;
                    p.pts = AV_NOPTS_VALUE;
                    p.dts = AV_NOPTS_VALUE;
                    ret = av_interleaved_write_frame(oc, &p);
                }
            }
            printf("encoded frame #%d\n", frame_count);
            frame_count++;
        }
    }

    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
}

// Now we free up resources used/close codecs, and finally close our program.

Here is the implementation of the add_stream() function:

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id) {
    AVCodecContext *c;
    AVStream *st;
    int r;
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }
    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams - 1;
    c = st->codec;
    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        st->id = 1;
        c->sample_fmt = AV_SAMPLE_FMT_FLTP;
        c->bit_rate = 64000;
        c->sample_rate = 44100;
        c->channels = 2;
        break;
    case AVMEDIA_TYPE_VIDEO:
        avcodec_get_context_defaults3(c, *codec);
        c->codec_id = codec_id;
        c->bit_rate = 500*1000;
        //c->rc_min_rate = 500*1000;
        //c->rc_max_rate = 500*1000;
        //c->rc_buffer_size = 500*1000;
        /* Resolution must be a multiple of two. */
        c->width = 1280;
        c->height = 720;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;
    default:
        break;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    return st;
}

After encoding finishes, I check the output file output_file.flv. I notice that it is very large, 101MB, and it cannot be played. If I decode/encode the input file with the ffmpeg command-line tool instead, I get an output file of about 83MB (roughly the same size as the original .mp4 used as input). Also, an 83MB output produced using only the ffmpeg C API, without x264 for the encoding step, plays back fine. Does anyone know where I am going wrong? I have been digging into this for a few days with no luck :(. I feel like I am close to getting it to work, but I just can't see what I am doing wrong. Thanks!


1 Answer


To produce a valid AVPacket, you should write all of the NALs into the same packet, as is done in http://ffmpeg.org/doxygen/trunk/libx264_8c_source.html (see the encode_nals and X264_frame functions).
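
For illustration, here is a minimal sketch of that idea using the question's variables and the same old FFmpeg/x264 API. The helper name write_encoded_frame() and the timestamp handling through pic_out.i_pts/i_dts are assumptions made for this example, not code taken from libx264.c; the key point is that x264 lays the NAL payloads of one encoded frame out back to back in memory, so the whole frame can go into a single AVPacket:

#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>
#include <x264.h>

/* Write one encoded x264 frame as a single AVPacket.
 * frame_size is the return value of x264_encoder_encode(). */
static int write_encoded_frame(AVFormatContext *oc, AVStream *video_st,
                               x264_nal_t *nals, int i_nals,
                               int frame_size, x264_picture_t *pic_out)
{
    AVPacket p;

    if (frame_size <= 0 || i_nals <= 0)
        return 0; /* encoder produced no output for this frame (delayed) */

    av_init_packet(&p);

    /* All NAL payloads from one encode call are sequential in memory,
     * so the complete frame starts at nals[0].p_payload and is
     * frame_size bytes long. */
    p.data = nals[0].p_payload;
    p.size = frame_size;
    p.stream_index = video_st->index;

    if (pic_out->b_keyframe)
        p.flags |= AV_PKT_FLAG_KEY;

    /* Rescale x264's timestamps into the stream's time base; this assumes
     * pic_in.i_pts was set (e.g. to an increasing frame counter) before
     * each x264_encoder_encode() call. */
    p.pts = av_rescale_q(pic_out->i_pts,
                         video_st->codec->time_base, video_st->time_base);
    p.dts = av_rescale_q(pic_out->i_dts,
                         video_st->codec->time_base, video_st->time_base);

    return av_interleaved_write_frame(oc, &p);
}

In the question's read/decode/encode loop, this would replace the per-NAL packet loop: call it once per x264_encoder_encode() result instead of creating one AVPacket per NAL.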

Answered on 2013-01-21T11:28:14.403