3

我使用 ffmpeg 的库编写了一个小程序。执行以下操作-

1)解码一帧。2)将帧转换为 rgb24 。3)将rgb24帧转换回yuv420p。4)编码yuv420p帧并打包成视频文件。

但结束视频与输入视频不同。最终视频中有一些伪影(水平线)。当调用 rgbToYuv 方法时,我也会收到警告 - 警告:数据未对齐!这可能会导致速度损失

我怀疑我的格式转换方法有问题,因为当我从我的程序中评论覆盖步骤时,输出视频与输入视频相同。

以下是我的方法 -

int VideoFileInstance::convertToRGBFrame(AVFrame **yuvframe,AVFrame **rgbPictInfo) {
    int ret;
    int width = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->width;
    int height = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->height;

    int m_bufferSize = avpicture_get_size(PIX_FMT_RGB24,width, height);

    uint8_t *buffer = (uint8_t *)av_malloc(m_bufferSize);

    //init context if not done already.
    if (imgConvertCtxYUVToRGB == NULL) {
        //init once
        imgConvertCtxYUVToRGB = sws_getContext(width, height, PIX_FMT_YUV420P, width, height, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

        if(imgConvertCtxYUVToRGB == NULL) {
            av_log(NULL,AV_LOG_ERROR,"error creating img context");
            return -1;
        }

    }


    avpicture_fill((AVPicture*)(*rgbPictInfo), buffer,
                   PIX_FMT_RGB24,
                   width, height);

    uint8_t *inDate[3] = {
        (*yuvframe)->data[0] ,
        (*yuvframe)->data[1] ,
        (*yuvframe)->data[2]
    };

    int destLineSize[1] = {3*width};

    ret = sws_scale(imgConvertCtxYUVToRGB, inDate, (*yuvframe)->linesize, 0, height,
              (*rgbPictInfo)->data, destLineSize);

    av_free(buffer);


    return ret;
}

int VideoFileInstance::convertToYuvFrame (AVFrame **rgbFrame , AVFrame ** yuvFrame) {
    int ret = 0;
    int width = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->width;
    int height = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->height;
    int m_bufferSize = avpicture_get_size(PIX_FMT_YUV420P, width, height);

    uint8_t *buffer = (uint8_t *)av_malloc(m_bufferSize);

    avpicture_fill((AVPicture*)(*yuvFrame), buffer, PIX_FMT_YUV420P,
                   width, height);

    if(imgConvertCtxRGBToYUV == NULL) {
        imgConvertCtxRGBToYUV = sws_getContext(width, height, PIX_FMT_RGB24, width, height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

        if(imgConvertCtxRGBToYUV == NULL){
            av_log(NULL,AV_LOG_ERROR,"error creating img context");
            return -1;
        }
    }

    avpicture_fill((AVPicture*)(*yuvFrame), buffer,
                   PIX_FMT_YUV420P,
                   width, height);




    sws_scale(imgConvertCtxRGBToYUV,(*rgbFrame)->data , (*rgbFrame)->linesize, 0, height,
              (*yuvFrame)->data , (*yuvFrame)->linesize);

    av_free(buffer);

    return ret;
}

输入视频的尺寸是424 X 200。我的转换功能有什么问题吗?

4

2 回答 2

1

https://stackoverflow.com/a/31270501/4726410第二个要点,avpicture_ 及相关函数不保证对齐,需要使用 av_image_ 对应的 align=16 或 align=32。

于 2015-07-22T12:14:45.570 回答
1

使用 Ronalds 建议使用 av_image* 方法为我解决了这个问题。以下是其中一种方法的固定代码。

int VideoFileInstance::convertToRGBFrame(AVFrame **yuvframe,AVFrame **rgbPictInfo) {
    int ret;
    int width = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->width;
    int height = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->height;




    //init context if not done already.
    if (imgConvertCtxYUVToRGB == NULL) {
        //init once
        imgConvertCtxYUVToRGB = sws_getContext(width, height, PIX_FMT_YUV420P, width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, 0, 0, 0);

        if(imgConvertCtxYUVToRGB == NULL) {
            av_log(NULL,AV_LOG_ERROR,"error creating img context");
            return -1;
        }

    }


    // call av_freep(rgbPictInfo->data) to free memory

    av_image_alloc( (*rgbPictInfo)->data,   //data to be filled
                   (*rgbPictInfo)->linesize,//line sizes to be filled
                   width, height,
                   PIX_FMT_RGB24,           //pixel format
                   32                       //aling
                   );



    ret = sws_scale(imgConvertCtxYUVToRGB, (*yuvframe)->data, (*yuvframe)->linesize, 0, height,
              (*rgbPictInfo)->data, (*rgbPictInfo)->linesize);


    return ret;
}
于 2015-07-22T15:00:27.323 回答