在 Android JNI 中,ffmpeg with libx264
使用以下代码对原始 rgb 数据进行编码和解码!我应该按照 H.264 的要求使用 swscale 将 rgb565 转换为 yuv420p。但不清楚这种转换。请帮助,关于我得到的日志,我错了!
编码代码
codecinit()- 调用一次(JNI 包装函数)
int Java_com_my_package_codecinit (JNIEnv *env, jobject thiz) {
avcodec_register_all();
codec = avcodec_find_encoder(AV_CODEC_ID_H264);//AV_CODEC_ID_MPEG1VIDEO);
if(codec->id == AV_CODEC_ID_H264)
__android_log_write(ANDROID_LOG_ERROR, "set","h264_encoder");
if (!codec) {
fprintf(stderr, "codec not found\n");
__android_log_write(ANDROID_LOG_ERROR, "codec", "not found");
}
__android_log_write(ANDROID_LOG_ERROR, "codec", "alloc-contest3");
c= avcodec_alloc_context3(codec);
if(c == NULL)
__android_log_write(ANDROID_LOG_ERROR, "avcodec","context-null");
picture= av_frame_alloc();
if(picture == NULL)
__android_log_write(ANDROID_LOG_ERROR, "picture","context-null");
c->bit_rate = 400000;
c->height = 800;
c->time_base= (AVRational){1,25};
c->gop_size = 10;
c->max_b_frames=1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
outbuf_size = 768000;
c->width = 480;
size = (c->width * c->height);
if (avcodec_open2(c, codec,NULL) < 0) {
__android_log_write(ANDROID_LOG_ERROR, "codec", "could not open");
}
ret = av_image_alloc(picture->data, picture->linesize, c->width, c->height,
c->pix_fmt, 32);
if (ret < 0) {
__android_log_write(ANDROID_LOG_ERROR, "image","alloc-failed");
fprintf(stderr, "could not alloc raw picture buffer\n");
}
picture->format = c->pix_fmt;
picture->width = c->width;
picture->height = c->height;
return 0;
}
encodeframe() - 在 while 循环中调用
int Java_com_my_package_encodeframe (JNIEnv *env, jobject thiz,jbyteArray buffer) {
jbyte *temp= (*env)->GetByteArrayElements(env, buffer, 0);
Output = (char *)temp;
const uint8_t * const inData[1] = { Output };
const int inLinesize[1] = { 2*c->width };
//swscale should implement here
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
fflush(stdout);
picture->data[0] = Output;
ret = avcodec_encode_video2(c, &pkt, picture,&got_output);
fprintf(stderr,"ret = %d, got-out = %d \n",ret,got_output);
if (ret < 0) {
__android_log_write(ANDROID_LOG_ERROR, "error","encoding");
if(got_output > 0)
__android_log_write(ANDROID_LOG_ERROR, "got_output","is non-zero");
}
if (got_output) {
fprintf(stderr,"encoding frame %3d (size=%5d): (ret=%d)\n", 1, pkt.size,ret);
fprintf(stderr,"before caling decode");
decode_inline(&pkt); //function that decodes right after the encode
fprintf(stderr,"after caling decode");
av_free_packet(&pkt);
}
fprintf(stderr,"y val: %d \n",y);
(*env)->ReleaseByteArrayElements(env, buffer, Output, 0);
return ((ret));
}
decode_inline() 函数
decode_inline(AVPacket *avpkt){
AVCodec *codec;
AVCodecContext *c = NULL;
int frame, got_picture, len = -1,temp=0;
AVFrame *rawFrame, *rgbFrame;
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
char buf[1024];
char rawBuf[768000],rgbBuf[768000];
struct SwsContext *sws_ctx;
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
avcodec_register_all();
c= avcodec_alloc_context3(codec);
if(c == NULL)
__android_log_write(ANDROID_LOG_ERROR, "avcodec","context-null");
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "codec not found\n");
fprintf(stderr, "codec = %p \n", codec);
}
c->pix_fmt = AV_PIX_FMT_YUV420P;
c->width = 480;
c->height = 800;
rawFrame = av_frame_alloc();
rgbFrame = av_frame_alloc();
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
sws_ctx = sws_getContext(c->width, c->height,/*PIX_FMT_RGB565BE*/
PIX_FMT_YUV420P, c->width, c->height, AV_PIX_FMT_RGB565/*PIX_FMT_YUV420P*/,
SWS_BILINEAR, NULL, NULL, NULL);
frame = 0;
unsigned short *decodedpixels = &rawBuf;
rawFrame->data[0] = &rawBuf;
rgbFrame->data[0] = &rgbBuf;
fprintf(stderr,"size of avpkt %d \n",avpkt->size);
temp = avpkt->size;
while (temp > 0) {
len = avcodec_decode_video2(c, rawFrame, &got_picture, avpkt);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d\n", frame);
exit(1);
}
temp -= len;
avpkt->data += len;
if (got_picture) {
printf("saving frame %3d\n", frame);
fflush(stdout);
//TODO
//memcpy(decodedpixels,rawFrame->data[0],rawFrame->linesize[0]);
// decodedpixels +=rawFrame->linesize[0];
frame++;
}
}
avcodec_close(c);
av_free(c);
//free(rawBuf);
//free(rgbBuf);
av_frame_free(&rawFrame);
av_frame_free(&rgbFrame);
}
我得到的日志
对于 decode_inline() 函数:
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] non-existing PPS 0 referenced
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] decode_slice_header error
01-02 14:50:50.160: I/stderr(3407): [h264 @ 0x8db540] Invalid mix of idr and non-idr slices
01-02 14:50:50.160: I/stderr(3407): Error while decoding frame 0
编辑:更改 GOP 值:
如果我按预期更改它每三帧c->gop_size = 3;
发出一次。在每三次执行中都不存在one I frame
该消息,但所有其他执行都具有此消息non-existing PPS 0 referenced