
I have been working through the dranger ffmpeg tutorial, which explains how to synchronize audio and video once you have frames displaying and audio playing, and that is exactly the point I have reached.

Unfortunately the tutorial is now out of date (Stephen Dranger explained that to me himself), and it also uses SDL, which I am not using: this is for a BlackBerry 10 application.

I just cannot get the video frames to display at the correct speed (they play far too fast), and I have been trying for over a week now. Seriously!

I have three threads running: one reads from the stream into the audio and video packet queues, and then there is one thread each for audio and video playback.
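For reference, the packet_queue_init / packet_queue_put / packet_queue_get functions used throughout the code below are the tutorial's queue functions ported from SDL mutexes to Qt primitives. This is a rough sketch of what my port looks like (the real code may differ slightly):

    // dranger-style packet queue ported to Qt primitives (sketch only;
    // the SDL original is in the tutorial source)
    typedef struct PacketQueue {
        AVPacketList *first_pkt, *last_pkt;
        int nb_packets;
        int size;
        QMutex *mutex;
        QWaitCondition *cond;
    } PacketQueue;

    void packet_queue_init(PacketQueue *q) {
        memset(q, 0, sizeof(PacketQueue));
        q->mutex = new QMutex();
        q->cond = new QWaitCondition();
    }

    int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
        AVPacketList *pkt1;
        if(av_dup_packet(pkt) < 0) // take our own reference to the packet data
            return -1;
        pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
        if(!pkt1)
            return -1;
        pkt1->pkt = *pkt;
        pkt1->next = NULL;

        q->mutex->lock();
        if(!q->last_pkt)
            q->first_pkt = pkt1;
        else
            q->last_pkt->next = pkt1;
        q->last_pkt = pkt1;
        q->nb_packets++;
        q->size += pkt1->pkt.size;
        q->cond->wakeOne(); // wake a consumer blocked in packet_queue_get
        q->mutex->unlock();
        return 0;
    }

    int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
        AVPacketList *pkt1;
        int ret;

        q->mutex->lock();
        for(;;) {
            pkt1 = q->first_pkt;
            if(pkt1) {
                q->first_pkt = pkt1->next;
                if(!q->first_pkt)
                    q->last_pkt = NULL;
                q->nb_packets--;
                q->size -= pkt1->pkt.size;
                *pkt = pkt1->pkt;
                av_free(pkt1);
                ret = 1;
                break;
            } else if(!block) {
                ret = 0;
                break;
            } else {
                q->cond->wait(q->mutex); // block until the reader thread puts a packet
            }
        }
        q->mutex->unlock();
        return ret;
    }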

If anyone could scan my relevant code and explain what is going on, you would be a lifesaver.

The delay (the value I pass to usleep(testDelay)) seems to keep rising (incrementing), which does not seem right to me.
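To illustrate what I mean, here is a tiny standalone simulation (hypothetical numbers, not my real code) of the same pacing arithmetic that my video_refresh_timer below performs. If each refresh completes in much less real time than the scheduled delay, the computed delay climbs from frame to frame, exactly like the log at the bottom of this question:

    // Standalone sketch of the frame_timer pacing maths (hypothetical 30 fps
    // stream, refresh loop that only consumes ~1 ms of real time per frame)
    #include <cstdio>

    int main() {
        double frame_timer = 0.0;   // plays the role of is->frame_timer
        double wall_clock  = 0.0;   // stands in for av_gettime() / 1000000.0
        const double pts_step = 1.0 / 30.0; // pts difference between frames

        for(int frame = 0; frame < 10; frame++) {
            frame_timer += pts_step;                        // is->frame_timer += delay
            double actual_delay = frame_timer - wall_clock; // the "REAL" delay
            if(actual_delay < 0.010)
                actual_delay = 0.010;
            printf("frame %d: scheduled delay %d ms\n",
                   frame, (int)(actual_delay * 1000 + 0.5));
            wall_clock += 0.001; // the refresh actually finished after only ~1 ms
        }
        return 0;
    }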

count = 1;
    MyApp* inst = worker->app;//(VideoUploadFacebook*)arg;
    qDebug() << "\n start loadstream";
    w = new QWaitCondition();
    w2 = new QWaitCondition();
    context = avformat_alloc_context();
    inst->threadStarted = true;
    cout << "start of decoding thread";
    cout.flush();


    av_register_all();
    avcodec_register_all();
    avformat_network_init();
    av_log_set_callback(&log_callback);
    AVInputFormat   *pFormat;
    //const char      device[]     = "/dev/video0";
    const char      formatName[] = "mp4";
    cout << "2start of decoding thread";
    cout.flush();



    if (!(pFormat = av_find_input_format(formatName))) {
        printf("can't find input format %s\n", formatName);
        //return void*;
    }
    //open rtsp
    if(avformat_open_input(&context, inst->capturedUrl.data(), pFormat,NULL) != 0){
        // return ;
        cout << "error opening of decoding thread: " << inst->capturedUrl.data();
        cout.flush();
    }

    cout << "3start of decoding thread";
    cout.flush();
    // av_dump_format(context, 0, inst->capturedUrl.data(), 0);
    /*   if(avformat_find_stream_info(context,NULL) < 0){
        return EXIT_FAILURE;
    }
     */
    //search video stream
    for(int i =0;i<context->nb_streams;i++){
        if(context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            inst->video_stream_index = i;
    }
    cout << "3z start of decoding thread";
    cout.flush();
    AVFormatContext* oc = avformat_alloc_context();
    av_read_play(context);//play RTSP
    AVDictionary *optionsDict = NULL;
    ccontext = context->streams[inst->video_stream_index]->codec;

    inst->audioc = context->streams[1]->codec;

    cout << "4start of decoding thread";
    cout.flush();
    codec = avcodec_find_decoder(ccontext->codec_id);
    ccontext->pix_fmt = PIX_FMT_YUV420P;

    AVCodec* audio_codec = avcodec_find_decoder(inst->audioc->codec_id);
    inst->packet = new AVPacket();
    if (!audio_codec) {
        cout << "audio codec not found\n"; //fflush( stdout );
        exit(1);
    }

    if (avcodec_open2(inst->audioc, audio_codec, NULL) < 0) {
        cout << "could not open codec\n"; //fflush( stdout );
        exit(1);
    }

    if (avcodec_open2(ccontext, codec, &optionsDict) < 0) exit(1);

    cout << "5start of decoding thread";
    cout.flush();
    inst->pic = avcodec_alloc_frame();

    av_init_packet(inst->packet);

    while(av_read_frame(context, inst->packet) >= 0 && inst->keepGoing)
    {

        if(inst->packet->stream_index == 0){//packet is video

            int check = 0;



            // av_init_packet(inst->packet);
            int result = avcodec_decode_video2(ccontext, inst->pic, &check, inst->packet);

            if(check)
                break;
        }
    }



    inst->originalVideoWidth = inst->pic->width;
    inst->originalVideoHeight = inst->pic->height;
    float aspect = (float)inst->originalVideoHeight / (float)inst->originalVideoWidth;
    inst->newVideoWidth = inst->originalVideoWidth;
    int newHeight = (int)(inst->newVideoWidth * aspect);
    inst->newVideoHeight = newHeight;//(int)inst->originalVideoHeight / inst->originalVideoWidth * inst->newVideoWidth;// = new height
    int size = avpicture_get_size(PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);
    uint8_t* picture_buf = (uint8_t*)(av_malloc(size));
    avpicture_fill((AVPicture *) inst->pic, picture_buf, PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);

    picrgb = avcodec_alloc_frame();
    int size2 = avpicture_get_size(PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);
    uint8_t* picture_buf2 = (uint8_t*)(av_malloc(size2));
    avpicture_fill((AVPicture *) picrgb, picture_buf2, PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);



    if(ccontext->pix_fmt != PIX_FMT_YUV420P)
    {
        std::cout << "fmt != 420!!!: " << ccontext->pix_fmt << std::endl;//
        // return (EXIT_SUCCESS);//-1;

    }


    if (!inst->createForeignWindow(inst->myForeignWindow->windowGroup(),
            "HelloForeignWindowAppIDqq", 0,
            0, inst->newVideoWidth,
            inst->newVideoHeight)) {
        qDebug() << "The ForeignWindow was not properly initialized";
    }




    inst->keepGoing = true;

    inst->img_convert_ctx = sws_getContext(inst->originalVideoWidth, inst->originalVideoHeight, PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight,
            PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    is = (VideoState*)av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;

    is->audioStream = 1;
    is->audio_st = context->streams[1];
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;
    is->videoStream = 0;
    is->video_st = context->streams[0];

    is->frame_timer = (double)av_gettime() / 1000000.0;
    is->frame_last_delay = 40e-3;

    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    //av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = pFormat;
    is->ytop    = 0;
    is->xleft   = 0;

    /* start video display */
    is->pictq_mutex = new QMutex();
    is->pictq_cond  = new QWaitCondition();

    is->subpq_mutex = new QMutex();
    is->subpq_cond  = new QWaitCondition();

    is->video_current_pts_time = av_gettime();


    packet_queue_init(&audioq);

    packet_queue_init(&videoq);
    is->audioq = audioq;
    is->videoq = videoq;
    AVPacket* packet2  = new AVPacket();

    ccontext->get_buffer = our_get_buffer;
    ccontext->release_buffer = our_release_buffer;


    av_init_packet(packet2);
    while(inst->keepGoing)
    {


        if(av_read_frame(context,packet2) < 0 && keepGoing)
        {
            printf("bufferframe Could not read a frame from stream.\n");
            fflush( stdout );


        }else {



            if(packet2->stream_index == 0) {
                packet_queue_put(&videoq, packet2);
            } else if(packet2->stream_index == 1) {
                packet_queue_put(&audioq, packet2);
            } else {
                av_free_packet(packet2);
            }


            if(!videoThreadStarted)
            {
                videoThreadStarted = true;
                QThread* thread = new QThread;
                videoThread = new VideoStreamWorker(this);

                // Give QThread ownership of Worker Object
                videoThread->moveToThread(thread);
                connect(videoThread, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));
                QObject::connect(videoThread, SIGNAL(refreshNeeded()), this, SLOT(refreshNeededSlot()));
                connect(thread, SIGNAL(started()), videoThread, SLOT(doWork()));
                connect(videoThread, SIGNAL(finished()), thread, SLOT(quit()));
                connect(videoThread, SIGNAL(finished()), videoThread, SLOT(deleteLater()));
                connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));

                thread->start();
            }

            if(!audioThreadStarted)
            {
                audioThreadStarted = true;
                QThread* thread = new QThread;
                AudioStreamWorker* audioWorker = new AudioStreamWorker(this); // renamed so it no longer shadows videoThread

                // Give QThread ownership of Worker Object
                audioWorker->moveToThread(thread);

                // Connect audioWorker error signal to this errorHandler SLOT.
                connect(audioWorker, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));

                // Connects the thread's started() signal to the doWork() slot in audioWorker, causing it to start.
                connect(thread, SIGNAL(started()), audioWorker, SLOT(doWork()));
                connect(audioWorker, SIGNAL(finished()), thread, SLOT(quit()));
                connect(audioWorker, SIGNAL(finished()), audioWorker, SLOT(deleteLater()));

                // Make sure the thread object is deleted after execution has finished.
                connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));

                thread->start();
            }

        }

    } //finished main loop

int MyApp::video_thread() {
    //VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;

    double pts;
    pic = avcodec_alloc_frame();

    for(;;) {
        if(packet_queue_get(&videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }

        pts = 0;

        global_video_pkt_pts2 = packet->pts;
        // Decode video frame
        len1 =  avcodec_decode_video2(ccontext, pic, &frameFinished, packet);
        if(packet->dts == AV_NOPTS_VALUE
                && pic->opaque && *(uint64_t*)pic->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *)pic->opaque;
        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?

        if(frameFinished) {
            pts = synchronize_video(is, pic, pts);
            actualPts = pts;
            refreshSlot();
        }
        av_free_packet(packet);
    }
    av_free(pic);
    return 0;
}
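synchronize_video above comes from the tutorial; for completeness, this is roughly the tutorial's version that mine is based on:

    double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {
        double frame_delay;

        if(pts != 0) {
            /* if we have pts, set the video clock to it */
            is->video_clock = pts;
        } else {
            /* if we aren't given a pts, set it to the clock */
            pts = is->video_clock;
        }
        /* update the video clock: one frame's worth, plus half a frame
           for each repeated field */
        frame_delay = av_q2d(is->video_st->codec->time_base);
        frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
        is->video_clock += frame_delay;
        return pts;
    }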


int MyApp::audio_thread() {
    //VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    ALuint source;
    ALenum format = 0;
    //   ALuint frequency;
    ALenum alError;
    ALint val2;
    ALuint buffers[NUM_BUFFERS];
    int dataSize;


    ALCcontext *aContext;
    ALCdevice *device;
    if (!alutInit(NULL, NULL)) {
        // printf(stderr, "init alut error\n");
    }
    device = alcOpenDevice(NULL);
    if (device == NULL) {
        // printf(stderr, "device error\n");
    }

    //Create a context
    aContext = alcCreateContext(device, NULL);
    alcMakeContextCurrent(aContext);
    if(!(aContext)) {
        printf("Could not create the OpenAL context!\n");
        return 0;
    }

    alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);









    //ALenum alError;
    if(alGetError() != AL_NO_ERROR) {
        cout << "could not create buffers";
        cout.flush();
        fflush( stdout );
        return 0;
    }
    alGenBuffers(NUM_BUFFERS, buffers);
    alGenSources(1, &source);
    if(alGetError() != AL_NO_ERROR) {
        cout << "after Could not create buffers or the source.\n";
        cout.flush(  );
        return 0;
    }

    int i;
    int indexOfPacket;
    double pts;
    //double pts;
    int n;


    for(i = 0; i < NUM_BUFFERS; i++)
    {
        if(packet_queue_get(&audioq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        cout << "streamindex=audio \n";
        cout.flush(  );
        //printf("before decode  audio\n");
        //fflush( stdout );
        // AVPacket *packet = new AVPacket();//malloc(sizeof(AVPacket*));
        AVFrame *decodedFrame = NULL;
        int gotFrame = 0;
        // AVFrame* decodedFrame;

        if(!decodedFrame) {
            if(!(decodedFrame = avcodec_alloc_frame())) {
                cout << "Run out of memory, stop the streaming...\n";
                fflush( stdout );
                cout.flush();


                return -2;
            }
        } else {
            avcodec_get_frame_defaults(decodedFrame);
        }

        int  len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
        if(len < 0) {
            /* decoding error: skip this frame */
            cout << "Error while decoding.\n";
            cout.flush(  );
            is->audio_pkt_size = 0;
            return -3;
        }
        is->audio_pkt_data += len;
        is->audio_pkt_size -= len;

        pts = is->audio_clock;
        // *pts_ptr = pts;
        n = 2 * is->audio_st->codec->channels;
        is->audio_clock += (double)packet->size/
                (double)(n * is->audio_st->codec->sample_rate);
        if(gotFrame) {
            cout << "got audio frame.\n";
            cout.flush(  );
            // We have a buffer ready, send it
            dataSize = av_samples_get_buffer_size(NULL, audioc->channels,
                    decodedFrame->nb_samples, audioc->sample_fmt, 1);

            if(!format) {
                if(audioc->sample_fmt == AV_SAMPLE_FMT_U8 ||
                        audioc->sample_fmt == AV_SAMPLE_FMT_U8P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO8;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO8;
                    }
                } else if(audioc->sample_fmt == AV_SAMPLE_FMT_S16 ||
                        audioc->sample_fmt == AV_SAMPLE_FMT_S16P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO16;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO16;
                    }
                }

                if(!format) {
                    cout << "OpenAL can't open this format of sound.\n";
                    cout.flush(  );

                    return -4;
                }
            }
            printf("albufferdata audio b4.\n");
            fflush( stdout );
            alBufferData(buffers[i], format, *decodedFrame->data, dataSize, decodedFrame->sample_rate);
            cout << "after albufferdata all buffers \n";
            cout.flush(  );
            av_free_packet(packet);
            //=av_free(packet);
            av_free(decodedFrame);

            if((alError = alGetError()) != AL_NO_ERROR) {
                printf("Error while buffering.\n");

                printAlError(alError);
                return -6;
            }
        }
    }


    cout << "before quoe buffers \n";
    cout.flush();
    alSourceQueueBuffers(source, NUM_BUFFERS, buffers);
    cout << "before play.\n";
    cout.flush();
    alSourcePlay(source);
    cout << "after play.\n";
    cout.flush();
    if((alError = alGetError()) != AL_NO_ERROR) {
        cout << "error strating stream.\n";
        cout.flush();
        printAlError(alError);
        return 0;
    }


    // AVPacket *pkt = &is->audio_pkt;

    while(keepGoing)
    {
        while(packet_queue_get(&audioq, packet, 1)  >= 0) {
            // means we quit getting packets

            do {
                alGetSourcei(source, AL_BUFFERS_PROCESSED, &val2);
                usleep(SLEEP_BUFFERING);
            } while(val2 <= 0);
            if(alGetError() != AL_NO_ERROR)
            {
                fprintf(stderr, "Error gettingsource :(\n");
                return 1;
            }

            while(val2--)
            {



                ALuint buffer;
                alSourceUnqueueBuffers(source, 1, &buffer);
                if(alGetError() != AL_NO_ERROR)
                {
                    fprintf(stderr, "Error unqueue buffers :(\n");
                    //  return 1;
                }
                AVFrame *decodedFrame = NULL;
                int gotFrame = 0;
                // AVFrame* decodedFrame;

                if(!decodedFrame) {
                    if(!(decodedFrame = avcodec_alloc_frame())) {
                        cout << "Run out of memory, stop the streaming...\n";
                        //fflush( stdout );
                        cout.flush();


                        return -2;
                    }
                } else {
                    avcodec_get_frame_defaults(decodedFrame);
                }

                int  len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
                if(len < 0) {
                    cout << "Error while decoding.\n";
                    cout.flush(  );
                    is->audio_pkt_size = 0;
                    return -3;
                }

                is->audio_pkt_data += len;
                is->audio_pkt_size -= len;
                if(packet->size <= 0) {
                    /* No data yet, get more frames */
                    //continue;
                }


                if(gotFrame) {
                    pts = is->audio_clock;
                    len = synchronize_audio(is, (int16_t *)is->audio_buf,
                            packet->size, pts);
                    is->audio_buf_size = packet->size;
                    pts = is->audio_clock;
                    // *pts_ptr = pts;
                    n = 2 * is->audio_st->codec->channels;
                    is->audio_clock += (double)packet->size /
                            (double)(n * is->audio_st->codec->sample_rate);
                    if(packet->pts != AV_NOPTS_VALUE) {
                        is->audio_clock = av_q2d(is->audio_st->time_base)*packet->pts;
                    }
                    len = av_samples_get_buffer_size(NULL, audioc->channels,
                            decodedFrame->nb_samples, audioc->sample_fmt, 1);
                    alBufferData(buffer, format, *decodedFrame->data, len, decodedFrame->sample_rate);
                    if(alGetError() != AL_NO_ERROR)
                    {
                        fprintf(stderr, "Error buffering :(\n");
                        return 1;
                    }
                    alSourceQueueBuffers(source, 1, &buffer);
                    if(alGetError() != AL_NO_ERROR)
                    {
                        fprintf(stderr, "Error queueing buffers :(\n");
                        return 1;
                    }
                }





            }

            alGetSourcei(source, AL_SOURCE_STATE, &val2);
            if(val2 != AL_PLAYING)
                alSourcePlay(source);

        }


        //pic = avcodec_alloc_frame();
    }
    qDebug() << "end audiothread";
    return 1;
}

void MyApp::refreshSlot()
{



        printf("got frame %d, %d\n", pic->width, ccontext->width);
        fflush( stdout );

        sws_scale(img_convert_ctx, (const uint8_t **)pic->data, pic->linesize,
                0, originalVideoHeight, &picrgb->data[0], &picrgb->linesize[0]);

        printf("rescaled frame %d, %d\n", newVideoWidth, newVideoHeight);
        fflush( stdout );
        //av_free_packet(packet);
        //av_init_packet(packet);

        qDebug() << "waking audio as video finished";
        ////mutex.unlock();
        //mutex2.lock();
        doingVideoFrame = false;
        //doingAudioFrame = false;
        ////mutex2.unlock();


        //mutex2.unlock();
        //w2->wakeAll();
        //w->wakeAll();
        qDebug() << "now woke audio";

        //pic = picrgb;
        uint8_t *srcy = picrgb->data[0];
        uint8_t *srcu = picrgb->data[1];
        uint8_t *srcv = picrgb->data[2];
        printf("got src yuv frame %d\n", &srcy);
        fflush( stdout );
        unsigned char *ptr = NULL;
        screen_get_buffer_property_pv(mScreenPixelBuffer, SCREEN_PROPERTY_POINTER, (void**) &ptr);
        unsigned char *y = ptr;
        unsigned char *u = y + (newVideoHeight * mStride) ;
        unsigned char *v = u + (newVideoHeight * mStride) / 4;
        int i = 0;
        printf("got buffer  picrgbwidth= %d \n", newVideoWidth);
        fflush( stdout );
        for ( i = 0; i < newVideoHeight; i++)
        {
            int doff = i * mStride;
            int soff = i * picrgb->linesize[0];
            memcpy(&y[doff], &srcy[soff], newVideoWidth);
        }

        for ( i = 0; i < newVideoHeight / 2; i++)
        {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[1];
            memcpy(&u[doff], &srcu[soff], newVideoWidth / 2);
        }

        for ( i = 0; i < newVideoHeight / 2; i++)
        {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[2];
            memcpy(&v[doff], &srcv[soff], newVideoWidth / 2);
        }
        printf("before posttoscreen \n");
        fflush( stdout );

        video_refresh_timer();
        qDebug() << "end refreshslot";






}

void  MyApp::refreshNeededSlot2()
    {
        printf("blitting to buffer");
        fflush(stdout);

        screen_buffer_t screen_buffer;
        screen_get_window_property_pv(mScreenWindow, SCREEN_PROPERTY_RENDER_BUFFERS, (void**) &screen_buffer);
        int attribs[] = { SCREEN_BLIT_SOURCE_WIDTH, newVideoWidth, SCREEN_BLIT_SOURCE_HEIGHT, newVideoHeight, SCREEN_BLIT_END };
        int res2 = screen_blit(mScreenCtx, screen_buffer, mScreenPixelBuffer, attribs);
        printf("dirty rectangles");
        fflush(stdout);
        int dirty_rects[] = { 0, 0, newVideoWidth, newVideoHeight };
        screen_post_window(mScreenWindow, screen_buffer, 1, dirty_rects, 0);
        printf("done screneposdtwindow");
        fflush(stdout);

    }

void MyApp::video_refresh_timer() {
    testDelay = 0;
    //  VideoState *is = ( VideoState* )userdata;
    VideoPicture *vp;
    //double pts = 0    ;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st) {
        if(false) // disabled: is->pictq_size == 0
        {
            testDelay = 1;
            schedule_refresh(is, 1);
        } else {
            // vp = &is->pictq[is->pictq_rindex];

            delay = actualPts - is->frame_last_pts; /* the pts from last time */
            if(delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = actualPts;

            is->video_current_pts = actualPts;
            is->video_current_pts_time = av_gettime();
            /* update delay to sync to audio */
            ref_clock = get_audio_clock(is);
            diff = actualPts - ref_clock;

            /* Skip or repeat the frame. Take delay into account.
               FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if(diff <= -sync_threshold) {
                    delay = 0;
                } else if(diff >= sync_threshold) {
                    delay = 2 * delay;
                }
            }
            is->frame_timer += delay;
            /* compute the REAL delay */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010) {
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }
            testDelay = (int)(actual_delay * 1000 + 0.5);
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
            /* show the picture! */
            //video_display(is);


            // SDL_CondSignal(is->pictq_cond);
            // SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        testDelay = 100;
        schedule_refresh(is, 100);

    }
}

void MyApp::schedule_refresh(VideoState *is, int delay) {
    qDebug() << "start schedule refresh timer" << delay;
    typeOfEvent = FF_REFRESH_EVENT2;
    w->wakeAll();
    //  SDL_AddTimer(delay,


}
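get_audio_clock is also the tutorial's; mine is roughly the version below. One caveat: in the tutorial, audio_buf_size and audio_buf_index are maintained by the SDL audio callback, and I am not certain my OpenAL port keeps them up to date in the same way.

    double get_audio_clock(VideoState *is) {
        double pts;
        int hw_buf_size, bytes_per_sec, n;

        pts = is->audio_clock; /* maintained by the audio thread */
        hw_buf_size = is->audio_buf_size - is->audio_buf_index; /* decoded but not yet played */
        bytes_per_sec = 0;
        n = is->audio_st->codec->channels * 2;
        if(is->audio_st) {
            bytes_per_sec = is->audio_st->codec->sample_rate * n;
        }
        if(bytes_per_sec) {
            pts -= (double)hw_buf_size / bytes_per_sec;
        }
        return pts;
    }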

I am currently looping, waiting for data, like this:

QMutex mutex;
    mutex.lock();
    while(keepGoing)
    {



        qDebug() << "MAINTHREAD" << testDelay;


        w->wait(&mutex);
        mutex.unlock();
        qDebug() << "MAINTHREAD past wait";

        if(!keepGoing)
        {
            break;
        }
        if(testDelay > 0 && typeOfEvent == FF_REFRESH_EVENT2)
        {
            // testDelay holds milliseconds (actual_delay * 1000 + 0.5),
            // but usleep() expects microseconds, so convert here
            usleep(testDelay * 1000);
            refreshNeededSlot2();
        }
        else if(testDelay > 0 && typeOfEvent == FF_QUIT_EVENT2)
        {
            keepGoing = false;
            exit(0);
        }
        qDebug() << "MAINTHREADend";
        mutex.lock();

    }
    mutex.unlock();

Let me know if I need to provide any more relevant code. Apologies for the untidy code; I am still learning C++ and, as mentioned, I have been hacking away at this for over a week.

I have just added a sample of the output I see printed to the console. I cannot make sense of it (it is almost beyond my level of expertise), but it is hard to give up when you can see frames displaying and audio playing, especially when it took me weeks to get this far.

Please help if you can spot the problem.

MAINTHREAD past wait pts after syncvideo= 1073394046 got frame 640, 640 start video_refresh_timer actualpts = 1.66833 frame lastpts = 1.63497 start schedule refresh timer delay 123

pts after syncvideo= 1073429033 got frame 640, 640 MAINTHREAD loop delay = 123 before refresh start video_refresh_timer actualpts = 1.7017 frame lastpts = 1.66833 start schedule refresh timer delay 115

MAINTHREAD past wait pts after syncvideo= 1073464021 got frame 640, 640 start video_refresh_timer actualpts = 1.73507 frame lastpts = 1.7017 start schedule refresh timer delay 140

MAINTHREAD loop delay = 140 before refresh pts after syncvideo= 1073499008 got frame 640, 640 start video_refresh_timer actualpts = 1.76843 frame lastpts = 1.73507 start schedule refresh timer delay 163

MAINTHREAD past wait pts after syncvideo= 1073533996 got frame 640, 640 start video_refresh_timer actualpts = 1.8018 frame lastpts = 1.76843 start schedule refresh timer delay 188

MAINTHREAD loop delay = 188 before refresh pts after syncvideo= 1073568983 got frame 640, 640 start video_refresh_timer actualpts = 1.83517 frame lastpts = 1.8018 start schedule refresh timer delay 246

MAINTHREAD past wait pts after syncvideo= 1073603971 got frame 640, 640 start video_refresh_timer actualpts = 1.86853 frame lastpts = 1.83517 start schedule refresh timer delay 299

MAINTHREAD loop delay = 299 before refresh pts after syncvideo= 1073638958 got frame 640, 640 start video_refresh_timer actualpts = 1.9019 frame lastpts = 1.86853 start schedule refresh timer delay 358

MAINTHREAD past wait pts after syncvideo= 1073673946 got frame 640, 640 start video_refresh_timer actualpts = 1.93527 frame lastpts = 1.9019 start schedule refresh timer delay 416

MAINTHREAD loop delay = 416 before refresh pts after syncvideo= 1073708933 got frame 640, 640 start video_refresh_timer actualpts = 1.96863 frame lastpts = 1.93527 start schedule refresh timer delay 474

MAINTHREAD past wait pts after syncvideo= 1073742872 got frame 640, 640 MAINTHREAD loop delay = 474 before refresh start video_refresh_timer actualpts = 2.002 frame lastpts = 1.96863 start schedule refresh timer delay 518

MAINTHREAD past wait pts after syncvideo= 1073760366 got frame 640, 640 start video_refresh_timer actualpts = 2.03537 frame lastpts = 2.002 start schedule refresh timer delay 575
