
I am decoding an OGG video (Theora and Vorbis as codecs) and want to show it on screen (using Ogre 3D) while playing the sound. I can decode the image stream just fine, and the video plays perfectly at the correct frame rate, etc.

However, I cannot get the sound to play at all with OpenAL.

Edit: I managed to make the played sound at least resemble the actual audio in the video to some degree. Updated the sample code.

Edit 2: I am now able to get "almost" the right sound. I had to set OpenAL to use AL_FORMAT_STEREO_FLOAT32 (after initializing the extension) instead of just STEREO16. Now the sound is "only" extremely high-pitched and stuttering, but at the correct speed.

Here is how I decode the audio packets (in a background thread; the equivalent works just fine for the image stream of the video file):

//------------------------------------------------------------------------------
int decodeAudioPacket(  AVPacket& p_packet, AVCodecContext* p_audioCodecContext, AVFrame* p_frame,
                        FFmpegVideoPlayer* p_player, VideoInfo& p_videoInfo)
{
    // Decode audio frame
    int got_frame = 0;
    int decoded = avcodec_decode_audio4(p_audioCodecContext, p_frame, &got_frame, &p_packet);
    if (decoded < 0) 
    {
        p_videoInfo.error = "Error decoding audio frame.";
        return decoded;
    }

    // Frame is complete, store it in audio frame queue
    if (got_frame)
    {
        int bufferSize = av_samples_get_buffer_size(NULL, p_audioCodecContext->channels, p_frame->nb_samples, 
                                                    p_audioCodecContext->sample_fmt, 0);

        int64_t duration = p_frame->pkt_duration;
        int64_t dts = p_frame->pkt_dts;

        if (staticOgreLog)
        {
            staticOgreLog->logMessage("Audio frame bufferSize / duration / dts: " 
                    + boost::lexical_cast<std::string>(bufferSize) + " / "
                    + boost::lexical_cast<std::string>(duration) + " / "
                    + boost::lexical_cast<std::string>(dts), Ogre::LML_NORMAL);
        }

        // Create the audio frame and copy the raw samples into it.
        // Note: for stereo the decoded data is planar (one plane per channel),
        // so the two channel planes end up back-to-back here, not interleaved.
        AudioFrame* frame = new AudioFrame();
        frame->dataSize = bufferSize;
        frame->data = new uint8_t[bufferSize];
        if (p_frame->channels == 2)
        {
            memcpy(frame->data, p_frame->data[0], bufferSize >> 1);
            memcpy(frame->data + (bufferSize >> 1), p_frame->data[1], bufferSize >> 1);
        }
        else
        {
            memcpy(frame->data, p_frame->data[0], bufferSize);
        }
        double timeBase = ((double)p_audioCodecContext->time_base.num) / (double)p_audioCodecContext->time_base.den;
        frame->lifeTime = duration * timeBase;

        p_player->addAudioFrame(frame);
    }

    return decoded;
}
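
AudioFrame is just a plain holder struct of my own, and the player keeps a queue of them. Roughly, it looks like this (a simplified sketch showing only the members implied by the snippets; the real implementation also handles locking between the threads):

// Hypothetical sketch: only the fields and signatures that the surrounding
// snippets actually use are reproduced here.
struct AudioFrame
{
    uint8_t* data;          // interleaved raw samples, owned by the frame
    unsigned int dataSize;  // size of data in bytes
    double lifeTime;        // playback duration of this frame in seconds
};

class FFmpegVideoPlayer
{
public:
    // Called from the decoder thread; assumed to lock an internal queue.
    void addAudioFrame(AudioFrame* frame);

    // Pops up to p_num decoded frames; returns how many were available.
    unsigned int getDecodedAudioFrames(unsigned int p_num,
                                       std::vector<AudioFrame*>& p_frames,
                                       std::vector<unsigned int>& p_sizes);
};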

So as you can see, I decode the frame and then memcpy it into my own structure, AudioFrame. Now, when the sound is to be played, I use these audio frames like this:

    int numBuffers = 4;
    ALuint buffers[4];
    alGenBuffers(numBuffers, buffers);
    ALenum success = alGetError();
    if(success != AL_NO_ERROR)
    {
        CONSOLE_LOG("Error on alGenBuffers : " + Ogre::StringConverter::toString(success) + alGetString(success));
        return;
    }

    // Fill a number of data buffers with audio from the stream
    std::vector<AudioFrame*> audioBuffers;
    std::vector<unsigned int> audioBufferSizes;
    unsigned int numReturned = FFMPEG_PLAYER->getDecodedAudioFrames(numBuffers, audioBuffers, audioBufferSizes);

    // Assign the data buffers to the OpenAL buffers
    for (unsigned int i = 0; i < numReturned; ++i)
    {
        alBufferData(buffers[i], _streamingFormat, audioBuffers[i]->data, audioBufferSizes[i], _streamingFrequency);

        success = alGetError();
        if(success != AL_NO_ERROR)
        {
            CONSOLE_LOG("Error on alBufferData : " + Ogre::StringConverter::toString(success) + alGetString(success)
                            + " size: " + Ogre::StringConverter::toString(audioBufferSizes[i]));
            return;
        }
    }

    // Queue the buffers into OpenAL
    alSourceQueueBuffers(_source, numReturned, buffers);
    success = alGetError();
    if(success != AL_NO_ERROR)
    {
        CONSOLE_LOG("Error queuing streaming buffers: " + Ogre::StringConverter::toString(success) + alGetString(success));
        return;
    }
}

alSourcePlay(_source);

The format and frequency I give to OpenAL are AL_FORMAT_STEREO_FLOAT32 (it is a stereo stream, and I did initialize the FLOAT32 extension) and 48000 (which is the sample rate of the AVCodecContext of the audio stream).
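
The setup for that looks roughly like this (a simplified sketch, not my exact code; the variable names are illustrative):

// Simplified sketch: AL_FORMAT_STEREO_FLOAT32 is defined by the
// AL_EXT_float32 extension, so its enum value has to be queried at runtime.
alGenSources(1, &_source);

if (alIsExtensionPresent("AL_EXT_float32"))
    _streamingFormat = alGetEnumValue("AL_FORMAT_STEREO_FLOAT32");
else
    _streamingFormat = AL_FORMAT_STEREO16; // fallback; would need S16 samples instead

_streamingFrequency = audioCodecContext->sample_rate; // 48000 for this stream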

During playback, I do the following to refill OpenAL's buffers:

ALint numBuffersProcessed;

// Check if OpenAL is done with any of the queued buffers
alGetSourcei(_source, AL_BUFFERS_PROCESSED, &numBuffersProcessed);
if(numBuffersProcessed <= 0)
    return;

// Fill a number of data buffers with audio from the stream
std::vector<AudioFrame*> audioBuffers;
std::vector<unsigned int> audioBufferSizes;
unsigned int numFilled = FFMPEG_PLAYER->getDecodedAudioFrames(numBuffersProcessed, audioBuffers, audioBufferSizes);

// Assign the data buffers to the OpenAL buffers
ALuint buffer;
for (unsigned int i = 0; i < numFilled; ++i)
{
    // Pop the oldest queued buffer from the source, 
    // fill it with the new data, then re-queue it
    alSourceUnqueueBuffers(_source, 1, &buffer);

    ALenum success = alGetError();
    if(success != AL_NO_ERROR)
    {
        CONSOLE_LOG("Error Unqueuing streaming buffers: " + Ogre::StringConverter::toString(success));
        return;
    }

    alBufferData(buffer, _streamingFormat, audioBuffers[i]->data, audioBufferSizes[i], _streamingFrequency);

    success = alGetError();
    if(success != AL_NO_ERROR)
    {
        CONSOLE_LOG("Error on re- alBufferData: " + Ogre::StringConverter::toString(success));
        return;
    }

    alSourceQueueBuffers(_source, 1, &buffer);

    success = alGetError();
    if(success != AL_NO_ERROR)
    {
        CONSOLE_LOG("Error re-queuing streaming buffers: " + Ogre::StringConverter::toString(success) + " "
                    + alGetString(success));
        return;
    }
}

// Make sure the source is still playing, 
// and restart it if needed.
ALint playStatus;
alGetSourcei(_source, AL_SOURCE_STATE, &playStatus);
if(playStatus != AL_PLAYING)
    alSourcePlay(_source);
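
This refill code has to run regularly, e.g. once per rendered frame. An illustrative way to hook it in with Ogre (the class and refillAudioBuffers() are made-up names standing for the code above):

// Illustrative only: drive the refill logic above once per rendered frame
// through an Ogre FrameListener.
class AudioStreamUpdater : public Ogre::FrameListener
{
public:
    virtual bool frameRenderingQueued(const Ogre::FrameEvent& p_event)
    {
        refillAudioBuffers(); // the refill code shown above
        return true;
    }
};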

As you can see, I do very heavy error checking. But I do not get any errors, neither from OpenAL nor from FFmpeg.

Edit: What I hear somewhat resembles the actual audio from the video, but it is VERY high-pitched and stutters a lot. Also, it seems to be playing on top of TV noise. Very weird. Plus, it plays much slower than the correct audio would.

Edit 2: After switching to AL_FORMAT_STEREO_FLOAT32, the sound plays at the correct speed, but is still very high-pitched and stuttering (though less than before).

The video itself is not broken; it plays fine in any player. And OpenAL can play *.wav files just fine in the same application, so it is working as well.

Any idea what could be wrong here, or how to do this correctly?

My only guess is that, somehow, FFmpeg's decode function does not produce data that OpenAL can read. But this is as far as the FFmpeg decoding example goes, so I don't know what is missing. As I understand it, the decode_audio4 function decodes the frame to raw data. And OpenAL should be able to work with RAW data (or rather, doesn't work with anything else).


1 Answer


So, I finally figured out how to do it. Gee, what a mess. It was a hint from a user on the libav-users mailing list that put me on the right track.

Here are my mistakes:

  1. Using the wrong format in the alBufferData function. I used AL_FORMAT_STEREO16 (as that is what every single streaming example with OpenAL uses). I should have used AL_FORMAT_STEREO_FLOAT32, as the video I stream is Ogg and Vorbis is stored in floats. And using swr_convert to convert from AV_SAMPLE_FMT_FLTP to AV_SAMPLE_FMT_S16 simply crashes. No idea why.

  2. Not using swr_convert to convert the decoded audio frame to the target format. After I had tried to use swr_convert to convert from FLTP to S16 and it simply crashed without any reason given, I assumed it was broken. But after figuring out my first mistake, I tried again, converting from FLTP to FLT (non-planar), and then it worked! So OpenAL uses an interleaved format, not a planar one. Good to know (see the sketch right after this list).
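
The planar/interleaved distinction can also be queried from FFmpeg directly. A small illustrative helper (not part of my player, just to show the two API calls):

// Illustrative helper: av_sample_fmt_is_planar() reports whether each channel
// lives in its own plane, and av_get_packed_sample_fmt() maps a planar format
// to its interleaved twin, e.g. AV_SAMPLE_FMT_FLTP -> AV_SAMPLE_FMT_FLT.
AVSampleFormat toInterleaved(AVSampleFormat p_format)
{
    if (av_sample_fmt_is_planar(p_format))
        return av_get_packed_sample_fmt(p_format);
    return p_format;
}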

So here is the decodeAudioPacket function that works for me, with my Ogg video and its Vorbis audio stream:

int decodeAudioPacket(  AVPacket& p_packet, AVCodecContext* p_audioCodecContext, AVFrame* p_frame,
                        SwrContext* p_swrContext, uint8_t** p_destBuffer, int p_destLinesize,
                        FFmpegVideoPlayer* p_player, VideoInfo& p_videoInfo)
{
    // Decode audio frame
    int got_frame = 0;
    int decoded = avcodec_decode_audio4(p_audioCodecContext, p_frame, &got_frame, &p_packet);
    if (decoded < 0) 
    {
        p_videoInfo.error = "Error decoding audio frame.";
        return decoded;
    }

    if(decoded <= p_packet.size)
    {
        /* Move the unread data to the front and clear the end bits */
        int remaining = p_packet.size - decoded;
        memmove(p_packet.data, &p_packet.data[decoded], remaining);
        av_shrink_packet(&p_packet, remaining);
    }

    // Frame is complete, store it in audio frame queue
    if (got_frame)
    {
        int outputSamples = swr_convert(p_swrContext, 
                                        p_destBuffer, p_destLinesize, 
                                        (const uint8_t**)p_frame->extended_data, p_frame->nb_samples);

        int bufferSize = av_get_bytes_per_sample(AV_SAMPLE_FMT_FLT) * p_videoInfo.audioNumChannels
                            * outputSamples;

        int64_t duration = p_frame->pkt_duration;
        int64_t dts = p_frame->pkt_dts;

        if (staticOgreLog)
        {
            staticOgreLog->logMessage("Audio frame bufferSize / duration / dts: " 
                    + boost::lexical_cast<std::string>(bufferSize) + " / "
                    + boost::lexical_cast<std::string>(duration) + " / "
                    + boost::lexical_cast<std::string>(dts), Ogre::LML_NORMAL);
        }

        // Create the audio frame
        AudioFrame* frame = new AudioFrame();
        frame->dataSize = bufferSize;
        frame->data = new uint8_t[bufferSize];
        memcpy(frame->data, p_destBuffer[0], bufferSize);
        double timeBase = ((double)p_audioCodecContext->time_base.num) / (double)p_audioCodecContext->time_base.den;
        frame->lifeTime = duration * timeBase;

        p_player->addAudioFrame(frame);
    }

    return decoded;
}
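
For completeness, the loop driving this function looks roughly like this (a simplified sketch; formatContext, audioStreamIndex and the other names are illustrative, not from my actual code):

// Simplified driver loop: read packets and feed the audio ones to
// decodeAudioPacket() until each packet is fully consumed. The function
// shrinks p_packet itself, so we simply loop while data remains.
AVPacket packet;
while (av_read_frame(formatContext, &packet) >= 0)
{
    if (packet.stream_index == audioStreamIndex)
    {
        while (packet.size > 0)
        {
            int ret = decodeAudioPacket(packet, audioCodecContext, frame,
                                        swrContext, destBuffer, destBufferLinesize,
                                        player, videoInfo);
            if (ret <= 0)
                break; // error, or no bytes were consumed
        }
    }
    av_free_packet(&packet);
}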

And here is how I initialize the context and the destination buffer:

// Initialize SWR context
SwrContext* swrContext = swr_alloc_set_opts(NULL, 
            audioCodecContext->channel_layout, AV_SAMPLE_FMT_FLT, audioCodecContext->sample_rate,
            audioCodecContext->channel_layout, audioCodecContext->sample_fmt, audioCodecContext->sample_rate, 
            0, NULL);
int result = swr_init(swrContext);

// Create destination sample buffer
uint8_t** destBuffer = NULL;
int destBufferLinesize;
av_samples_alloc_array_and_samples( &destBuffer,
                                    &destBufferLinesize,
                                    videoInfo.audioNumChannels,
                                    2048,
                                    AV_SAMPLE_FMT_FLT,
                                    0);
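
On teardown, the converter and the sample buffer have to be released again, roughly like this (simplified):

// Simplified teardown matching the allocations above: the buffer from
// av_samples_alloc_array_and_samples is freed via its first plane pointer,
// then the pointer array itself, then the SwrContext.
if (destBuffer)
{
    av_freep(&destBuffer[0]);
    av_freep(&destBuffer);
}
swr_free(&swrContext);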