
I am currently working on capturing audio and streaming it to an RTMP server. I work under macOS (in Xcode), so I use the AVFoundation framework to capture the audio sample buffer. For encoding and streaming I need to use the ffmpeg API and the libfaac encoder, so the output format must be AAC (to support stream playback on iOS devices).

I have run into the following problem: the audio capture device (in my case a Logitech camera) gives me a sample buffer with 512 LPCM samples, and I can select an input sample rate of 16000, 24000, 36000 or 48000 Hz. When I feed these 512 samples to the AAC encoder (configured for the corresponding sample rate), I hear slow and jerky audio (it sounds as if there is a piece of silence after each frame).

I figured out (maybe I am wrong) that the libfaac encoder accepts audio frames of exactly 1024 samples. When I set the input sample rate to 24000 Hz and resample the input buffer to 48000 Hz before encoding, I obtain 1024 resampled samples. After encoding these 1024 samples to AAC, I hear the proper sound on output. But my webcam produces 512 samples per buffer for any input sample rate, while the output sample rate must be 48000 Hz. So I need to resample in any case, and I will not obtain exactly 1024 samples per buffer after resampling.
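For example, the expected output count per buffer can be computed with ffmpeg's av_rescale_rnd; a quick illustration of the arithmetic (an illustrative snippet, not from the capture code):

#include <libavutil/mathematics.h>

// 512 samples resampled from 24000 Hz to 48000 Hz give exactly 1024 samples,
// but other input rates do not land on 1024:
int64_t n1 = av_rescale_rnd(512, 48000, 24000, AV_ROUND_UP); // = 1024
int64_t n2 = av_rescale_rnd(512, 48000, 16000, AV_ROUND_UP); // = 1536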

Is there a way to solve this problem within the ffmpeg API?

I would be grateful for any help.

PS: I guess that I could accumulate resampled buffers until the sample count reaches 1024 and then encode them, but since this is a stream there will be trouble with the resulting timestamps and with other input devices, so such a solution does not seem suitable.
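(If one does go the accumulation route, one way to keep the timestamps consistent is to derive pts from a running count of samples fed to the encoder rather than from the capture timestamps. A rough sketch, where samples_encoded is a hypothetical counter:)

// with a stream time base of {1, sample_rate}, the pts of each encoded
// frame is simply the number of samples sent to the encoder so far
static int64_t samples_encoded = 0;

aframe->pts = samples_encoded;          // time base: {1, sample_rate}
samples_encoded += aframe->nb_samples;  // advance by one frame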

The current issue grew out of the problem described in this question: How to fill audio AVFrame (ffmpeg) with the data obtained from CMSampleBufferRef (AVFoundation)?

Here is the code with the audio codec configuration (there is also a video stream, but video works fine):

/* global variables */
static AVFrame *aframe;
static AVFrame *frame;
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *audio_st, *video_st;

void Init()
{
    AVCodec *audio_codec, *video_codec;
    int ret;

    avcodec_register_all();
    av_register_all();
    avformat_network_init();
    avformat_alloc_output_context2(&oc, NULL, "flv", filename);
    fmt = oc->oformat;
    oc->oformat->video_codec = AV_CODEC_ID_H264;
    oc->oformat->audio_codec = AV_CODEC_ID_AAC;
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        // … /* init video codec */
    }

    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_codec = avcodec_find_encoder(fmt->audio_codec);
        if (!audio_codec) {
            fprintf(stderr, "Could not find encoder for '%s'\n",
                    avcodec_get_name(fmt->audio_codec));
            exit(1);
        }
        audio_st = avformat_new_stream(oc, audio_codec);
        if (!audio_st) {
            fprintf(stderr, "Could not allocate stream\n");
            exit(1);
        }
        audio_st->id = oc->nb_streams - 1;

        // AAC:
        audio_st->codec->sample_fmt     = AV_SAMPLE_FMT_S16;
        audio_st->codec->bit_rate       = 32000;
        audio_st->codec->sample_rate    = 48000;
        audio_st->codec->profile        = FF_PROFILE_AAC_LOW;
        audio_st->time_base             = (AVRational){1, audio_st->codec->sample_rate};
        audio_st->codec->channels       = 1;
        audio_st->codec->channel_layout = AV_CH_LAYOUT_MONO;

        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    if (video_st) {
        // … /* prepare video */
    }

    if (audio_st) {
        aframe = avcodec_alloc_frame();
        if (!aframe) {
            fprintf(stderr, "Could not allocate audio frame\n");
            exit(1);
        }
        AVCodecContext *c = audio_st->codec;

        ret = avcodec_open2(c, audio_codec, NULL);
        if (ret < 0) {
            fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
            exit(1);
        }
        // …
    }
}

And here is the resampling and encoding of the audio:

if (mType == kCMMediaType_Audio)
{
    CMSampleTimingInfo timing_info;
    CMSampleBufferGetSampleTimingInfo(sampleBuffer, 0, &timing_info);
    double pts = 0;
    double dts = 0;
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
    int got_packet, ret;
    av_init_packet(&pkt);
    c = audio_st->codec;
    CMItemCount numSamples = CMSampleBufferGetNumSamples(sampleBuffer);

    NSUInteger channelIndex = 0;

    CMBlockBufferRef audioBlockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    size_t audioBlockBufferOffset = channelIndex * numSamples * sizeof(SInt16);
    size_t lengthAtOffset = 0;
    size_t totalLength = 0;
    SInt16 *samples = NULL;
    CMBlockBufferGetDataPointer(audioBlockBuffer, audioBlockBufferOffset, &lengthAtOffset, &totalLength, (char **)&samples);

    const AudioStreamBasicDescription *audioDescription = CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer));

    SwrContext *swr = swr_alloc();

    int in_smprt = (int)audioDescription->mSampleRate;
    av_opt_set_int(swr, "in_channel_layout",  AV_CH_LAYOUT_MONO, 0);
    av_opt_set_int(swr, "out_channel_layout", audio_st->codec->channel_layout, 0);
    av_opt_set_int(swr, "in_channel_count",   audioDescription->mChannelsPerFrame, 0);
    av_opt_set_int(swr, "out_channel_count",  audio_st->codec->channels, 0);
    av_opt_set_int(swr, "in_sample_rate",     audioDescription->mSampleRate, 0);
    av_opt_set_int(swr, "out_sample_rate",    audio_st->codec->sample_rate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", audio_st->codec->sample_fmt, 0);

    swr_init(swr);

    uint8_t **input = NULL;
    int src_linesize;
    int in_samples = (int)numSamples;
    ret = av_samples_alloc_array_and_samples(&input, &src_linesize, audioDescription->mChannelsPerFrame,
                                             in_samples, AV_SAMPLE_FMT_S16P, 0);
    *input = (uint8_t *)samples;
    uint8_t *output = NULL;

    // upper bound of output samples, accounting for resampler delay
    int out_samples = av_rescale_rnd(swr_get_delay(swr, in_smprt) + in_samples,
                                     audio_st->codec->sample_rate, in_smprt, AV_ROUND_UP);

    av_samples_alloc(&output, NULL, audio_st->codec->channels, out_samples, audio_st->codec->sample_fmt, 0);
    out_samples = swr_convert(swr, &output, out_samples, (const uint8_t **)input, in_samples);

    aframe->nb_samples = out_samples;

    ret = avcodec_fill_audio_frame(aframe, audio_st->codec->channels, audio_st->codec->sample_fmt,
                                   (uint8_t *)output,
                                   out_samples *
                                   av_get_bytes_per_sample(audio_st->codec->sample_fmt) *
                                   audio_st->codec->channels, 1);

    aframe->channel_layout = audio_st->codec->channel_layout;
    aframe->channels = audio_st->codec->channels;
    aframe->sample_rate = audio_st->codec->sample_rate;

    if (timing_info.presentationTimeStamp.timescale != 0)
        pts = (double)timing_info.presentationTimeStamp.value / timing_info.presentationTimeStamp.timescale;

    aframe->pts = pts * audio_st->time_base.den;
    aframe->pts = av_rescale_q(aframe->pts, audio_st->time_base, audio_st->codec->time_base);

    ret = avcodec_encode_audio2(c, &pkt, aframe, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }
    swr_free(&swr);

    if (got_packet)
    {
        pkt.stream_index = audio_st->index;

        pkt.pts = av_rescale_q(pkt.pts, audio_st->codec->time_base, audio_st->time_base);
        pkt.dts = av_rescale_q(pkt.dts, audio_st->codec->time_base, audio_st->time_base);

        // Write the compressed frame to the media file.
        ret = av_interleaved_write_frame(oc, &pkt);
        if (ret != 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }
}

4 Answers


I ended up here after running into a similar problem. I am reading audio and video at 720p50 from a Blackmagic Decklink SDI card, which means there are 960 samples of audio per video frame (48k / 50 fps) that I want to encode together with the video. Sending only 960 samples to aacenc produced really weird audio, and it did not really complain about that fact either.

I started using an AVAudioFifo (see ffmpeg/doc/examples/transcode_aac.c) and kept adding frames to it until I had enough samples to satisfy aacenc. This means, I guess, that my samples play back too late, since the pts will be set on 1024 samples when the first 960 should really have a different value. But as far as I can hear/see, it is not very noticeable.
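A minimal sketch of that approach, assuming an opened encoder context c and the converted output/out_samples buffers from the question (the FIFO calls come from libavutil/audio_fifo.h):

#include <libavutil/audio_fifo.h>

static AVAudioFifo *fifo = NULL;

// once, after avcodec_open2():
fifo = av_audio_fifo_alloc(c->sample_fmt, c->channels, c->frame_size * 2);

// per captured buffer, after swr_convert():
av_audio_fifo_write(fifo, (void **)&output, out_samples);
while (av_audio_fifo_size(fifo) >= c->frame_size) {
    av_audio_fifo_read(fifo, (void **)aframe->data, c->frame_size);
    aframe->nb_samples = c->frame_size;
    // set aframe->pts (e.g. from a running sample counter) and encode
}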

Answered on 2016-08-19T07:41:16.717

You have to break the sample buffer into chunks of size 1024. I did this while recording mp3 in Android; for more information, follow these links: link 1, link 2.

Answered on 2014-01-14T12:02:45.833

In case anyone ends up here: I ran into the same problem, and as @Mohit pointed out, for AAC each audio frame has to be broken down into 1024-byte chunks.

Example:

uint8_t *buffer = (uint8_t *) malloc(1024);
AVFrame *frame = av_frame_alloc();
// read the input in 1024-byte chunks and hand each chunk to the encoder
while (fread(buffer, 1024, 1, fp) == 1) {
    frame->data[0] = buffer;
    // ... set the remaining frame fields and encode the frame here
}
Answered on 2015-02-19T22:28:20.703

I ran into a similar problem. I was encoding PCM packets to AAC, and the PCM packets sometimes had a length smaller than 1024.

If I encoded a packet smaller than 1024 samples, the audio was slow; on the other hand, if I threw it away, the audio got faster. From my observation, the swr_convert function does not do any automatic buffering.

I ended up with a buffering scheme where packets are filled into a 1024-sample buffer; each time the buffer becomes full it is encoded and cleared.

The function that fills the buffer is as follows:

// put frame data into buffer of fixed size
bool ffmpegHelper::putAudioBuffer(const AVFrame *pAvFrameIn, AVFrame **pAvFrameBuffer, AVCodecContext *dec_ctx, int frame_size, int &k0) {
  // prepare pFrameAudio
  if (!(*pAvFrameBuffer)) {
    if (!(*pAvFrameBuffer = av_frame_alloc())) {
      av_log(NULL, AV_LOG_ERROR, "Alloc frame failed\n");
      return false;
    } else {
      (*pAvFrameBuffer)->format = dec_ctx->sample_fmt;
      (*pAvFrameBuffer)->channels = dec_ctx->channels;
      (*pAvFrameBuffer)->sample_rate = dec_ctx->sample_rate;
      (*pAvFrameBuffer)->nb_samples = frame_size;
      int ret = av_frame_get_buffer(*pAvFrameBuffer, 0);
      if (ret < 0) {
        char err[500];
        av_log(NULL, AV_LOG_ERROR, "get audio buffer failed: %s\n",
          av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret));
        return false;
      }
      (*pAvFrameBuffer)->nb_samples = 0;
      (*pAvFrameBuffer)->pts = pAvFrameIn->pts;
    }
  }

  // copy input data to buffer
  int n_channels = pAvFrameIn->channels;
  int new_samples = FFMIN(pAvFrameIn->nb_samples - k0, frame_size - (*pAvFrameBuffer)->nb_samples);
  int k1 = (*pAvFrameBuffer)->nb_samples;

  if (pAvFrameIn->format == AV_SAMPLE_FMT_S16) {
    int16_t *d_in = (int16_t *)pAvFrameIn->data[0];
    d_in += n_channels * k0;
    int16_t *d_out = (int16_t *)(*pAvFrameBuffer)->data[0];
    d_out += n_channels * k1;

    for (int i = 0; i < new_samples; ++i) {
      for (int j = 0; j < pAvFrameIn->channels; ++j) {
        *d_out++ = *d_in++;
      }
    }
  } else {
    printf("not handled format for audio buffer\n");
    return false;
  }

  (*pAvFrameBuffer)->nb_samples += new_samples;
  k0 += new_samples;

  return true;
}

And the loop that fills the buffer and encodes:

// transcoding needed
int got_frame;
AVMediaType stream_type;
// decode the packet (do it yourself)
decodePacket(packet, dec_ctx, &pAvFrame_, got_frame);

if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
    ret = 0;
    // break audio packet down to buffer
    if (enc_ctx->frame_size > 0) {
        int k = 0;
        while (k < pAvFrame_->nb_samples) {
            if (!putAudioBuffer(pAvFrame_, &pFrameAudio_, dec_ctx, enc_ctx->frame_size, k))
                return false;
            if (pFrameAudio_->nb_samples == enc_ctx->frame_size) {
                // the buffer is full, encode it (do it yourself)
                ret = encodeFrame(pFrameAudio_, stream_index, got_frame, false);
                if (ret < 0)
                    return false;
                pFrameAudio_->pts += enc_ctx->frame_size;
                pFrameAudio_->nb_samples = 0;
            }
        }
    } else {
        ret = encodeFrame(pAvFrame_, stream_index, got_frame, false);
    }
} else {
    // encode packet directly
    ret = encodeFrame(pAvFrame_, stream_index, got_frame, false);
}
Answered on 2016-09-26T01:39:07.113