0

在我的应用程序中,我必须从设备的默认麦克风录制声音并将其发送到 RTSP 服务器以进一步流式传输。

我的记录器状态结构是:

// Recorder state shared between the setup code and the AudioQueue input
// callback (modeled on Apple's Audio Queue Services recording example).
typedef struct AQRecorderState {
    AudioStreamBasicDescription  mDataFormat;                   // PCM format the queue records in
    AudioQueueRef                mQueue;                        // the recording audio queue
    AudioQueueBufferRef          mBuffers[kNumberRecordBuffers];// buffers cycled through the queue
    AudioFileID                  mAudioFile;                    // destination file — unused when streaming; never opened in the code shown
    UInt32                       bufferByteSize;                // size of each queue buffer, in bytes
    SInt64                       mCurrentPacket;                // packet index of the next packet to record
    bool                         mIsRunning;                    // true while the queue is recording
}AQRecorderState;

我的录音设置参数是:

    // --- Recording format: 44.1 kHz, mono, 16-bit linear PCM ---
    ars.mDataFormat.mSampleRate     = 44100;
    ars.mDataFormat.mChannelsPerFrame   = 1;
    ars.mDataFormat.mFramesPerPacket    = 1;  // uncompressed PCM: one frame per packet
    ars.mDataFormat.mBitsPerChannel     = 16;
    ars.mDataFormat.mBytesPerFrame      = ars.mDataFormat.mChannelsPerFrame * sizeof (SInt16); // BYTES_PER_FRAME;
    ars.mDataFormat.mBytesPerPacket     = ars.mDataFormat.mChannelsPerFrame * sizeof (SInt16); // BYTES_PER_PACKET;

    /*----------------- FORMAT -------------------*/
    ars.mDataFormat.mFormatID = kAudioFormatLinearPCM;
    // NOTE(review): iOS devices are little-endian; requesting
    // kLinearPCMFormatFlagIsBigEndian means the queue delivers big-endian
    // samples — confirm the FFmpeg encoder in the callback really expects
    // big-endian input, otherwise drop this flag.
    ars.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;

    // Compute a buffer size large enough for kBufferSeconds of audio.
    // NOTE(review): ars.mQueue is still NULL here — DeriveBufferSize must
    // tolerate that (Apple's sample only uses the queue for VBR formats).
    DeriveBufferSize(ars.mQueue, ars.mDataFormat, kBufferSeconds, &ars.bufferByteSize);

    // Create the input (recording) queue; `self` is passed through to the
    // callback as user data.  NOTE(review): the OSStatus results of this and
    // every Core Audio call below are ignored — check them during bring-up.
    AudioQueueNewInput(&ars.mDataFormat,
                       AQInputCallback,
                       (__bridge void *)(self),
                       NULL,
                       kCFRunLoopCommonModes,
                       0,
                       &ars.mQueue);

// Set the magic cookie for the queue.
// NOTE(review): ars.mAudioFile is never opened anywhere in the code shown,
// so this passes an uninitialized AudioFileID; for linear PCM no cookie is
// needed and this call can likely be removed — verify.
    setMagicCookieForFile(ars.mQueue, ars.mAudioFile);

    // Allocate the recording buffers and hand them all to the queue.
    for (int i=0; i<kNumberRecordBuffers; i++){
        AudioQueueAllocateBuffer(ars.mQueue, ars.bufferByteSize, &ars.mBuffers[i]);
        AudioQueueEnqueueBuffer(ars.mQueue, ars.mBuffers[i], 0, NULL);
    }

    // Reset the packet counter and mark the recorder as running (the
    // callback checks mIsRunning before re-enqueueing buffers).
    ars.mCurrentPacket = 0;
    ars.mIsRunning = true;

    // Start recording immediately (NULL start time = as soon as possible).
    AudioQueueStart(ars.mQueue, NULL);

我的输入回调看起来像:

// AudioQueue input callback: invoked each time the queue fills a buffer with
// recorded PCM.  Encodes the buffer with FFmpeg, writes the packet to the
// RTSP muxer, and returns the buffer to the queue so recording continues.
//
// Fixes vs. the original:
//  - `inNumPackets` is UInt32, matching Apple's AudioQueueInputCallback
//    typedef (the original `unsigned long` is 8 bytes on 64-bit targets);
//  - the last parameter is renamed to what it actually is: the packet
//    descriptions, not the data format;
//  - mCurrentPacket is advanced once per callback (it was incremented twice);
//  - the encode buffer is `size` bytes (avcodec_encode_audio is given
//    buf_size = size, so 100x over-allocation bought nothing);
//  - the buffer is re-enqueued, without which recording silently stops after
//    kNumberRecordBuffers callbacks;
//  - the encode buffer is freed on the write-error path.
static void AQInputCallback(void *aqRecorderState,
            AudioQueueRef                        inQ,
            AudioQueueBufferRef                  inBuffer,
            const AudioTimeStamp                 *timestamp,
            UInt32                               inNumPackets,
            const AudioStreamPacketDescription   *inPacketDescs)
{
    NSLog(@"........Callback called");
    AppDelegate *THIS = (__bridge AppDelegate *)aqRecorderState;
    AQRecorderState *pArs = &(THIS->ars);

    if (inNumPackets > 0) {
        write_audio_frame(THIS->oc, THIS->audio_st);

        AVCodecContext *codec = THIS->audio_st->codec;
        uint32_t size = inBuffer->mAudioDataByteSize;

        // mSampleTime counts sample frames; dividing by SAMPLE_RATE yields ms.
        // NOTE(review): av_interleaved_write_frame expects pts/dts in the
        // *stream's* time_base.  If audio_st->time_base is not 1/1000, rescale
        // with av_rescale_q — mismatched time bases are a classic cause of the
        // "out of order packet" behavior seen on the server side.
        long sample_time_ms = (long)(timestamp->mSampleTime * 1000 / SAMPLE_RATE);

        uint8_t *data = av_malloc(size);
        if (!data) {
            fprintf(stderr, "Couldn't allocate data for audio_frame\n");
            exit(1);
        }

        NSLog(@"Size 1 : %d", size);

        AVPacket packet;
        av_init_packet(&packet);
        packet.size = avcodec_encode_audio(codec, data, size, inBuffer->mAudioData);

        NSLog(@"Size 2: %d", packet.size);

        packet.data = data;
        packet.pts = sample_time_ms;
        packet.dts = sample_time_ms;
        packet.flags |= AV_PKT_FLAG_KEY;
        // NOTE(review): this duration formula looks wrong — duration should be
        // the packet's length expressed in stream time_base units; verify.
        packet.duration = (size * 8 * 1024 * 1000000) / BITS_PER_CHANNEL;
        packet.stream_index = THIS->audio_st->index;

        // Advance the packet index exactly once per callback.
        pArs->mCurrentPacket += inNumPackets;

        if (av_interleaved_write_frame(THIS->oc, &packet) != 0) {
            av_free(data);  // don't leak the encode buffer on the error path
            exit(1);
        }

        av_free(data);
    }

    // Hand the buffer back to the queue; without this the queue runs out of
    // buffers after kNumberRecordBuffers callbacks and recording stalls.
    if (pArs->mIsRunning) {
        AudioQueueEnqueueBuffer(inQ, inBuffer, 0, NULL);
    }
}

我在输入回调中获取数据,如果将其写入文件然后播放,那么它工作正常,但我的工作是不保存在文件中,而是直接将数据发送到 RTSP 服务器。

注意:av_interleaved_write_frame() 用于把帧写到服务器。我认为问题出在输入回调里构造 AVPacket(尤其是 pts/dts 时间戳)的方式上,这就是服务器端收到“无序”数据包的原因。

我在互联网上到处搜索,但没有找到任何可行的解决方案。如果有人对此有所了解,请提供帮助。

4

1 回答 1

0

您使用的是 TCP 还是 UDP?我认为乱序和丢弃的数据包是 UDP 才会出现的问题;必要时,ffmpeg 可以对数据包进行重新排序。

于 2012-12-15T21:14:50.860 回答