
Recording speech to a file with AudioQueue works fine for me.

I have tried this: in the MyInputBufferHandler function,

AudioQueueBufferRef->mAudioData

gives me the raw data, but I cannot call other objects, such as oStream, from inside that MyInputBufferHandler function.

How can I get the raw data out of the AudioQueue buffers and send it over the internet?


3 Answers


You need to set up the format in which you want the AudioQueue to deliver the data to you. For the relevant functions, see:

http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/CoreAudioDataTypesRef/Reference/reference.html

An example:

FillOutASBDForLPCM (sRecordFormat,
                    16000,   // sample rate (Hz)
                    1,       // channels per frame
                    8,       // valid bits per channel
                    8,       // total bits per channel
                    false,   // not floating point
                    false    // not big-endian
                    );
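
Once the AudioStreamBasicDescription is filled out, it would typically be handed to AudioQueueNewInput together with a C callback and a user-data pointer. The sketch below assumes MyInputBufferHandler is your own AudioQueueInputCallback and that the code runs inside an Objective-C object (pre-ARC, like the rest of the code here), so self can be passed through as the callback's inUserData:

// Rough sketch: create an input (recording) queue that delivers buffers in
// sRecordFormat to MyInputBufferHandler; `self` comes back as inUserData.
AudioQueueRef queue = NULL;
OSStatus err = AudioQueueNewInput(&sRecordFormat,
                                  MyInputBufferHandler,
                                  self,        // user data for the callback
                                  NULL, NULL,  // default run loop / mode
                                  0,           // reserved, must be 0
                                  &queue);
if (err != noErr)
    NSLog(@"AudioQueueNewInput failed: %d", (int)err);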
answered 2011-06-08T09:31:43.313

See the answer to this question; it gives you the raw data. You can then wrap it in an NSData (or whatever you like), compress it, and upload it.
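
For instance, inside the AudioQueue input callback the buffer's raw bytes could be copied into an NSData roughly like this; inBuffer is the AudioQueueBufferRef the callback receives, and compression and upload are left to whatever networking code you use:

// Copy the buffer's raw bytes into an NSData for later compression/upload.
NSData *chunk = [NSData dataWithBytes:inBuffer->mAudioData
                               length:inBuffer->mAudioDataByteSize];
// ... compress `chunk` and hand it to your upload code here ...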

answered 2012-04-29T21:20:32.787

You need to modify some of the code in myInputBufferHandler. I created an Objective-C class that adapts the C++ code from Apple's SpeakHere sample.

Feel free to use it: MIP_StreamAudioRecorder.h

//
//  MIP_StreamAudioRecorder.h
//
//  Created by Dennies Chang on 12/10/3.
//  Copyright (c) 2012 Dennies Chang. All rights reserved.
//

#import <Foundation/Foundation.h>
#include <AudioToolbox/AudioToolbox.h>
#include <libkern/OSAtomic.h>

#include "CAStreamBasicDescription.h"
#include "CAXException.h"

#define kNumberRecordBuffers    3
#define kBufferDurationSeconds  0.5     // each buffer holds half a second of audio (see startRecord)

@protocol MIP_StreamAudioRecorderDelegate;

@interface MIP_StreamAudioRecorder : NSObject {
    CAStreamBasicDescription    mRecordFormat;
    AudioQueueRef               mQueue;
    AudioQueueBufferRef         mBuffers[kNumberRecordBuffers];
    BOOL                        mIsRunning;

    id <MIP_StreamAudioRecorderDelegate> delegate;
}
@property (nonatomic, assign) id <MIP_StreamAudioRecorderDelegate> delegate;
@property (nonatomic, readonly) BOOL mIsRunning;

- (void)SetupAudioFormat:(UInt32) inFormatID;
- (void)startRecord;
- (void)stopRecord;
- (int)computeRecordBufferSize:(AudioStreamBasicDescription *)format duration:(float)second;

@end


@protocol MIP_StreamAudioRecorderDelegate <NSObject>
@optional
- (void)gotAudioData:(NSData *)audioData;

@end

And the .mm file: MIP_StreamAudioRecorder.mm

//
//  MIP_StreamAudioRecorder.mm
//
//  Created by Dennies Chang on 12/10/3.
//  Copyright (c) 2012 Dennies Chang. All rights reserved.
//


#import "MIP_StreamAudioRecorder.h"

@implementation MIP_StreamAudioRecorder
@synthesize delegate;
@synthesize mIsRunning;

- (id)init {
    self = [super init];

    return self;
}

- (void)dealloc {
    [super dealloc];
}


- (void)SetupAudioFormat:(UInt32) inFormatID {
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));

    UInt32 size = sizeof(mRecordFormat.mSampleRate);
    XThrowIfError(AudioSessionGetProperty(  kAudioSessionProperty_CurrentHardwareSampleRate,
                                          &size,
                                          &mRecordFormat.mSampleRate), "couldn't get hardware sample rate");

    size = sizeof(mRecordFormat.mChannelsPerFrame);
    XThrowIfError(AudioSessionGetProperty(  kAudioSessionProperty_CurrentHardwareInputNumberChannels,
                                      &size,
                                      &mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");

    mRecordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM)
    {
        // if we want pcm, default to signed 16-bit little-endian
        mRecordFormat.mChannelsPerFrame = 1;
        mRecordFormat.mSampleRate = 8000;

        mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mRecordFormat.mBitsPerChannel = 16;
        mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
        mRecordFormat.mFramesPerPacket = 1;
    }
}

- (int)computeRecordBufferSize:(AudioStreamBasicDescription *)format duration:(float)second {
    int packets, frames, bytes = 0;
    try {
        frames = (int)ceil(second * format->mSampleRate);

        if (format->mBytesPerFrame > 0)
            bytes = frames * format->mBytesPerFrame;
        else {
            UInt32 maxPacketSize;
            if (format->mBytesPerPacket > 0)
                maxPacketSize = format->mBytesPerPacket;    // constant packet size
            else {
                UInt32 propertySize = sizeof(maxPacketSize);
                XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize,
                                                    &propertySize), "couldn't get queue's maximum output packet size");
            }
            if (format->mFramesPerPacket > 0)
                packets = frames / format->mFramesPerPacket;
            else
                packets = frames;   // worst-case scenario: 1 frame in a packet
            if (packets == 0)       // sanity check
                packets = 1;
            bytes = packets * maxPacketSize;
        }
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        return 0;
    }   
    return bytes;
}

/*
- (void)myInputBufferHandler:(id)inUserData AudioQueue:(AudioQueueRef) inAQ BufferRef:(AudioQueueBufferRef)inBuffer withAudioTS:(AudioTimeStamp *)inStartTime andNumPackets:(UInt32)inNumPackets andDescription:(AudioStreamPacketDescription *)inPacketDesc {
*/
void MyInputBufferHandler(void *                               inUserData,
                          AudioQueueRef                        inAQ,
                          AudioQueueBufferRef                  inBuffer,
                          const AudioTimeStamp *               inStartTime,
                          UInt32                               inNumPackets,
                          const AudioStreamPacketDescription * inPacketDesc)
{

    MIP_StreamAudioRecorder *THIS = (MIP_StreamAudioRecorder *)inUserData;
    try {
        if (inNumPackets > 0) {
            //use delegate to handle;

            if (THIS.delegate) {
                NSMutableData *data = [[NSMutableData alloc] init];
                if ([THIS.delegate respondsToSelector:@selector(gotAudioData:)]) {
                    [data appendBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];
                    [THIS.delegate gotAudioData:data];
                }
                [data release];
            }
            /*
            // write packets to file
            XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
                                                inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
                          "AudioFileWritePackets failed");
            aqr->mRecordPacket += inNumPackets;
            */
        }

        // if we're not stopping, re-enqueue the buffer so that it gets filled again
        if (THIS->mIsRunning)
            XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
}

- (void)startRecord {
    int i, bufferByteSize;

    try {
        [self SetupAudioFormat:kAudioFormatLinearPCM];

        // create the queue
        XThrowIfError(AudioQueueNewInput(
                                         &mRecordFormat,
                                         MyInputBufferHandler,
                                         self /* userData */,
                                         NULL /* run loop */, NULL /* run loop mode */,
                                         0 /* flags */, &mQueue), "AudioQueueNewInput failed");

        // get the record format back from the queue's audio converter --
        // the file may require a more specific stream description than was necessary to create the encoder.

        UInt32 size = sizeof(mRecordFormat);
        XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
                                            &mRecordFormat, &size), "couldn't get queue's format");


        // allocate and enqueue buffers
        bufferByteSize = [self computeRecordBufferSize:&mRecordFormat duration:kBufferDurationSeconds]; // enough bytes for half a second
        for (i = 0; i < kNumberRecordBuffers; ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
                          "AudioQueueAllocateBuffer failed");
            XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
                          "AudioQueueEnqueueBuffer failed");
        }
        // start the queue
        mIsRunning = true;
        XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    catch (...) {
        fprintf(stderr, "An unknown error occurred\n");
    }
}

- (void)stopRecord {
    mIsRunning = false;
    XThrowIfError(AudioQueueStop(mQueue, true), "AudioQueueStop failed");
    AudioQueueDispose(mQueue, true);
}

@end

Please note that you should change the sample rate and related settings as needed; I set it up to record mono (1 channel), 16-bit audio at 8 kHz.

You then get the raw data in the Objective-C code that implements MIP_StreamAudioRecorderDelegate, and you can send it over your internet channel or save it to a file, as in the example below.
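
For example, a minimal delegate implementation might look like the following sketch. MyAudioUploader is just an illustrative name, and oStream is assumed to be an NSOutputStream that you have already created and opened:

// Hypothetical receiver for the recorder's raw PCM chunks.
@interface MyAudioUploader : NSObject <MIP_StreamAudioRecorderDelegate> {
    NSOutputStream *oStream;    // assumed to be created and opened elsewhere
}
@end

@implementation MyAudioUploader

- (void)gotAudioData:(NSData *)audioData {
    // Each call delivers one AudioQueue buffer of 16-bit, 8 kHz mono PCM.
    [oStream write:(const uint8_t *)[audioData bytes]
         maxLength:[audioData length]];
}

@end

Wiring it up is then roughly:

MyAudioUploader *uploader = [[MyAudioUploader alloc] init];
MIP_StreamAudioRecorder *recorder = [[MIP_StreamAudioRecorder alloc] init];
recorder.delegate = uploader;
[recorder startRecord];
// ... later ...
[recorder stopRecord];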

Best regards, Dennies.

answered 2012-10-05T09:53:09.527