
In my iPhone app, I want to record the sound produced inside my own app, not the external sound captured by the microphone. Another way of putting it: I want to record the sound straight off the sound card as it plays. From there, I want to save the newly recorded sound file to a specified local URL. A similar question has been posted here. I have read some tutorials and have some code, but there are a few things I need help with. Here is my code:

Header file

OSStatus status;

Implementation file

#define kOutputBus 0
#define kInputBus 1

static AudioComponentInstance audioUnit;

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {

    // TODO: Use inRefCon to access our interface object to do stuff
    // Then, use inNumberFrames to figure out how much data is available, and make
    // that much space available in buffers in an AudioBufferList.

    AudioBufferList *bufferList; // <- Fill this up with buffers (you will want to malloc it, as it's a dynamic-length list)

    // Then:
    // Obtain recorded samples

    OSStatus status;

    status = AudioUnitRender([audioInterface audioUnit],
                             ioActionFlags,
                             inTimeStamp,
                             inBusNumber,
                             inNumberFrames,
                             bufferList);
    checkStatus(status);

    // Now, we have the samples we just read sitting in buffers in bufferList
    DoStuffWithTheRecordedAudio(bufferList);
    return noErr;
}
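
For reference, here is a minimal sketch of how that AudioBufferList might be allocated, assuming the mono 16-bit format configured in the init code below (the helper name and sizing are mine, not part of the tutorial code):

// Hypothetical helper: allocates a one-buffer AudioBufferList sized for
// inNumberFrames of mono 16-bit samples (malloc comes from <stdlib.h>).
static AudioBufferList *allocateBufferList(UInt32 inNumberFrames) {
    UInt32 bytesPerFrame = 2; // 16 bits, 1 channel, matching audioFormat below
    AudioBufferList *list = malloc(sizeof(AudioBufferList));
    list->mNumberBuffers = 1; // interleaved PCM uses a single buffer
    list->mBuffers[0].mNumberChannels = 1;
    list->mBuffers[0].mDataByteSize = inNumberFrames * bytesPerFrame;
    list->mBuffers[0].mData = malloc(inNumberFrames * bytesPerFrame);
    return list;
}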

static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {
    // Notes: ioData contains buffers (may be more than one!)
    // Fill them up as much as you can. Remember to set the size value in each buffer to match how
    // much data is in the buffer.
    return noErr;
}
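
To illustrate what that comment means, here is a minimal sketch that just outputs silence, assuming 16-bit PCM (memset comes from <string.h>; real code would copy the recorded samples in instead):

// Inside playbackCallback: zero every buffer Core Audio hands us,
// leaving each mDataByteSize at the size the system requested.
for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
    memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
}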


void initializeInternalAudioRecorder() {
    AudioStreamBasicDescription audioFormat; // currently declared as a local variable; try making it a global if this doesn't work
    OSStatus status;
    AudioComponentInstance audioUnit;


    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    checkStatus(status);

    // Enable IO for recording
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);

    // Enable IO for playback
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  kOutputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);

    // Describe format
    audioFormat.mSampleRate         = 44100.00;
    audioFormat.mFormatID           = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket    = 1;
    audioFormat.mChannelsPerFrame   = 1;
    audioFormat.mBitsPerChannel     = 16;
    audioFormat.mBytesPerPacket     = 2;
    audioFormat.mBytesPerFrame      = 2;

    // Apply format
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    checkStatus(status);
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    checkStatus(status);


    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  kInputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Set output callback
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global,
                                  kOutputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
    flag = 0;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));

    // TODO: Allocate our own buffers if we want

    // Initialise
    status = AudioUnitInitialize(audioUnit);
    checkStatus(status);
}

-(void)startInternalRecorder {
    OSStatus status = AudioOutputUnitStart(audioUnit);
    checkStatus(status);
}

-(void)stopInternalRecorder {
    OSStatus status = AudioOutputUnitStop(audioUnit);
    checkStatus(status);
    AudioComponentInstanceDispose(audioUnit);
}

Now, I am getting the following errors in the implementation:

  • 'audioInterface' is undeclared
  • 'self' is undeclared

So my questions are: how do I fix these errors, and how do I specify a URL at which to save the recorded sound file?

Here is where I got my code: http://atastypixel.com/blog/using-remoteio-audio-unit/comment-page-6/#comment-6734

I know a lot of my problems come from my lack of understanding of audio units, but I would really appreciate anyone who can help. Thank you.


1 Answer


Well, the code you copied and pasted seems to be quite incomplete. I'd be careful with it. :) Also, it looks like you copied it without preserving the structure it is supposed to have.

At any rate, audioFormat should be declared as a local variable, and its type is AudioStreamBasicDescription. The top part of the code (that is, everything above the declaration of the recordingCallback function) is actually an initialization function, even though the original author wasn't very explicit about that. So that code needs to be wrapped up into something like this:

void initializeMyStuff() {
  // Describe audio component
  AudioComponentDescription desc;
  desc.componentType = kAudioUnitType_Output;

  ... lots more code ...

  // Initialise
  status = AudioUnitInitialize(audioUnit);
  checkStatus(status);
} // <-- you were missing this end bracket, which caused the compilation errors

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp, ... etc

...and call it when you start up the audio part of your app. The nested-function errors will go away once this first part of the code is wrapped in a proper C function. As for the undeclared functions, you should either move recordingCallback and playbackCallback above initializeMyStuff, or declare them at the top of the file.
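
If you go the declaration route, that just means putting the two prototypes (copied from your own callback definitions) above initializeMyStuff:

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData);

static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData);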

You should also get rid of -(void)testMethod and simply call the C function initializeMyStuff() instead. Does that make sense?
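
For example (viewDidLoad is just one plausible place to kick this off; any point where your app's audio starts up will do):

- (void)viewDidLoad {
    [super viewDidLoad];
    initializeMyStuff(); // plain C call; no Objective-C wrapper method needed
}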

Answered 2013-08-12T08:59:23.910