4

我正在使用下面的代码来初始化我的音频组件。

-(void) startListeningWithCoreAudio
{
     NSError *error = nil;

     [[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategoryPlayAndRecord error:&error];
     if (error) 
          NSLog(@"error setting up audio session: %@", [error localizedDescription]);

     [[AVAudioSession sharedInstance] setDelegate:self];

     OSStatus status = AudioSessionSetActive(YES);
     checkStatus(status);

          // Find the apple mic
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    AudioComponent inputComponent = AudioComponentFindNext( NULL, &desc );
    status = AudioComponentInstanceNew( inputComponent, &kAudioUnit );
    checkStatus( status );

          // enable mic output as our input
    UInt32 flag = 1;
    status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag) );
    checkStatus(status);


          // Define mic output audio format
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate         = 16000.0;
    audioFormat.mFormatID           = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket    = 1;
    audioFormat.mChannelsPerFrame   = 1;
    audioFormat.mBitsPerChannel     = 16;
    audioFormat.mBytesPerPacket     = 2;
    audioFormat.mBytesPerFrame      = 2;

    status = AudioUnitSetProperty( kAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat) );
    checkStatus(status);

          // Define our callback methods
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct) );
    checkStatus(status);

          // By pass voice processing
     UInt32 audiobypassProcessing = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_BY_PASS_PROCESSING];
     status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_BypassVoiceProcessing, 
                                   kAudioUnitScope_Global, kInputBus, &audiobypassProcessing, sizeof(audiobypassProcessing));
     checkStatus(status);

          // Automatic Gain Control
     UInt32 audioAGC = [[NSUserDefaults standardUserDefaults]boolForKey:VOICE_AGC];
     status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC, 
                                   kAudioUnitScope_Global, kInputBus, &audioAGC, sizeof(audioAGC));
     checkStatus(status);

          //Non Audio Voice Ducking
     UInt32 audioDucking = [[NSUserDefaults standardUserDefaults]boolForKey:VOICE_DUCKING];
     status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_DuckNonVoiceAudio, 
                                   kAudioUnitScope_Global, kInputBus, &audioDucking, sizeof(audioDucking));
     checkStatus(status);

          //Audio Quality
     UInt32 quality = [[NSUserDefaults standardUserDefaults]integerForKey:VOICE_QUALITY];
     status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingQuality, 
                                   kAudioUnitScope_Global, kInputBus, &quality, sizeof(quality));
     checkStatus(status);

     status = AudioUnitInitialize(kAudioUnit);
     checkStatus(status);

    status = AudioOutputUnitStart( kAudioUnit );
    checkStatus(status);

    UInt32 audioRoute = (UInt32)kAudioSessionOverrideAudioRoute_Speaker;
    status = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof (audioRoute), &audioRoute);
    checkStatus(status);     
}


-(void) stopListeningWithCoreAudio
{ 
    OSStatus     status = AudioUnitUninitialize( kAudioUnit );
     checkStatus(status);

     status = AudioOutputUnitStop( kAudioUnit );
    checkStatus( status );

//     if(kAudioUnit)
//     {
//          status = AudioComponentInstanceDispose(kAudioUnit);
//          checkStatus(status);
//          kAudioUnit = nil;
//     }

    status = AudioSessionSetActive(NO);
     checkStatus(status);

     NSError *error = nil;
    [[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategorySoloAmbient error:&error];
    if (error)
          NSLog(@"error setting up audio session: %@", [error localizedDescription]);
}

它第一次工作正常。我的意思startListeningWithCoreAudio是由按钮按下事件调用。它可以很好地录制/处理音频。在其他事件中,我呼吁stopListeningWithCoreAudio停止录制/处理音频。

当我再次尝试调用该函数时,问题就来了startListeningWithCoreAudio。它为两个函数抛出错误。AudioUnitInitialize并且AudioOutputUnitStart从 调用startListeningWithCoreAudio

任何人都可以帮我解决问题吗?

4

2 回答 2

2

我找到了解决方案。如果我们背靠背调用下面的函数,就会产生问题。

extern OSStatus AudioUnitUninitialize(AudioUnit inUnit)                     
extern OSStatus AudioComponentInstanceDispose(AudioComponentInstance inInstance)

因此,我通过以下方式在主线程上调用了 dispose 方法。

[self performSelectorOnMainThread:@selector(disposeCoreAudio) withObject:nil waitUntilDone:NO];

-(void) disposeCoreAudio
{
     OSStatus status = AudioComponentInstanceDispose(kAudioUnit);
     kAudioUnit = nil;
}

它解决了这个问题。因此,正确的顺序是停止录制,取消初始化录制器并在主线程上处理录制器。

于 2012-06-05T09:27:42.110 回答
1

一个可能的问题是您的代码试图在停止之前取消初始化正在运行的音频单元。

于 2012-05-15T18:42:19.847 回答