I was using CallKit with iOS 10.0.1 and it worked fine (outgoing and incoming calls). After updating my iPhone 7 to iOS 10.2, I hear nothing when I receive an incoming call.
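For context, the CallKit side follows the standard provider-delegate pattern, with audio I/O started from the audio session callbacks. This is only a rough sketch of that wiring (it is not my exact code; _audioController, startIOUnit and stopIOUnit are placeholder names for the controller configured below):

    // Sketch of the assumed CXProviderDelegate wiring (placeholder names).
    - (void)provider:(CXProvider *)provider didActivateAudioSession:(AVAudioSession *)audioSession {
        // CallKit has activated the audio session on the app's behalf;
        // this is where audio I/O is expected to start
        [_audioController startIOUnit];
    }

    - (void)provider:(CXProvider *)provider didDeactivateAudioSession:(AVAudioSession *)audioSession {
        [_audioController stopIOUnit];
    }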
For the audio controller:
try {
    // Configure the audio session
    AVAudioSession *sessionInstance = [AVAudioSession sharedInstance];

    // we are going to play and record so we pick that category
    NSError *error = nil;
    [sessionInstance setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
    XThrowIfError((OSStatus)error.code, "couldn't set session's audio category");

    // set the mode to voice chat
    [sessionInstance setMode:AVAudioSessionModeVoiceChat error:&error];
    XThrowIfError((OSStatus)error.code, "couldn't set session's audio mode");

    // set the buffer duration to 5 ms
    NSTimeInterval bufferDuration = .005;
    [sessionInstance setPreferredIOBufferDuration:bufferDuration error:&error];
    XThrowIfError((OSStatus)error.code, "couldn't set session's I/O buffer duration");

    // set the session's sample rate
    [sessionInstance setPreferredSampleRate:44100 error:&error];
    XThrowIfError((OSStatus)error.code, "couldn't set session's preferred sample rate");

    // add interruption handler
    [[NSNotificationCenter defaultCenter] addObserver:self
                                             selector:@selector(handleInterruption:)
                                                 name:AVAudioSessionInterruptionNotification
                                               object:sessionInstance];

    // we don't do anything special in the route change notification
    [[NSNotificationCenter defaultCenter] addObserver:self
                                             selector:@selector(handleRouteChange:)
                                                 name:AVAudioSessionRouteChangeNotification
                                               object:sessionInstance];

    // if media services are reset, we need to rebuild our audio chain
    [[NSNotificationCenter defaultCenter] addObserver:self
                                             selector:@selector(handleMediaServerReset:)
                                                 name:AVAudioSessionMediaServicesWereResetNotification
                                               object:sessionInstance];
}
catch (CAXException &e) {
    NSLog(@"Error returned from setupAudioSession: %d: %s", (int)e.mError, e.mOperation);
}
catch (...) {
    NSLog(@"Unknown error returned from setupAudioSession");
}
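The notification handlers registered above aren't shown here; handleInterruption: follows the usual pattern from Apple's samples (a rough sketch; startIOUnit and stopIOUnit are assumed helpers that start and stop _rioUnit):

    - (void)handleInterruption:(NSNotification *)notification {
        UInt8 theInterruptionType =
            [[notification.userInfo valueForKey:AVAudioSessionInterruptionTypeKey] intValue];
        if (theInterruptionType == AVAudioSessionInterruptionTypeBegan) {
            [self stopIOUnit];
        }
        else if (theInterruptionType == AVAudioSessionInterruptionTypeEnded) {
            // reactivate the session before restarting I/O
            NSError *error = nil;
            [[AVAudioSession sharedInstance] setActive:YES error:&error];
            if (error) NSLog(@"AVAudioSession set active failed with error: %@", error);
            [self startIOUnit];
        }
    }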
and for the IO unit setup:
try {
    // Create a new instance of Apple Voice Processing IO
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    XThrowIfError(AudioComponentInstanceNew(comp, &_rioUnit), "couldn't create a new instance of Apple Voice Processing IO");

    // Enable input and output on Apple Voice Processing IO
    // Input is enabled on the input scope of the input element
    // Output is enabled on the output scope of the output element
    UInt32 one = 1;
    XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "could not enable input on Apple Voice Processing IO");
    XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one)), "could not enable output on Apple Voice Processing IO");

    // Explicitly set the input and output client formats
    // sample rate = 44100, num channels = 1, format = 32-bit floating point
    CAStreamBasicDescription ioFormat = CAStreamBasicDescription(44100, 1, CAStreamBasicDescription::kPCMFormatFloat32, false);
    XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioFormat, sizeof(ioFormat)), "couldn't set the input client format on Apple Voice Processing IO");
    XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ioFormat, sizeof(ioFormat)), "couldn't set the output client format on Apple Voice Processing IO");

    // Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
    // of samples it will be asked to produce on any single given call to AudioUnitRender
    UInt32 maxFramesPerSlice = 4096;
    XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32)), "couldn't set max frames per slice on Apple Voice Processing IO");

    // Get the property value back from Apple Voice Processing IO. We are going to use this value to allocate buffers accordingly
    UInt32 propSize = sizeof(UInt32);
    XThrowIfError(AudioUnitGetProperty(_rioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize), "couldn't get max frames per slice on Apple Voice Processing IO");

    // We need references to certain data in the render callback
    // This simple struct is used to hold that information
    cd.rioUnit = _rioUnit;
    cd.muteAudio = &_muteAudio;
    cd.audioChainIsBeingReconstructed = &_audioChainIsBeingReconstructed;

    // Set the render callback on Apple Voice Processing IO
    AURenderCallbackStruct renderCallback;
    renderCallback.inputProc = performRender;
    renderCallback.inputProcRefCon = NULL;
    XThrowIfError(AudioUnitSetProperty(_rioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &renderCallback, sizeof(renderCallback)), "couldn't set render callback on Apple Voice Processing IO");

    // Initialize the Apple Voice Processing IO instance
    XThrowIfError(AudioUnitInitialize(_rioUnit), "couldn't initialize Apple Voice Processing IO instance");
}
catch (CAXException &e) {
    NSLog(@"Error returned from setupIOUnit: %d: %s", (int)e.mError, e.mOperation);
}
catch (...) {
    NSLog(@"Unknown error returned from setupIOUnit");
}
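The performRender callback installed above looks roughly like this (a sketch based on the cd struct fields set earlier; cd is the file-scope struct the callback reads, and it pulls the mic input through the unit, zeroing the buffers when muted):

    static OSStatus performRender(void                        *inRefCon,
                                  AudioUnitRenderActionFlags  *ioActionFlags,
                                  const AudioTimeStamp        *inTimeStamp,
                                  UInt32                      inBusNumber,
                                  UInt32                      inNumberFrames,
                                  AudioBufferList             *ioData)
    {
        OSStatus err = noErr;
        if (*cd.audioChainIsBeingReconstructed == NO) {
            // render the mic input (input element 1) into ioData
            err = AudioUnitRender(cd.rioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, ioData);

            // silence the output if muted
            if (*cd.muteAudio) {
                for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i)
                    memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
            }
        }
        return err;
    }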
My log shows this:
[aurioc] 892: failed: '!pri' (enable 3, outf< 1 ch, 44100 Hz, Float32> inf< 1 ch, 44100 Hz, Float32>)
Error returned from setupIOUnit: 561017449: couldn't initialize Apple Voice Processing IO instance
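(561017449 is a four-char code that spells '!pri'; a small helper, hypothetical name, just to show the decoding:)

    // Hypothetical helper: interpret an OSStatus as a big-endian four-char code.
    static NSString *fourCCFromOSStatus(OSStatus status) {
        uint32_t big = CFSwapInt32HostToBig((uint32_t)status);
        return [[NSString alloc] initWithBytes:&big length:4 encoding:NSASCIIStringEncoding];
    }
    // fourCCFromOSStatus(561017449) returns @"!pri"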
Do you have any ideas?