I'm trying to record the sound produced at the output of a mixer unit.

My code is currently based on Apple's MixerHost iOS demo app: a mixer node is connected to a Remote IO node in the audio processing graph.

I'm trying to set an input callback on the Remote IO node's input, i.e. at the mixer's output. I'm doing something wrong, but I can't find the mistake.
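For context, the wiring follows MixerHost: the mixer node's output feeds the Remote IO node's input, roughly like this (a sketch; processingGraph, mixerNode and iONode are the MixerHost names, my actual connection code is not shown):

result = AUGraphConnectNodeInput(processingGraph,
                                 mixerNode, // source node (multichannel mixer)
                                 0,         // source output bus
                                 iONode,    // destination node (Remote IO)
                                 0);        // destination input bus
if (noErr != result) {[self printErrorMessage: @"AUGraphConnectNodeInput" withStatus: result]; return;}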
Here is the code. It runs right after the multichannel mixer unit setup:
UInt32 flag = 1;

// Enable IO for playback
result = AudioUnitSetProperty(iOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
                              0, // Output bus
                              &flag,
                              sizeof(flag));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty EnableIO" withStatus: result]; return;}

/* can't do that because *** AudioUnitSetProperty EnableIO error: -1073752493 00000000

result = AudioUnitSetProperty(iOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
                              0, // Output bus
                              &flag,
                              sizeof(flag));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty EnableIO" withStatus: result]; return;}
*/
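The commented-out call above presumably fails because it targets bus 0 on the input scope; on the Remote IO unit the recording (microphone) side is element 1. A minimal sketch of how input would normally be enabled, in case that was the intention (not needed if only the mixer output is being captured):

result = AudioUnitSetProperty(iOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
                              1, // Input bus (microphone element)
                              &flag,
                              sizeof(flag));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty EnableIO (input)" withStatus: result]; return;}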
Then I create a stream format:
// I/O stream format
iOStreamFormat.mSampleRate = 44100.0;
iOStreamFormat.mFormatID = kAudioFormatLinearPCM;
iOStreamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
iOStreamFormat.mFramesPerPacket = 1;
iOStreamFormat.mChannelsPerFrame = 1;
iOStreamFormat.mBitsPerChannel = 16;
iOStreamFormat.mBytesPerPacket = 2;
iOStreamFormat.mBytesPerFrame = 2;
[self printASBD: iOStreamFormat];
Then I apply the format and set the sample rate:
result = AudioUnitSetProperty(iOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
                              1, // Input bus
                              &iOStreamFormat,
                              sizeof(iOStreamFormat));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty StreamFormat" withStatus: result]; return;}

result = AudioUnitSetProperty(iOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
                              0, // Output bus
                              &iOStreamFormat,
                              sizeof(iOStreamFormat));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty StreamFormat" withStatus: result]; return;}

// Sample rate for the I/O unit
result = AudioUnitSetProperty(iOUnit, kAudioUnitProperty_SampleRate, kAudioUnitScope_Input,
                              0, // Output bus
                              &graphSampleRate,
                              sizeof(graphSampleRate));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (set I/O unit input stream format)" withStatus: result]; return;}
Then I try to set up the render callback.

Solution 1 >>> my recording callback is never called:
effectState.rioUnit = iOUnit;

AURenderCallbackStruct renderCallbackStruct;
renderCallbackStruct.inputProc = &recordingCallback;
renderCallbackStruct.inputProcRefCon = &effectState;

result = AudioUnitSetProperty(iOUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
                              0, // Output bus
                              &renderCallbackStruct,
                              sizeof(renderCallbackStruct));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty SetRenderCallback" withStatus: result]; return;}
Solution 2 >>> my application crashes on launch here:
AURenderCallbackStruct renderCallbackStruct;
renderCallbackStruct.inputProc = &recordingCallback;
renderCallbackStruct.inputProcRefCon = &effectState;

result = AUGraphSetNodeInputCallback(processingGraph, iONode,
                                     0, // Output bus
                                     &renderCallbackStruct);
if (noErr != result) {[self printErrorMessage: @"AUGraphSetNodeInputCallback (I/O unit input callback bus 0)" withStatus: result]; return;}
If anyone has an idea...
EDIT, Solution 3 (thanks to arlo's answer) >> there is now a format problem:
AudioStreamBasicDescription dstFormat = {0};
dstFormat.mSampleRate = 44100.0;
dstFormat.mFormatID = kAudioFormatLinearPCM;
dstFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
dstFormat.mBytesPerPacket = 4;
dstFormat.mBytesPerFrame = 4;
dstFormat.mFramesPerPacket = 1;
dstFormat.mChannelsPerFrame = 2;
dstFormat.mBitsPerChannel = 16;
dstFormat.mReserved = 0;

result = AudioUnitSetProperty(iOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
                              1,
                              &stereoStreamFormat,
                              sizeof(stereoStreamFormat));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty" withStatus: result]; return;}

result = AudioUnitSetProperty(iOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
                              0,
                              &stereoStreamFormat,
                              sizeof(stereoStreamFormat));
if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty" withStatus: result]; return;}

AudioUnitAddRenderNotify(iOUnit,
                         &recordingCallback,
                         &effectState);
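As far as I understand, AudioUnitAddRenderNotify fires on every render cycle of the unit, both before and after rendering, which is why the callback further down only writes on the post-render pass of bus 0.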
And the file setup:
if (noErr != result) {[self printErrorMessage: @"AUGraphInitialize" withStatus: result]; return;}

// Initialize the audio file
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *destinationFilePath = [[[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory] autorelease];
NSLog(@">>> %@", destinationFilePath);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);

OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);
CFRelease(destinationURL);
NSAssert(setupErr == noErr, @"Couldn't create file for writing");

setupErr = ExtAudioFileSetProperty(effectState.audioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &stereoStreamFormat);
NSAssert(setupErr == noErr, @"Couldn't set the client data format on the audio file");

setupErr = ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");
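What I'm unsure about is how the formats fit together. My understanding of ExtAudioFile is that the ASBD passed to ExtAudioFileCreateWithURL describes the file on disk, while kExtAudioFileProperty_ClientDataFormat must describe the buffers handed to ExtAudioFileWriteAsync, i.e. whatever the Remote IO actually renders, and ExtAudioFile converts between the two. (The kAudioFileWAVEType / "output.caf" pairing above also looks suspicious; it should probably be kAudioFileCAFType, or a .wav file name.) A minimal sketch of what I think would keep things consistent, by asking the unit for the format it is actually fed instead of guessing:

// Query the format set on the Remote IO's input scope, bus 0 -- this is what
// the mixer feeds into it, and (I assume) what the render notify sees in ioData.
AudioStreamBasicDescription clientFormat = {0};
UInt32 asbdSize = sizeof(clientFormat);
result = AudioUnitGetProperty(iOUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              0, // Output bus
                              &clientFormat,
                              &asbdSize);
if (noErr == result) {
    // ExtAudioFile converts from this client format to the on-disk format
    // (dstFormat) given to ExtAudioFileCreateWithURL.
    setupErr = ExtAudioFileSetProperty(effectState.audioFileRef,
                                       kExtAudioFileProperty_ClientDataFormat,
                                       sizeof(clientFormat),
                                       &clientFormat);
}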
And the recording callback:
static OSStatus recordingCallback(void                       *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp       *inTimeStamp,
                                  UInt32                      inBusNumber,
                                  UInt32                      inNumberFrames,
                                  AudioBufferList            *ioData) {
    if ((*ioActionFlags & kAudioUnitRenderAction_PostRender) && inBusNumber == 0)
    {
        EffectState *effectState = (EffectState *)inRefCon;
        ExtAudioFileWriteAsync(effectState->audioFileRef, inNumberFrames, ioData);
    }
    return noErr;
}
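Not shown above, but when recording stops I assume the render notify has to be removed and the file disposed so that the asynchronous writes are flushed to disk; roughly:

AudioUnitRemoveRenderNotify(iOUnit, &recordingCallback, &effectState);
ExtAudioFileDispose(effectState.audioFileRef);
effectState.audioFileRef = NULL;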
Something is missing from the output file output.caf :). I'm completely lost about which format to apply where.