I'm new to Core Audio, so I may be missing something obvious… My goal is to process a previously recorded audio signal, but the signal should be filtered first. I don't need to play back any audio; I only need to record and process it.

My graph looks like this: RemoteIO --> HighPassFilter --> callback (processes the audio)

I set it up like this:

LBAudioDetectiveGetDefaultFormat(&detective->streamFormat);

// Create new AUGraph
OSStatus error = NewAUGraph(&detective->graph);
LBErrorCheck(error);

// Initialize AUNodes
AudioComponentDescription rioCD = {0};
rioCD.componentType = kAudioUnitType_Output;
rioCD.componentSubType = kAudioUnitSubType_RemoteIO;
rioCD.componentManufacturer = kAudioUnitManufacturer_Apple;

AUNode rioNode;
error = AUGraphAddNode(detective->graph, &rioCD, &rioNode);
LBErrorCheck(error);

AudioComponentDescription filterCD = {0};
filterCD.componentType = kAudioUnitType_Effect;
filterCD.componentSubType = kAudioUnitSubType_HighPassFilter;
filterCD.componentManufacturer = kAudioUnitManufacturer_Apple;

AUNode filterNode;
error = AUGraphAddNode(detective->graph, &filterCD, &filterNode);
LBErrorCheck(error);

// Open the graph so I can modify the AudioUnits
error = AUGraphOpen(detective->graph);
LBErrorCheck(error);

// Adapt rioUnit
error = AUGraphNodeInfo(detective->graph, rioNode, NULL, &detective->rioUnit);
LBErrorCheck(error);

UInt32 onFlag = 1;
AudioUnitElement bus1 = 1;
UInt32 propertySize = sizeof(UInt32);    
error = AudioUnitSetProperty(detective->rioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, bus1, &onFlag, propertySize);
LBErrorCheck(error);

propertySize = sizeof(AudioStreamBasicDescription);
error = AudioUnitSetProperty(detective->rioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, bus1, &detective->streamFormat, propertySize);
LBErrorCheck(error);

// Adapt filterUnit
error = AUGraphNodeInfo(detective->graph, filterNode, NULL, &detective->filterUnit);
LBErrorCheck(error);

AURenderCallbackStruct callback = {0};
callback.inputProc = _LBAudioDetectiveFilterOutput;
callback.inputProcRefCon = detective;
propertySize = sizeof(AURenderCallbackStruct);
error = AudioUnitSetProperty(detective->filterUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, propertySize);
LBErrorCheck(error);

// Connect the two units
AudioUnitElement bus0 = 0;
error = AUGraphConnectNodeInput(detective->graph, rioNode, bus1, filterNode, bus0);
LBErrorCheck(error);

error = AUGraphInitialize(detective->graph);
LBErrorCheck(error);

My code fails with error -10879 when I try to set the callback. I assume the filter audio unit simply doesn't support this property. So how should I get at the recorded sound? As far as I know, adding a second Generic Output unit isn't allowed.

Thanks in advance for your help.

1 Answer

Configure the callback on the output (bus 0) of the RemoteIO audio unit, not on the filter unit or on the input bus. Inside that RemoteIO callback, pull (render) from the filter unit; the filter unit will then be called and will in turn pull the available data from the RemoteIO input (bus 1) for you to process.
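
Roughly, that setup could look like the sketch below, reusing the graph, the callback struct, and the variable names from the question (untested). The existing AUGraphConnectNodeInput call (RemoteIO bus 1 into the filter's bus 0) stays exactly as it is, so the filter keeps pulling from the microphone bus.

// Feed the RemoteIO output element (bus 0) from the render callback,
// instead of attaching the callback to the filter unit.
AURenderCallbackStruct callback = {0};
callback.inputProc = _LBAudioDetectiveFilterOutput;
callback.inputProcRefCon = detective;
error = AUGraphSetNodeInputCallback(detective->graph, rioNode, 0, &callback);
LBErrorCheck(error);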

If you don't want to produce any sound, you can also just fill the output buffers with zeros.

Basically, the speaker side of RemoteIO provides the timing of the callbacks for data coming from the microphone side of RemoteIO, even if you don't want to play anything through the speaker. That's because the whole audio unit chain is pull-based (there is nowhere in the API for the ADC to push data to), and the hardware sample rates for input and output happen to be the same, so the output gets called at exactly the right times for the audio unit chain to pull from the microphone's input buffer.
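
The render callback itself could then look something like this sketch. The LBAudioDetective struct name is assumed from the question's naming conventions, and you'd need <AudioToolbox/AudioToolbox.h> and <string.h>:

static OSStatus _LBAudioDetectiveFilterOutput(void *inRefCon,
                                              AudioUnitRenderActionFlags *ioActionFlags,
                                              const AudioTimeStamp *inTimeStamp,
                                              UInt32 inBusNumber,
                                              UInt32 inNumberFrames,
                                              AudioBufferList *ioData) {
    LBAudioDetective *detective = (LBAudioDetective *)inRefCon;

    // Pull the filtered microphone samples through the high-pass filter.
    // The filter in turn pulls from RemoteIO bus 1 (the mic) via the graph connection.
    OSStatus error = AudioUnitRender(detective->filterUnit, ioActionFlags, inTimeStamp, 0, inNumberFrames, ioData);
    if (error != noErr) {
        return error;
    }

    // ...process the filtered samples in ioData here...

    // Zero the buffers so nothing audible reaches the speaker.
    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
        memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
    }

    return noErr;
}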

Answered 2013-04-22T16:08:12.370