
I am getting error -50 (invalid parameters) from AudioUnitRender in the following context. I used this Pitch Detector sample app as my starting point, and it works fine. The only major difference in my project is that I am also using the Remote I/O unit for audio output. The audio output works fine. Here are my input callback and my initialization code (error checking removed for brevity). I know it is a lot of code, but error -50 really gives me very little information about where the problem might be.

Input callback:

OSStatus inputCallback( void* inRefCon, 
                            AudioUnitRenderActionFlags  *ioActionFlags, 
                            const AudioTimeStamp        *inTimeStamp, 
                            UInt32                      inBusNumber, 
                            UInt32                      inNumberFrames, 
                            AudioBufferList             *ioData) {

    WBAudio* audioObject= (WBAudio*)inRefCon;

    AudioUnit rioUnit = audioObject->m_audioUnit;
    OSStatus renderErr;
    UInt32 bus1 = 1;

    renderErr = AudioUnitRender(rioUnit, ioActionFlags, 
                                inTimeStamp, bus1, inNumberFrames, audioObject->m_inBufferList );
    if (renderErr < 0) {
        return renderErr; // breaks here
    }

    return noErr;
} // end inputCallback()

Initialization:

- (id) init {

    self= [super init];
    if( !self ) return nil;

    OSStatus result;

    //! Initialize a buffer list for rendering input
    size_t bytesPerSample;
    bytesPerSample = sizeof(SInt16);
    m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer));
    m_inBufferList->mNumberBuffers = 1;
    m_inBufferList->mBuffers[0].mNumberChannels = 1;
    m_inBufferList->mBuffers[0].mDataByteSize = 512*bytesPerSample;
    m_inBufferList->mBuffers[0].mData = calloc(512, bytesPerSample);

    //! Initialize an audio session to get buffer size
    result = AudioSessionInitialize(NULL, NULL, NULL, NULL);

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    result = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);

    // Set preferred buffer size
    Float32 preferredBufferSize = static_cast<float>(m_pBoard->m_uBufferSize) / m_pBoard->m_fSampleRate;
    result = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);

    // Get actual buffer size
    Float32 audioBufferSize;
    UInt32 size = sizeof (audioBufferSize);
    result = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &audioBufferSize);

    result = AudioSessionSetActive(true);

    //! Create our Remote I/O component description
    AudioComponentDescription desc;
    desc.componentType= kAudioUnitType_Output;
    desc.componentSubType= kAudioUnitSubType_RemoteIO;
    desc.componentFlags= 0;
    desc.componentFlagsMask= 0;
    desc.componentManufacturer= kAudioUnitManufacturer_Apple;

    //! Find the corresponding component
    AudioComponent outputComponent = AudioComponentFindNext(NULL, &desc);

    //! Create the component instance
    result = AudioComponentInstanceNew(outputComponent, &m_audioUnit);

    //! Enable audio output
    UInt32 flag = 1;
    result = AudioUnitSetProperty( m_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag));

    //! Enable audio input
    result= AudioUnitSetProperty( m_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));

    //! Create our audio stream description
    m_audioFormat.mSampleRate= m_pBoard->m_fSampleRate;
    m_audioFormat.mFormatID= kAudioFormatLinearPCM;
    m_audioFormat.mFormatFlags= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    m_audioFormat.mFramesPerPacket= 1;
    m_audioFormat.mChannelsPerFrame= 1;
    m_audioFormat.mBitsPerChannel= 16;
    m_audioFormat.mBytesPerPacket= 2;
    m_audioFormat.mBytesPerFrame= 2;

    //! Set the stream format
    result = AudioUnitSetProperty( m_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &m_audioFormat, sizeof(m_audioFormat));

    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_StreamFormat, 
                               kAudioUnitScope_Output, 
                               kInputBus, &m_audioFormat, sizeof(m_audioFormat));

    //! Set the render callback
    AURenderCallbackStruct renderCallbackStruct= {0};
    renderCallbackStruct.inputProc= renderCallback;
    renderCallbackStruct.inputProcRefCon= m_pBoard;
    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallbackStruct, sizeof(renderCallbackStruct));

    //! Set the input callback
    AURenderCallbackStruct inputCallbackStruct = {0};
    inputCallbackStruct.inputProc= inputCallback;
    inputCallbackStruct.inputProcRefCon= self;
    result= AudioUnitSetProperty( m_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Input, kOutputBus, &inputCallbackStruct, sizeof( inputCallbackStruct ) );

    //! Initialize the unit
    result = AudioUnitInitialize( m_audioUnit );

    return self;
}

3 Answers


You are allocating m_inBufferList as:

m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer));

This should be:

m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * numberOfBuffers);   //numberOfBuffers in your case is 1

Maybe this solves your problem.
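
For reference, a minimal sketch of the full corrected allocation, using the identifiers and the 512-frame mono 16-bit setup from the question (with numberOfBuffers equal to 1, the extra AudioBuffer's worth of space is harmless over-allocation):

    // Sketch: allocate the AudioBufferList header plus the buffer array,
    // then fill in the single mono 16-bit buffer as in the question.
    UInt32 numberOfBuffers = 1;
    size_t bytesPerSample  = sizeof(SInt16);
    m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) +
                                               sizeof(AudioBuffer) * numberOfBuffers);
    m_inBufferList->mNumberBuffers = numberOfBuffers;
    m_inBufferList->mBuffers[0].mNumberChannels = 1;
    m_inBufferList->mBuffers[0].mDataByteSize   = 512 * bytesPerSample;
    m_inBufferList->mBuffers[0].mData           = calloc(512, bytesPerSample);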

Answered 2012-04-30T11:24:10.427

In the developer documentation, error -50 means paramErr (bad parameter). Make sure you are passing valid parameters to AudioUnitRender. Check the stream format and your unit.
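
One concrete check along these lines: the AudioBufferList you pass must describe at least inNumberFrames of data in the unit's input format. Here is a sketch of a defensive version of the question's callback, assuming its 16-bit mono format and 512-frame pre-allocation:

    static OSStatus inputCallback(void                       *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp       *inTimeStamp,
                                  UInt32                      inBusNumber,
                                  UInt32                      inNumberFrames,
                                  AudioBufferList            *ioData)
    {
        WBAudio *audioObject = (WBAudio *)inRefCon;
        AudioBufferList *abl = audioObject->m_inBufferList;

        // Fail fast if the hardware asks for more frames than were pre-allocated.
        UInt32 bytesNeeded = inNumberFrames * sizeof(SInt16); // 16-bit mono
        if (bytesNeeded > 512 * sizeof(SInt16))
            return -50; // paramErr

        // Reset the byte size on every render cycle: AudioUnitRender can
        // shrink it, and a stale or undersized value can produce -50 later.
        abl->mBuffers[0].mDataByteSize = bytesNeeded;

        return AudioUnitRender(audioObject->m_audioUnit, ioActionFlags,
                               inTimeStamp, 1 /* input bus */,
                               inNumberFrames, abl);
    }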

Answered 2013-09-01T11:45:02.147

I agree with Kurt Pattyn that the allocation of m_inBufferList is incorrect and may be the cause of the -50 bad param error.

Except that I think for a single buffer it should be:

(AudioBufferList *)malloc(sizeof(AudioBufferList))

My evidence is the following sizes, and the code below from Adamson & Avila.

    (lldb) po sizeof(AudioBufferList)
    24

    (lldb) po sizeof(AudioBuffer)
    16

    (lldb) po offsetof(AudioBufferList, mBuffers[0])
    8

According to Chris Adamson and Kevin Avila in Learning Core Audio:

    // Allocate an AudioBufferList plus enough space for 
    // array of AudioBuffers
    UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) + 
        (sizeof(AudioBuffer) * player->streamFormat.mChannelsPerFrame);

    // malloc buffer lists
    player->inputBuffer = (AudioBufferList *)malloc(propsize); 
    player->inputBuffer->mNumberBuffers = player->streamFormat.mChannelsPerFrame;

    // Pre-malloc buffers for AudioBufferLists
    for (UInt32 i = 0; i < player->inputBuffer->mNumberBuffers; i++) {
        player->inputBuffer->mBuffers[i].mNumberChannels = 1;
        player->inputBuffer->mBuffers[i].mDataByteSize = bufferSizeBytes; 
        player->inputBuffer->mBuffers[i].mData = malloc(bufferSizeBytes);
    }

Last but not least, I just stumbled upon this code, which comes with the following comment :)

//credit to TheAmazingAudioEngine for an illustration of proper audiobufferlist allocation. 
// Google leads to some really really bad allocation code...
[other code]
sampleABL = malloc(sizeof(AudioBufferList) + (bufferCnt-1)*sizeof(AudioBuffer));

https://github.com/zakk4223/CocoaSplit/blob/master/CocoaSplit/CAMultiAudio/CAMultiAudioPCMPlayer.m#L203
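
The (bufferCnt - 1) in that allocation works because AudioBufferList already declares one AudioBuffer inline; from CoreAudioTypes.h:

    struct AudioBufferList {
        UInt32      mNumberBuffers;
        AudioBuffer mBuffers[1]; // declared with one element, used as a
                                 // variable-length array in practice
    };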

Answered 2019-11-30T23:20:43.773