
I want to create an AUGraph with the following AudioUnits:

  • 1 × 440 Hz sine wave generator
  • 1 × 880 Hz sine wave generator
  • 1 × mixer
  • 1 × output

If I connect the sine wave generator to the mixer and the mixer to the output, I hear no sound.

If I connect the sine wave generator directly to the output, I do hear sound, probably because the output unit is hooked up to the sine wave render callback.

How should the units be connected? Am I missing something?
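
For clarity, here is a condensed sketch of the two wirings being compared; it just restates the calls from the full listing below:

// Joins 1 + Connect 1: render callback installed on the mixer unit,
// then sine node -> mixer node -> output node. This is the silent case.
AUGraphConnectNodeInput(player->graph, player->sine,  0, player->mixer,  0);
AUGraphConnectNodeInput(player->graph, player->mixer, 0, player->output, 0);

// Joins 2 + Connect 2: render callback installed on the output unit,
// sine node connected straight to the output node. This one is audible.
AUGraphConnectNodeInput(player->graph, player->sine,  0, player->output, 0);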

Full code example below: "Connect 1" and "Joins 1" do not work; comment them out and run "Connect 2" and "Joins 2" instead to hear the sine wave working.

//
//  main.c
//  TestAudioUnit
//
//  Created by Chris Davis on 25/08/2013.
//  Copyright (c) 2013 Chris Davis. All rights reserved.
//

#include <CoreFoundation/CoreFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

typedef struct MyAUGraphPlayer
{
    AudioStreamBasicDescription streamFormat;

    AUGraph graph;
    AUNode output;
    AUNode mixer;
    AUNode sine;
    AudioUnit audioUnits[3];


    AudioBufferList *inputBuffer;


    Float64 firstInputSampleTime;
    Float64 firstOutputSampleTime;
    Float64 inToOutSampleTimeOffset;
} MyAUGraphPlayer;

OSStatus SineWaveRenderCallback(void * inRefCon,
                                AudioUnitRenderActionFlags * ioActionFlags,
                                const AudioTimeStamp * inTimeStamp,
                                UInt32 inBusNumber,
                                UInt32 inNumberFrames,
                                AudioBufferList * ioData)
{
    // inRefCon is the context pointer we passed in earlier when setting the render callback
    double currentPhase = *((double *)inRefCon);
    // ioData is where we're supposed to put the audio samples we've created
    Float32 * outputBuffer = (Float32 *)ioData->mBuffers[0].mData;
    const double frequency = 880.0;
    const double phaseStep = (frequency / 44100.) * (M_PI * 2.);

    for(int i = 0; i < inNumberFrames; i++) {
        outputBuffer[i] = sin(currentPhase);
        currentPhase += phaseStep;
    }

    // If we were doing stereo (or more), this would copy our sine wave samples
    // to all of the remaining channels
    for(int i = 1; i < ioData->mNumberBuffers; i++) {
        memcpy(ioData->mBuffers[i].mData, outputBuffer, ioData->mBuffers[i].mDataByteSize);
    }

    // writing the current phase back to inRefCon so we can use it on the next call
    *((double *)inRefCon) = currentPhase;
    return noErr;
}


int main(int argc, const char * argv[])
{

    MyAUGraphPlayer p = {0};
    MyAUGraphPlayer *player = &p;

    NewAUGraph(&player->graph);


    //Output
    {
    AudioComponentDescription description = {
        .componentType = kAudioUnitType_Output,
        .componentSubType = kAudioUnitSubType_DefaultOutput,
        .componentManufacturer = kAudioUnitManufacturer_Apple
    };
    AUGraphAddNode(player->graph, &description, &player->output);
    AudioComponent comp = AudioComponentFindNext(NULL, &description);
    AudioComponentInstanceNew(comp, &player->audioUnits[0]);
    AudioUnitInitialize(player->audioUnits[0]);
    AudioStreamBasicDescription ASBD = {
        .mSampleRate       = 44100,
        .mFormatID         = kAudioFormatLinearPCM,
        .mFormatFlags      = kAudioFormatFlagsNativeFloatPacked,
        .mChannelsPerFrame = 1,
        .mFramesPerPacket  = 1,
        .mBitsPerChannel   = sizeof(Float32) * 8,
        .mBytesPerPacket   = sizeof(Float32),
        .mBytesPerFrame    = sizeof(Float32)
    };
    AudioUnitSetProperty(player->audioUnits[0],
                         kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Global,
                         0,
                         &ASBD,
                         sizeof(ASBD));
    }

    //Mixer
    {
        AudioComponentDescription description = {
            .componentType = kAudioUnitType_Mixer,
            .componentSubType = kAudioUnitSubType_StereoMixer,
            .componentManufacturer = kAudioUnitManufacturer_Apple
        };
        AUGraphAddNode(player->graph, &description, &player->mixer);
        AudioComponent comp = AudioComponentFindNext(NULL, &description);
        AudioComponentInstanceNew(comp, &player->audioUnits[1]);
        AudioUnitInitialize(player->audioUnits[1]);

    }


    //Sine
    {
        AudioComponentDescription description = {
            .componentType = kAudioUnitType_Generator,
            .componentSubType = kAudioUnitSubType_ScheduledSoundPlayer,
            .componentManufacturer = kAudioUnitManufacturer_Apple
        };
        AUGraphAddNode(player->graph, &description, &player->sine);
        AudioComponent comp = AudioComponentFindNext(NULL, &description);
        AudioComponentInstanceNew(comp, &player->audioUnits[2]);
        AudioUnitInitialize(player->audioUnits[2]);

    }



    //Connect 1
    {
        AURenderCallbackStruct callbackInfo = {
            .inputProc       = SineWaveRenderCallback,
            .inputProcRefCon = player
        };

        AudioUnitSetProperty(player->audioUnits[1],
                                        kAudioUnitProperty_SetRenderCallback,
                                        kAudioUnitScope_Global,
                                        0,
                                        &callbackInfo,
                             sizeof(callbackInfo));

    }

    //Joins 1 - sine to mixer to output
    {
        AUGraphConnectNodeInput(player->graph,
                                player->sine,
                                0,
                                player->mixer,
                                0);

        AUGraphConnectNodeInput(player->graph,
                                player->mixer,
                                0,
                                player->output,
                                0);

    }

    //Connect 2
    /*{
        AURenderCallbackStruct callbackInfo = {
            .inputProc       = SineWaveRenderCallback,
            .inputProcRefCon = player
        };

        AudioUnitSetProperty(player->audioUnits[0],
                             kAudioUnitProperty_SetRenderCallback,
                             kAudioUnitScope_Global,
                             0,
                             &callbackInfo,
                             sizeof(callbackInfo));

    }


    //Joins 2 - sine direct to output
    {
        AUGraphConnectNodeInput(player->graph,
                                player->sine,
                                0,
                                player->output,
                                0);
    }*/

    AUGraphInitialize(player->graph);
    player->firstOutputSampleTime = -1;
    AudioOutputUnitStart(player->audioUnits[0]);
    AUGraphStart(player->graph);

    printf("enter key to stop\n");
    getchar();

    return 0;
}

1 Answer


This code plays the sine wave correctly; however, I get a lot of errors from CoreAudio during setup:

//
//  main.c
//  TestAudioUnit
//
//  Created by Chris Davis on 25/08/2013.
//  Copyright (c) 2013 Chris Davis. All rights reserved.
//

#include <CoreFoundation/CoreFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

typedef struct MyAUGraphPlayer
{
    AudioStreamBasicDescription streamFormat;

    AUGraph graph;
    AUNode output;
    AUNode mixer;
    AUNode sine;
    AudioUnit audioUnits[3];


    AudioBufferList *inputBuffer;


    Float64 firstInputSampleTime;
    Float64 firstOutputSampleTime;
    Float64 inToOutSampleTimeOffset;
} MyAUGraphPlayer;

OSStatus SineWaveRenderCallback(void * inRefCon,
                                AudioUnitRenderActionFlags * ioActionFlags,
                                const AudioTimeStamp * inTimeStamp,
                                UInt32 inBusNumber,
                                UInt32 inNumberFrames,
                                AudioBufferList * ioData)
{
    // inRefCon is the context pointer we passed in earlier when setting the render callback
    double currentPhase = *((double *)inRefCon);
    // ioData is where we're supposed to put the audio samples we've created
    Float32 * outputBuffer = (Float32 *)ioData->mBuffers[0].mData;
    const double frequency = 880.0;
    const double phaseStep = (frequency / 44100.) * (M_PI * 2.);

    for(int i = 0; i < inNumberFrames; i++) {
        outputBuffer[i] = sin(currentPhase);
        currentPhase += phaseStep;
    }

    // If we were doing stereo (or more), this would copy our sine wave samples
    // to all of the remaining channels
    for(int i = 1; i < ioData->mNumberBuffers; i++) {
        memcpy(ioData->mBuffers[i].mData, outputBuffer, ioData->mBuffers[i].mDataByteSize);
    }

    // writing the current phase back to inRefCon so we can use it on the next call
    *((double *)inRefCon) = currentPhase;
    return noErr;
}


int main(int argc, const char * argv[])
{

    MyAUGraphPlayer p = {0};
    MyAUGraphPlayer *player = &p;

    NewAUGraph(&player->graph);

    OSStatus result = 0;

    AudioStreamBasicDescription ASBD = {
        .mSampleRate       = 44100,
        .mFormatID         = kAudioFormatLinearPCM,
        .mFormatFlags      = kAudioFormatFlagsNativeFloatPacked,
        .mChannelsPerFrame = 2,
        .mFramesPerPacket  = 1,
        .mBitsPerChannel   = sizeof(Float32) * 8,
        .mBytesPerPacket   = sizeof(Float32),
        .mBytesPerFrame    = sizeof(Float32)
    };


    //Output
    {
        AudioComponentDescription description = {
            .componentType = kAudioUnitType_Output,
            .componentSubType = kAudioUnitSubType_DefaultOutput,
            .componentManufacturer = kAudioUnitManufacturer_Apple
        };
        result = AUGraphAddNode(player->graph, &description, &player->output);
        printf("err: %d\n", result);
        AudioComponent comp = AudioComponentFindNext(NULL, &description);
        result = AudioComponentInstanceNew(comp, &player->audioUnits[0]);
        printf("err: %d\n", result);
        result = AudioUnitInitialize(player->audioUnits[0]);
        printf("err: %d\n", result);

    }

    //Mixer
    {
        AudioComponentDescription description = {
            .componentType = kAudioUnitType_Mixer,
            .componentSubType = kAudioUnitSubType_StereoMixer,
            .componentManufacturer = kAudioUnitManufacturer_Apple
        };
        result = AUGraphAddNode(player->graph, &description, &player->mixer);
        printf("err: %d\n", result);
        AudioComponent comp = AudioComponentFindNext(NULL, &description);
        result = AudioComponentInstanceNew(comp, &player->audioUnits[1]);
        printf("err: %d\n", result);


    }


    //Sine
    {
        AudioComponentDescription description = {
            .componentType = kAudioUnitType_Generator,
            .componentSubType = kAudioUnitSubType_ScheduledSoundPlayer,
            .componentManufacturer = kAudioUnitManufacturer_Apple
        };
        result = AUGraphAddNode(player->graph, &description, &player->sine);
        printf("err: %d\n", result);
        AudioComponent comp = AudioComponentFindNext(NULL, &description);
        result = AudioComponentInstanceNew(comp, &player->audioUnits[2]);
        printf("err: %d\n", result);
        result = AudioUnitInitialize(player->audioUnits[2]);
        printf("err: %d\n", result);

    }




    result = AUGraphConnectNodeInput(player->graph,
                            player->sine,
                            0,
                            player->mixer,
                            0);
    printf("err: %d\n", result);

    result = AUGraphConnectNodeInput(player->graph,
                            player->mixer,
                            0,
                            player->output,
                            0);
    printf("err: %d\n", result);

    result = AUGraphOpen(player->graph);
    printf("err: %d\n", result);


    UInt32 numbuses = 1;


    result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numbuses, sizeof(numbuses));
    printf("err: %d\n", result);

    for (UInt32 i = 0; i <= numbuses; ++i) {
        // setup render callback struct
        AURenderCallbackStruct rcbs;
        rcbs.inputProc = &SineWaveRenderCallback;
        rcbs.inputProcRefCon = player;

        printf("set AUGraphSetNodeInputCallback\n");

        // set a callback for the specified node's specified input
        result = AUGraphSetNodeInputCallback(player->graph, player->mixer, i, &rcbs);
        printf("AUGraphSetNodeInputCallback err: %d\n", result);

        printf("set input bus %d, client kAudioUnitProperty_StreamFormat\n", (unsigned int)i);

        // set the input stream format, this is the format of the audio for mixer input
        result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, i, &ASBD, sizeof(ASBD));
        printf("err: %d\n", result);
    }



    result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &ASBD, sizeof(ASBD));
    printf("err: %d\n", result);


    OSStatus status = AUGraphInitialize(player->graph);
    printf("err: %d\n", status);


    player->firstOutputSampleTime = -1;
    AudioOutputUnitStart(player->audioUnits[0]);
    AUGraphStart(player->graph);

    printf("enter key to stop\n");
    getchar();

    return 0;
}
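
One thing that makes those CoreAudio errors easier to track down is decoding each OSStatus as a four-character code instead of printing the raw integer. Below is a minimal sketch of such a helper; the CheckError name and the exit-on-failure behaviour are illustrative choices, not part of the code above:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <CoreFoundation/CoreFoundation.h>

// Prints a failing OSStatus either as a four-character code (e.g. 'fmt?')
// or as a plain integer, then exits, so the first failing setup call is
// reported with a readable reason.
static void CheckError(OSStatus error, const char *operation)
{
    if (error == noErr) return;

    char errorString[20];
    // Try to interpret the status as a big-endian four-character code...
    *(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig((UInt32)error);
    if (isprint(errorString[1]) && isprint(errorString[2]) &&
        isprint(errorString[3]) && isprint(errorString[4])) {
        errorString[0] = errorString[5] = '\'';
        errorString[6] = '\0';
    } else {
        // ...otherwise fall back to the decimal value.
        sprintf(errorString, "%d", (int)error);
    }
    fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
    exit(1);
}

With that in place each setup call can be wrapped, e.g. CheckError(AUGraphInitialize(player->graph), "AUGraphInitialize"), which pinpoints the step that is producing the errors.
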
answered 2013-08-26T20:29:33.123