My task is to play an audio file saved locally in the documents directory, apply an audio effect to that file using an effect audio unit, and save a new audio file with that effect in the documents directory. This is the code I have written so far, but it does not work: the effect is not applied to the audio. Please suggest what is wrong with this code. Thanks in advance.

- (void) setUpAudioUnits
{
OSStatus setupErr = noErr;

// describe unit
AudioComponentDescription audioCompDesc;
audioCompDesc.componentType = kAudioUnitType_Output;
audioCompDesc.componentSubType = kAudioUnitSubType_RemoteIO;
audioCompDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
audioCompDesc.componentFlags = 0;
audioCompDesc.componentFlagsMask = 0;

// get rio unit from audio component manager
AudioComponent rioComponent = AudioComponentFindNext(NULL, &audioCompDesc);
setupErr = AudioComponentInstanceNew(rioComponent, &remoteIOUnit);
NSAssert (setupErr == noErr, @"Couldn't get RIO unit instance");

// set up the rio unit for playback
UInt32 oneFlag = 1;
AudioUnitElement outputElement = 0;
setupErr = 
AudioUnitSetProperty (remoteIOUnit,
                      kAudioOutputUnitProperty_EnableIO,
                      kAudioUnitScope_Output,
                      outputElement,
                      &oneFlag,
                      sizeof(oneFlag));
NSAssert (setupErr == noErr, @"Couldn't enable RIO output");

// enable rio input
AudioUnitElement inputElement = 1;

// setup an asbd in the iphone canonical format
AudioStreamBasicDescription myASBD;
memset (&myASBD, 0, sizeof (myASBD));
// myASBD.mSampleRate = 44100;
myASBD.mSampleRate = hardwareSampleRate;
myASBD.mFormatID = kAudioFormatLinearPCM;
myASBD.mFormatFlags = kAudioFormatFlagsCanonical;
myASBD.mBytesPerPacket = 4;
myASBD.mFramesPerPacket = 1;
myASBD.mBytesPerFrame = 4;
myASBD.mChannelsPerFrame = 2;
myASBD.mBitsPerChannel = 16;

// set format for output (bus 0) on rio's input scope
setupErr =
AudioUnitSetProperty (remoteIOUnit,
                      kAudioUnitProperty_StreamFormat,
                      kAudioUnitScope_Input,
                      outputElement,
                      &myASBD,
                      sizeof (myASBD));
NSAssert (setupErr == noErr, @"Couldn't set ASBD for RIO on input scope / bus 0");


// song must be an LPCM file, preferably in caf container
// to convert, use /usr/bin/afconvert, like this:
//  /usr/bin/afconvert --data LEI16 Girlfriend.m4a song.caf

// read in the entire audio file (NOT recommended)
// better to use a ring buffer: thread or timer fills, render callback drains
NSURL *songURL = [NSURL fileURLWithPath:
                  [[NSBundle mainBundle] pathForResource: @"song"
                                                  ofType: @"caf"]];
AudioFileID songFile;
setupErr = AudioFileOpenURL((CFURLRef) songURL,
                            kAudioFileReadPermission,
                            0,
                            &songFile);
NSAssert (setupErr == noErr, @"Couldn't open audio file");

UInt64 audioDataByteCount;
UInt32 audioDataByteCountSize = sizeof (audioDataByteCount);
setupErr = AudioFileGetProperty(songFile,
                                kAudioFilePropertyAudioDataByteCount,
                                &audioDataByteCountSize,
                                &audioDataByteCount);
NSAssert (setupErr == noErr, @"Couldn't get size property");

musicPlaybackState.audioData = malloc (audioDataByteCount);
musicPlaybackState.audioDataByteCount = audioDataByteCount;
musicPlaybackState.samplePtr = musicPlaybackState.audioData;

NSLog (@"reading %qu bytes from file", audioDataByteCount);
UInt32 bytesRead = audioDataByteCount;
setupErr = AudioFileReadBytes(songFile,
                              false,
                              0,
                              &bytesRead,
                              musicPlaybackState.audioData);
NSAssert (setupErr == noErr, @"Couldn't read audio data");
NSLog (@"read %d bytes from file", bytesRead);

AudioStreamBasicDescription fileASBD;
UInt32 asbdSize = sizeof (fileASBD);
setupErr = AudioFileGetProperty(songFile,
                                kAudioFilePropertyDataFormat,
                                &asbdSize,
                                &fileASBD);
NSAssert (setupErr == noErr, @"Couldn't get file asbd");

setupErr = ExtAudioFileCreateWithURL(outputFileURL,
                                     kAudioFileCAFType,
                                     &fileASBD,
                                     NULL,
                                     kAudioFileFlags_EraseFile,
                                     &musicPlaybackState.extAudioFile);
NSAssert (setupErr == noErr, @"Couldn't create output file");

// describe the delay effect unit (despite the names, mixerDesc / mixerUnit hold a delay, not a mixer)
AudioComponentDescription mixerDesc;
mixerDesc.componentType = kAudioUnitType_Effect;
mixerDesc.componentSubType = kAudioUnitSubType_Delay;
mixerDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
mixerDesc.componentFlags = 0;
mixerDesc.componentFlagsMask = 0;

// get the effect unit from the audio component manager
AudioComponent mixerComponent = AudioComponentFindNext(NULL, &mixerDesc);
setupErr = AudioComponentInstanceNew(mixerComponent, &mixerUnit);
NSAssert (setupErr == noErr, @"Couldn't get mixer unit instance");

// set up connections and callbacks

// connect mixer bus 0 input to robot voice render callback
effectState.rioUnit = remoteIOUnit;
effectState.sineFrequency = 23;
effectState.sinePhase = 0;
effectState.asbd = myASBD;

// connect mixer bus 1 input to music player callback

AURenderCallbackStruct musicPlayerCallbackStruct;
musicPlayerCallbackStruct.inputProc = MusicPlayerCallback; // callback function
musicPlayerCallbackStruct.inputProcRefCon = &musicPlaybackState;

setupErr = 
AudioUnitSetProperty(mixerUnit, 
                     kAudioUnitProperty_SetRenderCallback,
                     kAudioUnitScope_Global,
                     outputElement,
                     &musicPlayerCallbackStruct,
                     sizeof (musicPlayerCallbackStruct));
NSAssert (setupErr == noErr, @"Couldn't set mixer render callback on bus 1");

// direct connect mixer to output
AudioUnitConnection connection;
connection.sourceAudioUnit = mixerUnit;
connection.sourceOutputNumber = outputElement;
connection.destInputNumber = outputElement;

setupErr = 
AudioUnitSetProperty(remoteIOUnit, 
                     kAudioUnitProperty_MakeConnection,
                     kAudioUnitScope_Input,
                     outputElement,
                     &connection,
                     sizeof (connection));
NSAssert (setupErr == noErr, @"Couldn't set mixer-to-RIO connection");

setupErr = AudioUnitInitialize(mixerUnit);
NSAssert (setupErr == noErr, @"Couldn't initialize mixer unit");

setupErr =  AudioUnitInitialize(remoteIOUnit);
NSAssert (setupErr == noErr, @"Couldn't initialize RIO unit");

setupErr = AudioOutputUnitStart (remoteIOUnit);
NSAssert (setupErr == noErr, @"Couldn't start RIO unit");
}
1 Answer

Once you have an initialized instance of the audio unit, you can apply the effect to the sound by calling AudioUnitRender and supplying it with an AudioBufferList.

First, make sure the sound is in a format the audio unit accepts. You can obtain this format by reading the unit's kAudioUnitProperty_StreamFormat property.
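As a minimal sketch, assuming mixerUnit is the instantiated delay unit from the question, queried on its input scope, bus 0:

AudioStreamBasicDescription unitASBD;
UInt32 unitASBDSize = sizeof (unitASBD);
// ask the effect unit which format it expects on its input scope, bus 0
OSStatus err = AudioUnitGetProperty (mixerUnit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Input,
                                     0,
                                     &unitASBD,
                                     &unitASBDSize);
NSAssert (err == noErr, @"Couldn't get the unit's stream format");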

If your audio file's format differs from the format you got from the audio unit, you can convert the audio "on the fly" using ExtAudioFile. To do this, set the kExtAudioFileProperty_ClientDataFormat property of the ExtAudioFile to the format you obtained from kAudioUnitProperty_StreamFormat. When you then read from the audio file, you will get the audio in the required format.
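A sketch of that setup, assuming songURL points at the source file and unitASBD holds the format obtained above:

ExtAudioFileRef sourceFile;
OSStatus err = ExtAudioFileOpenURL ((CFURLRef) songURL, &sourceFile);
NSAssert (err == noErr, @"Couldn't open source file");

// ask ExtAudioFile to convert to the unit's format on every read
err = ExtAudioFileSetProperty (sourceFile,
                               kExtAudioFileProperty_ClientDataFormat,
                               sizeof (unitASBD),
                               &unitASBD);
NSAssert (err == noErr, @"Couldn't set client data format");
// ExtAudioFileRead now delivers frames already in unitASBD's format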

Also, make sure the audio unit's kAudioUnitProperty_ShouldAllocateBuffer property is set to 1.
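For example, one plausible way to set it on the delay unit's output scope:

UInt32 shouldAllocate = 1;
OSStatus err = AudioUnitSetProperty (mixerUnit,
                                     kAudioUnitProperty_ShouldAllocateBuffer,
                                     kAudioUnitScope_Output,
                                     0,
                                     &shouldAllocate,
                                     sizeof (shouldAllocate));
NSAssert (err == noErr, @"Couldn't set ShouldAllocateBuffer");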

To call AudioUnitRender you must prepare a valid AudioTimeStamp, AudioUnitRenderActionFlags (which may be set to 0), and an AudioBufferList. You do not need to allocate memory for the buffers themselves; you only need to provide the number of buffers and the size of each.

AudioBufferList *buffer = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer));
buffer->mNumberBuffers = 2; // at least 2 buffers
buffer->mBuffers[0].mNumberChannels = 1;
buffer->mBuffers[0].mDataByteSize = ...; // size of one buffer
buffer->mBuffers[0].mData = NULL;        // NULL: the unit allocates, per ShouldAllocateBuffer above
buffer->mBuffers[1].mNumberChannels = 1;
buffer->mBuffers[1].mDataByteSize = ...;
buffer->mBuffers[1].mData = NULL;

AudioUnitRenderActionFlags flags = 0;

AudioTimeStamp timeStamp;
memset(&timeStamp, 0, sizeof(AudioTimeStamp));
timeStamp.mFlags = kAudioTimeStampSampleTimeValid;

UInt32 frames = ...; // number of frames in buffer
AudioUnit unit = ...; // your Delay unit

Now you can call AudioUnitRender:

AudioUnitRender(unit, &flags, &timeStamp, 0, frames, buffer);

The audio unit will ask its render callback to fill the buffers and will apply the effect to the sound; afterwards you will have buffers containing valid, processed audio. For this to work, you must set the kAudioUnitProperty_SetRenderCallback property to a valid callback.
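A minimal sketch of such a callback, assuming its inputProcRefCon was set to the converted sourceFile from the sketch above (names are illustrative, not from the original code):

static OSStatus FileRenderCallback (void                        *inRefCon,
                                    AudioUnitRenderActionFlags  *ioActionFlags,
                                    const AudioTimeStamp        *inTimeStamp,
                                    UInt32                      inBusNumber,
                                    UInt32                      inNumberFrames,
                                    AudioBufferList             *ioData)
{
    ExtAudioFileRef sourceFile = (ExtAudioFileRef) inRefCon;
    UInt32 frames = inNumberFrames;
    // ExtAudioFile applies the client-format conversion configured earlier
    OSStatus err = ExtAudioFileRead (sourceFile, &frames, ioData);
    // when frames comes back short, the file is exhausted; a real
    // implementation should zero the remainder and stop rendering
    return err;
}

Once AudioUnitRender has returned the processed buffers, they can be appended to the output file the question already creates, for example:

OSStatus writeErr = ExtAudioFileWrite (musicPlaybackState.extAudioFile,
                                       frames,
                                       buffer);
NSAssert (writeErr == noErr, @"Couldn't write processed audio");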

Answered 2012-11-20T19:26:07.843