
I have a tone generator application that produces a tone based on a slider value for the frequency. That part of the application works fine. I'm using:

#import <AudioToolbox/AudioToolbox.h>

OSStatus RenderTone(
void *inRefCon, 
AudioUnitRenderActionFlags  *ioActionFlags, 
const AudioTimeStamp        *inTimeStamp, 
UInt32                      inBusNumber, 
UInt32                      inNumberFrames, 
AudioBufferList             *ioData)

{
// Fixed amplitude is good enough for our purposes
const double amplitude = 0.25;

// Get the tone parameters out of the view controller
ToneGeneratorViewController *viewController =
    (ToneGeneratorViewController *)inRefCon;
double theta = viewController->theta;
double theta_increment = 2.0 * M_PI * viewController->frequency / viewController->sampleRate;

// This is a mono tone generator so we only need the first buffer
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

// Generate the samples
for (UInt32 frame = 0; frame < inNumberFrames; frame++) 
{
    buffer[frame] = sin(theta) * amplitude;

    theta += theta_increment;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }
}

// Store the theta back in the view controller
viewController->theta = theta;

return noErr;
}



- (void)createToneUnit
{
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;

// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");

// Create a new unit based on this that we'll use for output
OSErr err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
NSAssert1(toneUnit, @"Error creating unit: %ld", err);

// Set our tone rendering function on the unit
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = self;
err = AudioUnitSetProperty(toneUnit, 
    kAudioUnitProperty_SetRenderCallback, 
    kAudioUnitScope_Input,
    0, 
    &input, 
    sizeof(input));
NSAssert1(err == noErr, @"Error setting callback: %ld", err);

// Set the format to 32 bit, single channel, floating point, linear PCM
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
    kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;  
streamFormat.mBytesPerFrame = four_bytes_per_float;     
streamFormat.mChannelsPerFrame = 1; 
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (toneUnit,
    kAudioUnitProperty_StreamFormat,
    kAudioUnitScope_Input,
    0,
    &streamFormat,
    sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, @"Error setting stream format: %ld", err);
}

Now I need to produce different patterns in the application, like the ones in the Dog Whistler application. Can anyone tell me what I would need to change in this source code to modify the waveform?

Thanks in advance.


1 Answer


You will probably need a different RenderTone implementation for each specific pattern. The implementation in your code produces a sampled pure sine wave with no modulation. You can generate all kinds of patterns; it depends on what you want to achieve.

For example, to generate shorter or longer beeps you would need to generate "silence" (write zeros into the buffer) inside the "for" loop for a certain number of frames, then sine samples again, then silence again, and so on (this amounts to chopping the signal on and off).
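As a rough sketch of that sample loop (this assumes you add a UInt32 ivar, here called beepFrameCount, to ToneGeneratorViewController so the position within the beep/silence cycle survives between render callbacks; the 200 ms durations are just example values):

// Sketch only: beepFrameCount is an assumed new ivar that carries the
// position in the beep/silence cycle from one render callback to the next.
const UInt32 beepFrames    = (UInt32)(0.2 * viewController->sampleRate); // ~200 ms of tone
const UInt32 silenceFrames = (UInt32)(0.2 * viewController->sampleRate); // ~200 ms of silence
UInt32 beepFrameCount = viewController->beepFrameCount;

for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
    if (beepFrameCount < beepFrames)
    {
        // Tone part of the cycle
        buffer[frame] = sin(theta) * amplitude;
    }
    else
    {
        // Silence part of the cycle
        buffer[frame] = 0;
    }

    theta += theta_increment;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }

    // Advance through the beep/silence cycle and wrap around
    beepFrameCount++;
    if (beepFrameCount >= beepFrames + silenceFrames)
    {
        beepFrameCount = 0;
    }
}

viewController->beepFrameCount = beepFrameCount;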

You can also do amplitude modulation (a tremolo effect) by scaling the sample values with a factor computed from another, much lower-frequency sine signal.
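A minimal sketch of that, assuming you add a second phase accumulator ivar (here called modTheta, a double) to the view controller and pick a modulation rate yourself:

// Sketch only: modTheta is an assumed new ivar; modFrequency is an
// arbitrary example value.
const double modFrequency = 5.0; // 5 Hz tremolo
double modTheta = viewController->modTheta;
double mod_increment = 2.0 * M_PI * modFrequency / viewController->sampleRate;

for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
    // Map the slow sine from [-1, 1] to a [0, 1] envelope and scale the tone by it
    double envelope = 0.5 * (1.0 + sin(modTheta));
    buffer[frame] = sin(theta) * amplitude * envelope;

    theta += theta_increment;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }

    modTheta += mod_increment;
    if (modTheta > 2.0 * M_PI)
    {
        modTheta -= 2.0 * M_PI;
    }
}

viewController->modTheta = modTheta;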

Another example is producing a "siren" sound by modulating the frequency of the generated samples (a vibrato effect), essentially varying the value of theta_increment according to a low-frequency signal as well. Or simply alternate between two different values, combined with the "beep" approach above.
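A sketch of the siren/vibrato idea, again assuming a modTheta ivar; modFrequency and sweepDepth are arbitrary example values, and the fixed theta_increment computed before the loop is replaced by a per-frame increment:

// Sketch only: modTheta is an assumed new ivar; sweep settings are examples.
const double modFrequency = 2.0;   // sweep up and down twice per second
const double sweepDepth   = 400.0; // +/- 400 Hz around the slider frequency
double modTheta = viewController->modTheta;
double mod_increment = 2.0 * M_PI * modFrequency / viewController->sampleRate;

for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
    buffer[frame] = sin(theta) * amplitude;

    // Recompute the phase increment each frame from the modulated frequency
    double currentFrequency = viewController->frequency + sweepDepth * sin(modTheta);
    double increment = 2.0 * M_PI * currentFrequency / viewController->sampleRate;

    theta += increment;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }

    modTheta += mod_increment;
    if (modTheta > 2.0 * M_PI)
    {
        modTheta -= 2.0 * M_PI;
    }
}

viewController->modTheta = modTheta;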

Hope this helps.

answered May 19, 2014 at 15:03