
I have a multichannel mixer audio unit playing back audio files in an iOS app, and I need to figure out how to update the app's UI and perform a reset when the render callback reaches the end of the longest audio file (which is set to run on bus 0). As my code below shows, I am trying to use KVO for this (with the Boolean variable tapesUnderway; the NSAutoreleasePool is necessary because this Objective-C code runs outside of its normal domain, see http://www.cocoabuilder.com/archive/cocoa/57412-nscfnumber-no-pool-in-place-just-leaking.html).

static OSStatus tapesRenderInput(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
{
    SoundBufferPtr sndbuf = (SoundBufferPtr)inRefCon;

    UInt32 bufferFrames = sndbuf[inBusNumber].numFrames;
    AudioUnitSampleType *in = sndbuf[inBusNumber].data; 

    // These mBuffers are the output buffers and are empty; these two lines are just setting the references to them (via outA and outB)
    AudioUnitSampleType *outA = (AudioUnitSampleType *)ioData->mBuffers[0].mData;
    AudioUnitSampleType *outB = (AudioUnitSampleType *)ioData->mBuffers[1].mData;

    UInt32 sample = sndbuf[inBusNumber].sampleNum;


    // --------------------------------------------------------------
    // Set the start time here
    if(inBusNumber == 0 && !tapesFirstRenderPast)
    {
        printf("Tapes first render past\n");

        tapesStartSample = inTimeStamp->mSampleTime;
        tapesFirstRenderPast = YES;                     // MAKE SURE TO RESET THIS ON SONG RESTART
        firstPauseSample = tapesStartSample;
    }

    // --------------------------------------------------------------
    // Now process the samples
     for(UInt32 i = 0; i < inNumberFrames; ++i)
     {
         if(inBusNumber == 0)
         {
            // ------------------------------------------------------
            // Bus 0 is the backing track, and is always playing back

            outA[i] = in[sample++];
            outB[i] = in[sample++];     // For stereo set desc.SetAUCanonical to (2, true) and increment samples in both output calls

            lastSample = inTimeStamp->mSampleTime + (Float64)i;     // Set the last played sample in order to compensate for pauses


            // ------------------------------------------------------
            // Use this logic to mark end of tune
            if(sample >= (bufferFrames * 2) && !tapesEndPast)
            {
                // USE KVO TO NOTIFY METHOD OF VALUE CHANGE

                NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
                FuturesEPMedia *futuresMedia = [FuturesEPMedia sharedFuturesEPMedia];
                NSNumber *boolNo = [[NSNumber alloc] initWithBool: NO];
                [futuresMedia setValue: boolNo forKey: @"tapesUnderway"];
                [boolNo release];
                [pool release];

                tapesEndPast = YES;
            }
        }
        else
        {
            // ------------------------------------------------------
            // The other buses are the open sections, and are synched through the tapesSectionsTimes array

            Float64 sectionTime = tapesSectionTimes[inBusNumber] * kGraphSampleRate;        // Section time in samples
            Float64 currentSample = inTimeStamp->mSampleTime + (Float64)i;

            if(!isPaused && !playFirstRenderPast)
            {
                pauseGap += currentSample - firstPauseSample;
                playFirstRenderPast = YES;
                pauseFirstRenderPast = NO;
            }


            if(currentSample > (tapesStartSample + sectionTime + pauseGap) && sample < (bufferFrames * 2))
            {
                outA[i] = in[sample++];
                outB[i] = in[sample++];
            }
            else
            {
                outA[i] = 0;
                outB[i] = 0;
            }
        }
    }

    sndbuf[inBusNumber].sampleNum = sample;

    return noErr;
}

The moment this variable changes, it does trigger a method in self, but doing so from this render callback causes an unacceptable delay (20-30 seconds) before that method executes (I suspect because it is Objective-C code running on the high-priority audio thread?). How can I trigger such a change efficiently, without the delay? (The trigger will turn the pause button into a play button and call a reset method to prepare for the next playback.)

Thanks


2 Answers


Yes. Don't use Objective-C code on the render thread, because it runs at high priority. Instead, store the state in plain memory (a pointer or a struct), and have a timer on the main thread poll (check) that value in memory. The timer does not need to be anywhere near as fast as the render thread and will still be accurate enough.
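
A minimal sketch of that approach, assuming a shared flag and a small monitor object (the names tapesFinishedFlag, TapesMonitor, checkFlag: and the notification name are invented for this example; on the toolchains of the time a volatile BOOL or the OSAtomic functions can stand in for stdatomic.h):

#import <Foundation/Foundation.h>
#import <stdatomic.h>

// Plain C storage shared with the render callback; updating it needs no
// Objective-C, no locks, and no allocation on the audio thread.
static atomic_bool tapesFinishedFlag;

// Inside the render callback, instead of the KVO / autorelease-pool block:
//     atomic_store(&tapesFinishedFlag, true);
//     tapesEndPast = YES;

@interface TapesMonitor : NSObject
- (void)start;
@end

@implementation TapesMonitor

- (void)start
{
    // Poll ten times a second on the main run loop; far slower than the
    // render callback, but plenty fast for a UI update.
    [NSTimer scheduledTimerWithTimeInterval:0.1
                                     target:self
                                   selector:@selector(checkFlag:)
                                   userInfo:nil
                                    repeats:YES];
}

- (void)checkFlag:(NSTimer *)timer
{
    // Read-and-clear the flag; all UI and reset work stays on the main thread,
    // e.g. flip the pause button back to play and call the reset method.
    if (atomic_exchange(&tapesFinishedFlag, false))
    {
        [[NSNotificationCenter defaultCenter] postNotificationName:@"TapesFinished"
                                                            object:nil];
    }
}

@end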

answered 2011-08-30T02:32:27.613

Try this.

Globals:

BOOL    FlgTotalSampleTimeCollected = NO;
Float64 HigestSampleTime = 0;
Float64 TotalSampleTime = 0;

In -(OSStatus)setUpAUFilePlayer:

AudioStreamBasicDescription fileASBD;
// get the audio data format from the file
UInt32 propSize = sizeof(fileASBD);
CheckError(AudioFileGetProperty(inputFile, kAudioFilePropertyDataFormat,
                                &propSize, &fileASBD),
           "couldn't get file's data format");
UInt64 nPackets;
UInt32 propsize = sizeof(nPackets);
CheckError(AudioFileGetProperty(inputFile, kAudioFilePropertyAudioDataPacketCount,
                                &propsize, &nPackets),
           "AudioFileGetProperty[kAudioFilePropertyAudioDataPacketCount] failed");
// Length of this file in sample frames; keep track of the longest one
Float64 sTime = nPackets * fileASBD.mFramesPerPacket;
if (HigestSampleTime < sTime)
{
    HigestSampleTime = sTime;
}

In the render callback:

if (*actionFlags & kAudioUnitRenderAction_PreRender)
{
    // On the first pre-render pass, record the sample time at which the
    // longest file will finish (current sample time + its length in frames).
    if (!THIS->FlgTotalSampleTimeCollected)
    {
        [THIS setFlgTotalSampleTimeCollected:TRUE];
        [THIS setTotalSampleTime:(inTimeStamp->mSampleTime + THIS->HigestSampleTime)];
    }
}
else if (*actionFlags & kAudioUnitRenderAction_PostRender)
{
    // Once the render clock passes that point, the longest file has finished.
    if (inTimeStamp->mSampleTime > THIS->TotalSampleTime)
    {
        NSLog(@"inTimeStamp->mSampleTime :%f", inTimeStamp->mSampleTime);
        NSLog(@"audio completed");
        [THIS callAudioCompletedMethodHere];
    }
}

This works for me. Tested on a device.
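
One detail worth noting: the kAudioUnitRenderAction_PreRender / kAudioUnitRenderAction_PostRender flags above are only delivered to callbacks registered with AudioUnitAddRenderNotify, not to a callback installed as the unit's render input. A minimal sketch of that registration under the same assumptions (mixerUnit, the renderNotify function name, and passing self as the refCon are placeholders; CheckError is the helper already used above):

#import <AudioUnit/AudioUnit.h>

// Hypothetical notify callback; it receives the same arguments as a render
// callback, plus the pre/post-render action flags.
static OSStatus renderNotify(void *inRefCon,
                             AudioUnitRenderActionFlags *actionFlags,
                             const AudioTimeStamp *inTimeStamp,
                             UInt32 inBusNumber,
                             UInt32 inNumberFrames,
                             AudioBufferList *ioData)
{
    if (*actionFlags & kAudioUnitRenderAction_PostRender)
    {
        // Compare inTimeStamp->mSampleTime against the stored end time here,
        // exactly as in the code above.
    }
    return noErr;
}

// During graph setup, once the audio unit (here called mixerUnit) exists:
//
//     CheckError(AudioUnitAddRenderNotify(mixerUnit, renderNotify, (void *)self),
//                "AudioUnitAddRenderNotify failed");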

answered 2013-03-08T14:07:43.197