
I am trying to use SuperpoweredTimeStretching inside an AU render block, for example on a single channel with some simple code.

I am not changing the speed of the audio for now (so I don't need a circular buffer or anything like that; the number of input and output samples per buffer is fixed). But my situation is strange: my code works fine, but only as long as I don't change the pitch!

If I don't change the pitch, I get one slice (1024 samples). If I do change the pitch parameter, I get two slices (512 samples each), which seems perfectly normal (I implemented the iterator). But as soon as there is more than one slice (two of 512 instead of one of 1024), the output sounds full of artifacts.

I don't understand what I am doing wrong.

- (AUInternalRenderBlock)internalRenderBlock {

AudioBufferList *renderABLCapture = &renderABL;

SuperpoweredTimeStretching *timeStretch = new SuperpoweredTimeStretching(48000);

// If I change the pitch it sounds very "ugly", with artifacts. If nothing is changed, everything is OK.
timeStretch->setRateAndPitchShift(1.0f, 1); // Speed is fixed; only the pitch is changed.

// This buffer list will receive the time-stretched samples.
SuperpoweredAudiopointerList *outputBuffers = new SuperpoweredAudiopointerList(8, 16);


return ^AUAudioUnitStatus(AudioUnitRenderActionFlags    *actionFlags,
                          const AudioTimeStamp        *timestamp,
                          AVAudioFrameCount            frameCount,
                          NSInteger                outputBusNumber,
                          AudioBufferList            *outputBufferListPtr,
                          const AURenderEvent        *realtimeEventListHead,
                          AURenderPullInputBlock        pullInputBlock ) {

    int numBuffers = outputBufferListPtr->mNumberBuffers;

    pullInputBlock(actionFlags, timestamp, frameCount, 0, renderABLCapture);

    Float32 *sampleDataOutLeft  = (Float32*)outputBufferListPtr->mBuffers[0].mData;
    Float32 *sampleDataOutRight = (Float32*)outputBufferListPtr->mBuffers[1].mData;


    size_t sampleSize = sizeof(Float32);
    //***********

    SuperpoweredAudiobufferlistElement inputBuffer;
    inputBuffer.samplePosition = 0;
    inputBuffer.startSample = 0;
    inputBuffer.samplesUsed = 0;
    inputBuffer.endSample = frameCount;
    inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer(frameCount * 18 + 64);
    inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;


    //Input sample data to inputBuffer for timeStretch

    memcpy((float*)inputBuffer.buffers[0], renderABLCapture->mBuffers[0].mData, sampleSize * frameCount);

    timeStretch->process(&inputBuffer, outputBuffers); //Process

    if (outputBuffers->makeSlice(0, outputBuffers->sampleLength)) {

        int count = 0;
        int numSamples = 0;

        while (true) { // Iterate on every output slice.

            //If I have more than one slice - it sounds very "ugly" with artefacts.

            // Get pointer to the output samples.
            float *timeStretchedAudio = (float *)outputBuffers->nextSliceItem(&numSamples);
            if (!timeStretchedAudio) break;

            for (int i = 0; i < numSamples; i++) {

                Float32 *sample = &timeStretchedAudio[i];
                sampleDataOutLeft[i + count] = *sample;

            }

            count += numSamples;

        }

        outputBuffers->clear();

    }

    return noErr;
};

}


1 Answer


My problem was with channel interleaving: the time stretcher works on interleaved stereo, but I was feeding it a single non-interleaved channel and copying its output back without de-interleaving (a minimal sketch of the interleave/de-interleave round trip follows the code below).

Here is the correct code:

return ^AUAudioUnitStatus(AudioUnitRenderActionFlags    *actionFlags,
                              const AudioTimeStamp      *timestamp,
                              AVAudioFrameCount         frameCount,
                              NSInteger             outputBusNumber,
                              AudioBufferList           *outputBufferListPtr,
                              const AURenderEvent       *realtimeEventListHead,
                              AURenderPullInputBlock        pullInputBlock ) {

        // Pull the incoming audio into renderABLCapture.
        pullInputBlock(actionFlags, timestamp, frameCount, 0, renderABLCapture);

        Float32 *sampleDataInLeft = (Float32*) renderABLCapture->mBuffers[0].mData;
        Float32 *sampleDataInRight = (Float32*) renderABLCapture->mBuffers[1].mData;

        Float32 *sampleDataOutLeft  = (Float32*)outputBufferListPtr->mBuffers[0].mData;
        Float32 *sampleDataOutRight = (Float32*)outputBufferListPtr->mBuffers[1].mData;


        SuperpoweredAudiobufferlistElement inputBuffer;
        inputBuffer.samplePosition = 0;
        inputBuffer.startSample = 0;
        inputBuffer.samplesUsed = 0;
        inputBuffer.endSample = frameCount;
        inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer(frameCount * 8 + 64);
        inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;

        // The time stretcher works on interleaved stereo, so interleave the two non-interleaved AU input buffers into its input buffer.
        SuperpoweredInterleave(sampleDataInLeft, sampleDataInRight, (Float32*)inputBuffer.buffers[0], frameCount);

        timeStretch->setRateAndPitchShift(1.0f, -2); // Speed unchanged; pitch shifted down two semitones.
        timeStretch->setSampleRate(48000);
        timeStretch->process(&inputBuffer, outputBuffers);

        if (outputBuffers->makeSlice(0, outputBuffers->sampleLength)) {

            int numSamples = 0;
            int samplesOffset = 0;

            while (true) {

                Float32 *timeStretchedAudio = (Float32 *)outputBuffers->nextSliceItem(&numSamples);
                if (!timeStretchedAudio) break;

                // Split each interleaved output slice back into the left/right output buffers, advancing by the slice length.
                SuperpoweredDeInterleave(timeStretchedAudio, sampleDataOutLeft + samplesOffset, sampleDataOutRight + samplesOffset, numSamples);

                samplesOffset += numSamples;

            }

            outputBuffers->clear();

        }

        return noErr;
    };
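
For reference, here is a minimal sketch of what the interleave / de-interleave round trip does, assuming the usual L R L R ... interleaved stereo layout implied by the SuperpoweredInterleave and SuperpoweredDeInterleave calls above. The helpers interleaveStereo and deinterleaveStereo are hypothetical stand-ins written as plain C++ loops, not Superpowered API:

#include <cstddef>

// Hypothetical stand-in for SuperpoweredInterleave: pack two planar (non-interleaved)
// channels into one interleaved buffer holding 2 * frames floats, laid out L R L R ...
static void interleaveStereo(const float *left, const float *right, float *out, size_t frames) {
    for (size_t i = 0; i < frames; i++) {
        out[2 * i]     = left[i];
        out[2 * i + 1] = right[i];
    }
}

// Hypothetical stand-in for SuperpoweredDeInterleave: split an interleaved buffer
// back into two planar channels of `frames` samples each.
static void deinterleaveStereo(const float *in, float *left, float *right, size_t frames) {
    for (size_t i = 0; i < frames; i++) {
        left[i]  = in[2 * i];
        right[i] = in[2 * i + 1];
    }
}

Seen this way, the artifacts in the first version make sense: the memcpy copied only the left (planar) channel into a buffer the stretcher reads as interleaved stereo, and each output slice was written straight into sampleDataOutLeft without de-interleaving. With no pitch shift the stretcher presumably passes the samples through unchanged, which is why the mismatch only became audible once the pitch changed and the output started arriving in multiple slices.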
Answered 2018-09-21T17:14:35.933