1

我正在使用 cpp/winrt 开发 WASAPI UWP 音频应用程序,该应用程序需要从输入中获取音频并在处理后将其发送到输出。

我想用 AvSetMmThreadCharacteristicsW(L"Pro Audio", &taskIndex) 来设置我的音频线程特性,但我刚刚注意到这个函数(以及 avrt.h 的大部分内容)仅限于 WINAPI_PARTITION_DESKTOP 和 WINAPI_PARTITION_GAMES 分区。

我想我需要这个,因为当我的代码集成到我的 UWP 应用程序中时,音频输入充满了不连续性,而在使用 avrt API 的测试代码中我没有遇到问题。

还有另一种方法来配置我的线程以进行音频处理吗?


编辑:这是我的测试程序 https://github.com/loics2/test-wasapi 。有趣的部分在 AudioStream 类中。我无法共享我的 UWP 应用,但我是把这些类原样复制到一个 Windows 运行时组件中使用的。


编辑 2:这是音频线程代码:

// Audio worker-thread loop: lazily initializes the WASAPI capture and render
// clients (shared mode, event-driven), then repeatedly pulls captured frames
// into a ring buffer, runs the user callback (OnData), and pushes the
// processed frames to the render client until m_isRunning is cleared.
// Error handling (if (FAILED(hr)) goto exit;) was stripped for readability.
void AudioStream::StreamWorker()
    {
        // Mix formats returned by GetMixFormat are CoTaskMem-allocated;
        // NOTE(review): they should be released with CoTaskMemFree —
        // presumably done in the elided cleanup code at `exit:`; verify.
        WAVEFORMATEX* captureFormat = nullptr;
        WAVEFORMATEX* renderFormat = nullptr;

        // Intermediate FIFOs decoupling capture/render packet sizes from
        // the user callback's block size.
        RingBuffer<float> captureBuffer;
        RingBuffer<float> renderBuffer;

        BYTE* streamBuffer = nullptr;           // raw pointer into the WASAPI endpoint buffer
        unsigned int streamBufferSize = 0;
        unsigned int bufferFrameCount = 0;      // frames per packet (capture) / writable frames (render)
        unsigned int numFramesPadding = 0;      // frames already queued in the render buffer
        unsigned int inputBufferSize = 0;
        unsigned int outputBufferSize = 0;
        DWORD captureFlags = 0;                 // AUDCLNT_BUFFERFLAGS_* reported by GetBuffer (currently unchecked)

        winrt::hresult hr = S_OK;

        // m_inputClient is a winrt::com_ptr<IAudioClient3>
        if (m_inputClient) {

            hr = m_inputClient->GetMixFormat(&captureFormat);
            
            // m_audioCaptureClient is a winrt::com_ptr<IAudioCaptureClient>
            // First pass only: initialize the capture side once; subsequent
            // invocations reuse the already-created capture client.
            if (!m_audioCaptureClient) {
                // Shared mode, event callback, default buffer duration (0, 0):
                // the engine signals m_inputReadyEvent when a packet is ready.
                hr = m_inputClient->Initialize(
                    AUDCLNT_SHAREMODE_SHARED,
                    AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                    0, 
                    0,
                    captureFormat,
                    nullptr);
               
                hr = m_inputClient->GetService(__uuidof(IAudioCaptureClient), m_audioCaptureClient.put_void());
                hr = m_inputClient->SetEventHandle(m_inputReadyEvent.get());
                hr = m_inputClient->Reset();
                hr = m_inputClient->Start();
            }
        }

        hr = m_inputClient->GetBufferSize(&inputBufferSize);

        // multiplying the buffer size by the number of channels
        // NOTE(review): channel count (2) and float samples are hard-coded;
        // this assumes the shared-mode mix format is stereo float — confirm
        // against captureFormat instead of assuming.
        inputBufferSize *= 2;

        // m_outputClient is a winrt::com_ptr<IAudioClient3>
        if (m_outputClient) {
            hr = m_outputClient->GetMixFormat(&renderFormat);

            // m_audioRenderClientis a winrt::com_ptr<IAudioRenderClient>
            if (!m_audioRenderClient) {
                // NOTE(review): captureFormat is passed here although
                // renderFormat was just queried above — looks like a bug
                // (or relies on both endpoints sharing one mix format); verify.
                hr = m_outputClient->Initialize(
                    AUDCLNT_SHAREMODE_SHARED,
                    AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                    0,
                    0,
                    captureFormat,
                    nullptr);
                hr = m_outputClient->GetService(__uuidof(IAudioRenderClient), m_audioRenderClient.put_void());
                hr = m_outputClient->SetEventHandle(m_outputReadyEvent.get());

                hr = m_outputClient->Reset();
                hr = m_outputClient->Start();
            }
        }

        hr = m_outputClient->GetBufferSize(&outputBufferSize);

        // multiplying the buffer size by the number of channels
        outputBufferSize *= 2;

        while (m_isRunning)
        {
            // ===== INPUT =====

            // waiting for the capture event
            WaitForSingleObject(m_inputReadyEvent.get(), INFINITE);

            // getting the input buffer data
            hr = m_audioCaptureClient->GetNextPacketSize(&bufferFrameCount);

            // Drain every available capture packet into captureBuffer.
            while (SUCCEEDED(hr) && bufferFrameCount > 0) {
                m_audioCaptureClient->GetBuffer(&streamBuffer, &bufferFrameCount, &captureFlags, nullptr, nullptr);
                if (bufferFrameCount != 0) {
                    // frames * 2 channels -> interleaved float sample count
                    captureBuffer.write(reinterpret_cast<float*>(streamBuffer), bufferFrameCount * 2);

                    hr = m_audioCaptureClient->ReleaseBuffer(bufferFrameCount);
                    if (FAILED(hr)) {
                        m_audioCaptureClient->ReleaseBuffer(0);
                    }
                }
                else
                {
                    m_audioCaptureClient->ReleaseBuffer(0);
                }

                hr = m_audioCaptureClient->GetNextPacketSize(&bufferFrameCount);
            }

            // ===== CALLBACK =====

            // NOTE(review): calloc/free every iteration on the audio thread
            // can allocate under real-time constraints; consider preallocated
            // buffers. Also, per the accepted answer, OnData must not block
            // (a >50 ms callback here starves the render side and causes the
            // observed discontinuities).
            auto size = captureBuffer.size();
            float* userInputData = (float*)calloc(size, sizeof(float));
            float* userOutputData = (float*)calloc(size, sizeof(float));
            captureBuffer.read(userInputData, size);

            // size / 2 frames, 2 channels, 48 kHz — sample rate is hard-coded;
            // presumably matches the device mix format, verify.
            OnData(userInputData, userOutputData, size / 2, 2, 48000);

            renderBuffer.write(userOutputData, size);

            free(userInputData);
            free(userOutputData);

            // ===== OUTPUT =====

            // waiting for the render event
            WaitForSingleObject(m_outputReadyEvent.get(), INFINITE);

            // getting information about the output buffer
            hr = m_outputClient->GetBufferSize(&bufferFrameCount);
            hr = m_outputClient->GetCurrentPadding(&numFramesPadding);

            // adjust the frame count with the padding
            bufferFrameCount -= numFramesPadding;

            if (bufferFrameCount != 0) {
                hr = m_audioRenderClient->GetBuffer(bufferFrameCount, &streamBuffer);

                auto count = (bufferFrameCount * 2);
                if (renderBuffer.read(reinterpret_cast<float*>(streamBuffer), count) < count) {
                    // captureBuffer is not full enough, we should fill the remainder with 0
                    // NOTE(review): the remainder is currently NOT zeroed, so
                    // stale endpoint-buffer contents may be played.
                }

                hr = m_audioRenderClient->ReleaseBuffer(bufferFrameCount, 0);
                if (FAILED(hr)) {
                    m_audioRenderClient->ReleaseBuffer(0, 0);
                }
            }
            else
            {
                m_audioRenderClient->ReleaseBuffer(0, 0);
            }
        }

    exit:
        // Cleanup code

    }

为了清楚起见,我删除了错误处理代码,其中大部分是:

if (FAILED(hr)) 
    goto exit;
4

1 回答 1

0

@IInspectable 是对的,我的代码有问题:音频处理由一个库完成,处理结束后该库会调用一个回调来返回结果。

在我的回调中,我尝试触发一个 winrt::event,但它有时需要超过 50 毫秒。每当这种情况发生时,它就会阻塞音频线程,从而产生不连续性……

于 2021-02-19T14:31:51.477 回答