
I am trying to render video using the Microsoft sample DX11VideoRenderer, found here: https://github.com/Microsoft/Windows-classic-samples/tree/master/Samples/DX11VideoRenderer. From my extensive research it appears that using DirectX 11 with hardware acceleration is the most up-to-date approach (least likely to become deprecated) and provides the best-performing solution.

There are 2 similar functions in Presenter.cpp that process frames, and I cannot work out the difference between them. ProcessFrameUsingD3D11() uses VideoProcessorBlt() to do the actual rendering. The mystery is ProcessFrameUsingXVP(): it does not use that call, so how does it actually render? Or is it doing something else entirely? Also, my implementation appears to be using ProcessFrameUsingXVP(), based on the value of the variable m_useXVP, which is set to '1' by default. Here is the code sample:

    if (m_useXVP)
    {
        BOOL bInputFrameUsed = FALSE;

        hr = ProcessFrameUsingXVP( pCurrentType, pSample, pTexture2D, rcDest, ppOutputSample, &bInputFrameUsed );

        if (SUCCEEDED(hr) && !bInputFrameUsed)
        {
            *pbProcessAgain = TRUE;
        }
    }
    else
    {
        hr = ProcessFrameUsingD3D11( pTexture2D, pEVTexture2D, dwViewIndex, dwEVViewIndex, rcDest, *punInterlaceMode, ppOutputSample );

        LONGLONG hnsDuration = 0;
        LONGLONG hnsTime = 0;
        DWORD dwSampleFlags = 0;

        if (ppOutputSample != NULL && *ppOutputSample != NULL)
        {
            if (SUCCEEDED(pSample->GetSampleDuration(&hnsDuration)))
            {
                (*ppOutputSample)->SetSampleDuration(hnsDuration);
            }

            if (SUCCEEDED(pSample->GetSampleTime(&hnsTime)))
            {
                (*ppOutputSample)->SetSampleTime(hnsTime);
            }

            if (SUCCEEDED(pSample->GetSampleFlags(&dwSampleFlags)))
            {
                (*ppOutputSample)->SetSampleFlags(dwSampleFlags);
            }
        }
    }

How m_useXVP gets set is also a mystery to me, and I could not find an answer in my research. It uses a registry key that does not exist on my particular Windows 10 PC, so the value is never modified:

const TCHAR* lpcszInVP = TEXT("XVP");
const TCHAR* lpcszREGKEY = TEXT("SOFTWARE\\Microsoft\\Scrunch\\CodecPack\\MSDVD");

if(0 == RegOpenKeyEx(HKEY_CURRENT_USER, lpcszREGKEY, 0, KEY_READ, &hk))
{
    dwData = 0;
    cbData = sizeof(DWORD);
    if (0 == RegQueryValueEx(hk, lpcszInVP, 0, &cbType, (LPBYTE)&dwData, &cbData))
    {
        m_useXVP = dwData;
    }
}
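
(As a side note, I believe the path could be forced the other way by creating that value yourself. Below is a minimal, hypothetical helper, not part of the sample, that writes the same HKCU key and "XVP" DWORD value the presenter reads; setting it to 0 should make m_useXVP FALSE so that ProcessFrameUsingD3D11() gets used instead. It would have to run before the presenter is created, since the value appears to be read only once.)

    #include <windows.h>

    // Hypothetical helper (not in the sample): create the registry value that
    // the presenter reads so m_useXVP ends up FALSE and the D3D11 path is taken.
    static bool ForceD3D11Path()
    {
        HKEY hk = NULL;
        if (ERROR_SUCCESS != RegCreateKeyEx(HKEY_CURRENT_USER,
                TEXT("SOFTWARE\\Microsoft\\Scrunch\\CodecPack\\MSDVD"),
                0, NULL, 0, KEY_SET_VALUE, NULL, &hk, NULL))
        {
            return false;
        }

        DWORD dwData = 0;   // 0 => m_useXVP = FALSE => ProcessFrameUsingD3D11()
        const LONG lr = RegSetValueEx(hk, TEXT("XVP"), 0, REG_DWORD,
                                      (const BYTE*)&dwData, sizeof(dwData));
        RegCloseKey(hk);
        return ERROR_SUCCESS == lr;
    }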

So since my PC does not have this registry key, the code defaults to using ProcessFrameUsingXVP(). Here is its definition:

HRESULT DX11VideoRenderer::CPresenter::ProcessFrameUsingXVP(IMFMediaType* pCurrentType, IMFSample* pVideoFrame, ID3D11Texture2D* pTexture2D, RECT rcDest, IMFSample** ppVideoOutFrame, BOOL* pbInputFrameUsed)
{
    HRESULT hr = S_OK;
    ID3D11VideoContext* pVideoContext = NULL;
    ID3D11Texture2D* pDXGIBackBuffer = NULL;
    IMFSample* pRTSample = NULL;
    IMFMediaBuffer* pBuffer = NULL;
    IMFAttributes*  pAttributes = NULL;
    D3D11_VIDEO_PROCESSOR_CAPS vpCaps = { 0 };

    do
    {
        if (!m_pDX11VideoDevice)
        {
            hr = m_pD3D11Device->QueryInterface(__uuidof(ID3D11VideoDevice), (void**)&m_pDX11VideoDevice);
            if (FAILED(hr))
            {
                break;
            }
        }

        hr = m_pD3DImmediateContext->QueryInterface(__uuidof(ID3D11VideoContext), (void**)&pVideoContext);
        if (FAILED(hr))
        {
            break;
        }

        // remember the original rectangles
        RECT TRectOld = m_rcDstApp;
        RECT SRectOld = m_rcSrcApp;
        UpdateRectangles(&TRectOld, &SRectOld);

        //Update destination rect with current client rect
        m_rcDstApp = rcDest;

        D3D11_TEXTURE2D_DESC surfaceDesc;
        pTexture2D->GetDesc(&surfaceDesc);

        BOOL fTypeChanged = FALSE;
        if (!m_pVideoProcessorEnum || !m_pSwapChain1 || m_imageWidthInPixels != surfaceDesc.Width || m_imageHeightInPixels != surfaceDesc.Height)
        {
            SafeRelease(m_pVideoProcessorEnum);
            SafeRelease(m_pSwapChain1);

            m_imageWidthInPixels = surfaceDesc.Width;
            m_imageHeightInPixels = surfaceDesc.Height;
            fTypeChanged = TRUE;

            D3D11_VIDEO_PROCESSOR_CONTENT_DESC ContentDesc;
            ZeroMemory(&ContentDesc, sizeof(ContentDesc));
            ContentDesc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
            ContentDesc.InputWidth = surfaceDesc.Width;
            ContentDesc.InputHeight = surfaceDesc.Height;
            ContentDesc.OutputWidth = surfaceDesc.Width;
            ContentDesc.OutputHeight = surfaceDesc.Height;
            ContentDesc.Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL;

            hr = m_pDX11VideoDevice->CreateVideoProcessorEnumerator(&ContentDesc, &m_pVideoProcessorEnum);
            if (FAILED(hr))
            {
                break;
            }

            m_rcSrcApp.left = 0;
            m_rcSrcApp.top = 0;
            m_rcSrcApp.right = m_uiRealDisplayWidth;
            m_rcSrcApp.bottom = m_uiRealDisplayHeight;

            if (m_b3DVideo)
            {
                hr = m_pVideoProcessorEnum->GetVideoProcessorCaps(&vpCaps);
                if (FAILED(hr))
                {
                    break;
                }

                if (vpCaps.FeatureCaps & D3D11_VIDEO_PROCESSOR_FEATURE_CAPS_STEREO)
                {
                    m_bStereoEnabled = TRUE;
                }

                DXGI_MODE_DESC1 modeFilter = { 0 };
                modeFilter.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
                modeFilter.Width = surfaceDesc.Width;
                modeFilter.Height = surfaceDesc.Height;
                modeFilter.Stereo = m_bStereoEnabled;

                DXGI_MODE_DESC1 matchedMode;
                if (m_bFullScreenState)
                {
                    hr = m_pDXGIOutput1->FindClosestMatchingMode1(&modeFilter, &matchedMode, m_pD3D11Device);
                    if (FAILED(hr))
                    {
                        break;
                    }
                }

                hr = m_pXVP->GetAttributes(&pAttributes);
                if (FAILED(hr))
                {
                    break;
                }

                hr = pAttributes->SetUINT32(MF_ENABLE_3DVIDEO_OUTPUT, (0 != m_vp3DOutput) ? MF3DVideoOutputType_Stereo : MF3DVideoOutputType_BaseView);
                if (FAILED(hr))
                {
                    break;
                }
            }
        }

        // now create the input and output media types - these need to reflect
        // the src and destination rectangles that we have been given.
        RECT TRect = m_rcDstApp;
        RECT SRect = m_rcSrcApp;
        UpdateRectangles(&TRect, &SRect);

        const BOOL fDestRectChanged = !EqualRect(&TRect, &TRectOld);
        const BOOL fSrcRectChanged = !EqualRect(&SRect, &SRectOld);

        if (!m_pSwapChain1 || fDestRectChanged)
        {
            hr = UpdateDXGISwapChain();
            if (FAILED(hr))
            {
                break;
            }
        }

        if (fTypeChanged || fSrcRectChanged || fDestRectChanged)
        {
            // stop streaming to avoid multiple start\stop calls internally in XVP
            hr = m_pXVP->ProcessMessage(MFT_MESSAGE_NOTIFY_END_STREAMING, 0);
            if (FAILED(hr))
            {
                break;
            }

            if (fTypeChanged)
            {
                hr = SetXVPOutputMediaType(pCurrentType, DXGI_FORMAT_B8G8R8A8_UNORM);
                if (FAILED(hr))
                {
                    break;
                }
            }

            if (fDestRectChanged)
            {
                hr = m_pXVPControl->SetDestinationRectangle(&m_rcDstApp);
                if (FAILED(hr))
                {
                    break;
                }
            }

            if (fSrcRectChanged)
            {
                hr = m_pXVPControl->SetSourceRectangle(&SRect);
                if (FAILED(hr))
                {
                    break;
                }
            }

            hr = m_pXVP->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
            if (FAILED(hr))
            {
                break;
            }
        }

        m_bCanProcessNextSample = FALSE;

        // Get Backbuffer
        hr = m_pSwapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&pDXGIBackBuffer);
        if (FAILED(hr))
        {
            break;
        }

        // create the output media sample
        hr = MFCreateSample(&pRTSample);
        if (FAILED(hr))
        {
            break;
        }

        hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 0, FALSE, &pBuffer);
        if (FAILED(hr))
        {
            break;
        }

        hr = pRTSample->AddBuffer(pBuffer);
        if (FAILED(hr))
        {
            break;
        }

        if (m_b3DVideo && 0 != m_vp3DOutput)
        {
            SafeRelease(pBuffer);

            hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 1, FALSE, &pBuffer);
            if (FAILED(hr))
            {
                break;
            }

            hr = pRTSample->AddBuffer(pBuffer);
            if (FAILED(hr))
            {
                break;
            }
        }

        DWORD dwStatus = 0;
        MFT_OUTPUT_DATA_BUFFER outputDataBuffer = {};
        outputDataBuffer.pSample = pRTSample;
        hr = m_pXVP->ProcessOutput(0, 1, &outputDataBuffer, &dwStatus);
        if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT)
        {
            //call process input on the MFT to deliver the YUV video sample
            // and the call process output to extract of newly processed frame
            hr = m_pXVP->ProcessInput(0, pVideoFrame, 0);
            if (FAILED(hr))
            {
                break;
            }

            *pbInputFrameUsed = TRUE;

            hr = m_pXVP->ProcessOutput(0, 1, &outputDataBuffer, &dwStatus);
            if (FAILED(hr))
            {
                break;
            }
        }
        else
        {
            *pbInputFrameUsed = FALSE;
        }

        if (ppVideoOutFrame != NULL)
        {
            *ppVideoOutFrame = pRTSample;
            (*ppVideoOutFrame)->AddRef();
        }
    } while (FALSE);

    SafeRelease(pAttributes);
    SafeRelease(pBuffer);
    SafeRelease(pRTSample);
    SafeRelease(pDXGIBackBuffer);
    SafeRelease(pVideoContext);

    return hr;
}

And here is the definition of ProcessFrameUsingD3D11():

HRESULT DX11VideoRenderer::CPresenter::ProcessFrameUsingD3D11( ID3D11Texture2D* pLeftTexture2D, ID3D11Texture2D* pRightTexture2D, UINT dwLeftViewIndex, UINT dwRightViewIndex, 
    RECT rcDest, UINT32 unInterlaceMode, IMFSample** ppVideoOutFrame )
{
    HRESULT hr = S_OK;
    ID3D11VideoContext* pVideoContext = NULL;
    ID3D11VideoProcessorInputView* pLeftInputView = NULL;
    ID3D11VideoProcessorInputView* pRightInputView = NULL;
    ID3D11VideoProcessorOutputView* pOutputView = NULL;
    ID3D11Texture2D* pDXGIBackBuffer = NULL;
    ID3D11RenderTargetView* pRTView = NULL;
    IMFSample* pRTSample = NULL;
    IMFMediaBuffer* pBuffer = NULL;
    D3D11_VIDEO_PROCESSOR_CAPS vpCaps = {0};
    LARGE_INTEGER lpcStart,lpcEnd;

    do
    {
        if (!m_pDX11VideoDevice)
        {
            hr = m_pD3D11Device->QueryInterface(__uuidof(ID3D11VideoDevice), (void**)&m_pDX11VideoDevice);
            if (FAILED(hr))
            {
                break;
            }
        }

        hr = m_pD3DImmediateContext->QueryInterface(__uuidof( ID3D11VideoContext ), (void**)&pVideoContext);
        if (FAILED(hr))
        {
            break;
        }

        // remember the original rectangles
        RECT TRectOld = m_rcDstApp;
        RECT SRectOld = m_rcSrcApp;
        UpdateRectangles(&TRectOld, &SRectOld);

        //Update destination rect with current client rect
        m_rcDstApp = rcDest;

        D3D11_TEXTURE2D_DESC surfaceDesc;
        pLeftTexture2D->GetDesc(&surfaceDesc);

        if (!m_pVideoProcessorEnum || !m_pVideoProcessor || m_imageWidthInPixels != surfaceDesc.Width || m_imageHeightInPixels != surfaceDesc.Height)
        {
            SafeRelease(m_pVideoProcessorEnum);
            SafeRelease(m_pVideoProcessor);

            m_imageWidthInPixels = surfaceDesc.Width;
            m_imageHeightInPixels = surfaceDesc.Height;

            D3D11_VIDEO_PROCESSOR_CONTENT_DESC ContentDesc;
            ZeroMemory( &ContentDesc, sizeof( ContentDesc ) );
            ContentDesc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
            ContentDesc.InputWidth = surfaceDesc.Width;
            ContentDesc.InputHeight = surfaceDesc.Height;
            ContentDesc.OutputWidth = surfaceDesc.Width;
            ContentDesc.OutputHeight = surfaceDesc.Height;
            ContentDesc.Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL;

            hr = m_pDX11VideoDevice->CreateVideoProcessorEnumerator(&ContentDesc, &m_pVideoProcessorEnum);
            if (FAILED(hr))
            {
                break;
            }

            UINT uiFlags;
            DXGI_FORMAT VP_Output_Format = DXGI_FORMAT_B8G8R8A8_UNORM;

            hr = m_pVideoProcessorEnum->CheckVideoProcessorFormat(VP_Output_Format, &uiFlags);
            if (FAILED(hr) || 0 == (uiFlags & D3D11_VIDEO_PROCESSOR_FORMAT_SUPPORT_OUTPUT))
            {
                hr = MF_E_UNSUPPORTED_D3D_TYPE;
                break;
            }

            m_rcSrcApp.left = 0;
            m_rcSrcApp.top = 0;
            m_rcSrcApp.right = m_uiRealDisplayWidth;
            m_rcSrcApp.bottom = m_uiRealDisplayHeight;

            DWORD index;
            hr = FindBOBProcessorIndex(&index);     // GG This may not be needed. BOB is something to do with deinterlacing.
            if (FAILED(hr))
            {
                break;
            }

            hr = m_pDX11VideoDevice->CreateVideoProcessor(m_pVideoProcessorEnum, index, &m_pVideoProcessor);
            if (FAILED(hr))
            {
                break;
            }

            if (m_b3DVideo)
            {
                hr = m_pVideoProcessorEnum->GetVideoProcessorCaps(&vpCaps);
                if (FAILED(hr))
                {
                    break;
                }

                if (vpCaps.FeatureCaps & D3D11_VIDEO_PROCESSOR_FEATURE_CAPS_STEREO)
                {
                    m_bStereoEnabled = TRUE;
                }

                DXGI_MODE_DESC1 modeFilter = { 0 };
                modeFilter.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
                modeFilter.Width = surfaceDesc.Width;
                modeFilter.Height = surfaceDesc.Height;
                modeFilter.Stereo = m_bStereoEnabled;

                DXGI_MODE_DESC1 matchedMode;
                if (m_bFullScreenState)
                {
                    hr = m_pDXGIOutput1->FindClosestMatchingMode1(&modeFilter, &matchedMode, m_pD3D11Device);
                    if (FAILED(hr))
                    {
                        break;
                    }
                }
            }
        }

        // now create the input and output media types - these need to reflect
        // the src and destination rectangles that we have been given.
        RECT TRect = m_rcDstApp;
        RECT SRect = m_rcSrcApp;
        UpdateRectangles(&TRect, &SRect);

        const BOOL fDestRectChanged = !EqualRect(&TRect, &TRectOld);

        if (!m_pSwapChain1 || fDestRectChanged)
        {
            hr = UpdateDXGISwapChain();
            if (FAILED(hr))
            {
                break;
            }
        }

        m_bCanProcessNextSample = FALSE;

        // Get Backbuffer
        hr = m_pSwapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&pDXGIBackBuffer);
        if (FAILED(hr))
        {
            break;
        }

        // create the output media sample
        hr = MFCreateSample(&pRTSample);
        if (FAILED(hr))
        {
            break;
        }

        hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 0, FALSE, &pBuffer);
        if (FAILED(hr))
        {
            break;
        }

        hr = pRTSample->AddBuffer(pBuffer);
        if (FAILED(hr))
        {
            break;
        }

        // GG For 3D - don't need.
        if (m_b3DVideo && 0 != m_vp3DOutput)
        {
            SafeRelease(pBuffer);

            hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 1, FALSE, &pBuffer);
            if (FAILED(hr))
            {
                break;
            }

            hr = pRTSample->AddBuffer(pBuffer);
            if (FAILED(hr))
            {
                break;
            }
        }

        QueryPerformanceCounter(&lpcStart);

        QueryPerformanceCounter(&lpcEnd);

        //
        // Create Output View of Output Surfaces.
        //
        D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC OutputViewDesc;
        ZeroMemory( &OutputViewDesc, sizeof( OutputViewDesc ) );
        if (m_b3DVideo && m_bStereoEnabled)
        {
            OutputViewDesc.ViewDimension =  D3D11_VPOV_DIMENSION_TEXTURE2DARRAY;
        }
        else
        {
            OutputViewDesc.ViewDimension =  D3D11_VPOV_DIMENSION_TEXTURE2D;
        }
        OutputViewDesc.Texture2D.MipSlice = 0;
        OutputViewDesc.Texture2DArray.MipSlice = 0;
        OutputViewDesc.Texture2DArray.FirstArraySlice = 0;
        if (m_b3DVideo && 0 != m_vp3DOutput)
        {
            OutputViewDesc.Texture2DArray.ArraySize = 2; // STEREO
        }

        QueryPerformanceCounter(&lpcStart);

        hr  = m_pDX11VideoDevice->CreateVideoProcessorOutputView(pDXGIBackBuffer, m_pVideoProcessorEnum, &OutputViewDesc, &pOutputView);
        if (FAILED(hr))
        {
            break;
        }

        D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC InputLeftViewDesc;
        ZeroMemory( &InputLeftViewDesc, sizeof( InputLeftViewDesc ) );
        InputLeftViewDesc.FourCC = 0;
        InputLeftViewDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
        InputLeftViewDesc.Texture2D.MipSlice = 0;
        InputLeftViewDesc.Texture2D.ArraySlice = dwLeftViewIndex;

        hr = m_pDX11VideoDevice->CreateVideoProcessorInputView(pLeftTexture2D, m_pVideoProcessorEnum, &InputLeftViewDesc, &pLeftInputView);
        if (FAILED(hr))
        {
            break;
        }

        if (m_b3DVideo && MFVideo3DSampleFormat_MultiView == m_vp3DOutput && pRightTexture2D)
        {
            D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC InputRightViewDesc;
            ZeroMemory( &InputRightViewDesc, sizeof( InputRightViewDesc ) );
            InputRightViewDesc.FourCC = 0;
            InputRightViewDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
            InputRightViewDesc.Texture2D.MipSlice = 0;
            InputRightViewDesc.Texture2D.ArraySlice = dwRightViewIndex;

            hr = m_pDX11VideoDevice->CreateVideoProcessorInputView(pRightTexture2D, m_pVideoProcessorEnum, &InputRightViewDesc, &pRightInputView);
            if (FAILED(hr))
            {
                break;
            }
        }
        QueryPerformanceCounter(&lpcEnd);

        QueryPerformanceCounter(&lpcStart);

        SetVideoContextParameters(pVideoContext, &SRect, &TRect, unInterlaceMode);

        // Enable/Disable Stereo
        if (m_b3DVideo)
        {
            pVideoContext->VideoProcessorSetOutputStereoMode(m_pVideoProcessor, m_bStereoEnabled);

            D3D11_VIDEO_PROCESSOR_STEREO_FORMAT vpStereoFormat = D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_SEPARATE;
            if (MFVideo3DSampleFormat_Packed_LeftRight == m_vp3DOutput)
            {
                vpStereoFormat = D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_HORIZONTAL;
            }
            else if (MFVideo3DSampleFormat_Packed_TopBottom == m_vp3DOutput)
            {
                vpStereoFormat = D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_VERTICAL;
            }

            pVideoContext->VideoProcessorSetStreamStereoFormat(m_pVideoProcessor,
                0, m_bStereoEnabled, vpStereoFormat, TRUE, TRUE, D3D11_VIDEO_PROCESSOR_STEREO_FLIP_NONE, 0);
        }

        QueryPerformanceCounter(&lpcEnd);

        QueryPerformanceCounter(&lpcStart);

        D3D11_VIDEO_PROCESSOR_STREAM StreamData;
        ZeroMemory( &StreamData, sizeof( StreamData ) );
        StreamData.Enable = TRUE;
        StreamData.OutputIndex = 0;
        StreamData.InputFrameOrField = 0;
        StreamData.PastFrames = 0;
        StreamData.FutureFrames = 0;
        StreamData.ppPastSurfaces = NULL;
        StreamData.ppFutureSurfaces = NULL;
        StreamData.pInputSurface = pLeftInputView;
        StreamData.ppPastSurfacesRight = NULL;
        StreamData.ppFutureSurfacesRight = NULL;

        if (m_b3DVideo && MFVideo3DSampleFormat_MultiView == m_vp3DOutput && pRightTexture2D)
        {
            StreamData.pInputSurfaceRight = pRightInputView;
        }

        hr = pVideoContext->VideoProcessorBlt(m_pVideoProcessor, pOutputView, 0, 1, &StreamData );
        if (FAILED(hr))
        {
            break;
        }
        QueryPerformanceCounter(&lpcEnd);

        if (ppVideoOutFrame != NULL)
        {
            *ppVideoOutFrame = pRTSample;
            (*ppVideoOutFrame)->AddRef();
        }
    }
    while (FALSE);

    SafeRelease(pBuffer);
    SafeRelease(pRTSample);
    SafeRelease(pDXGIBackBuffer);
    SafeRelease(pOutputView);
    SafeRelease(pLeftInputView);
    SafeRelease(pRightInputView);
    SafeRelease(pVideoContext);

    return hr;
}

One final point: the documentation states:

Specifically, this sample shows how to:

  • Decode the video using the Media Foundation APIs
  • Render the decoded video using the DirectX 11 APIs
  • Output the video stream to multi-monitor displays

I cannot find anything that does any decoding, unless it happens through some MF magic incantation that I have not yet stumbled across. It is not a big deal, because I can put an H.264 decoder MFT in front of it without a problem; I just want to be clear about what the documentation means.
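
(For completeness, this is the sort of thing I mean by putting a decoder in front: a rough sketch using the standard Media Foundation enumeration APIs. Nothing in the renderer sample does this; the function name and the NV12 output choice are mine, and MFStartup() is assumed to have been called already.)

    #include <windows.h>
    #include <mfapi.h>
    #include <mfidl.h>
    #include <mftransform.h>
    #include <mferror.h>
    // link: mfplat.lib mfuuid.lib ole32.lib

    // Sketch only: enumerate and activate an H.264 video decoder MFT that
    // could be placed upstream of the renderer's stream sink.
    HRESULT CreateH264DecoderMFT(IMFTransform** ppDecoder)
    {
        MFT_REGISTER_TYPE_INFO inType  = { MFMediaType_Video, MFVideoFormat_H264 };
        MFT_REGISTER_TYPE_INFO outType = { MFMediaType_Video, MFVideoFormat_NV12 };

        IMFActivate** ppActivate = NULL;
        UINT32 count = 0;

        HRESULT hr = MFTEnumEx(MFT_CATEGORY_VIDEO_DECODER,
                               MFT_ENUM_FLAG_SYNCMFT | MFT_ENUM_FLAG_SORTANDFILTER,
                               &inType, &outType, &ppActivate, &count);
        if (SUCCEEDED(hr) && 0 == count)
        {
            hr = MF_E_TOPO_CODEC_NOT_FOUND;
        }

        if (SUCCEEDED(hr))
        {
            // Take the first (best-ranked) match.
            hr = ppActivate[0]->ActivateObject(IID_PPV_ARGS(ppDecoder));
        }

        for (UINT32 i = 0; i < count; i++)
        {
            ppActivate[i]->Release();
        }
        CoTaskMemFree(ppActivate);

        return hr;
    }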

Any help would be greatly appreciated. Thanks!


1 Answer


There are 2 similar functions in Presenter.cpp that process frames, and I cannot work out the difference between them. ProcessFrameUsingD3D11() uses VideoProcessorBlt() to do the actual rendering.

These functions are not the rendering itself - they are two ways of scaling the video frames. The scaling is done either by a ready-made Media Foundation transform managed internally by the renderer's presenter, or with the help of a Direct3D 11 video processor. Both actually use Direct3D 11, so the two methods are close to one another, and scaling is just one step in the overall rendering process.
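
To make that concrete, here is a minimal sketch (my illustration, not code from the sample) of what the XVP path boils down to: the presenter drives the stock Video Processor MFT (CLSID_VideoProcessorMFT), and once that MFT has been handed the renderer's DXGI device manager it performs its scaling and format conversion on the same Direct3D 11 device.

    #include <windows.h>
    #include <mfapi.h>
    #include <mfidl.h>
    #include <mftransform.h>
    // link: mfplat.lib mfuuid.lib ole32.lib; COM must already be initialized.

    // Sketch: create the stock Video Processor MFT ("XVP") and bind it to the
    // Direct3D 11 device via the DXGI device manager so processing stays on the GPU.
    HRESULT CreateVideoProcessorXVP(IMFDXGIDeviceManager* pDeviceManager,
                                    IMFTransform** ppXVP)
    {
        HRESULT hr = CoCreateInstance(CLSID_VideoProcessorMFT, NULL,
                                      CLSCTX_INPROC_SERVER, IID_PPV_ARGS(ppXVP));
        if (SUCCEEDED(hr) && pDeviceManager != NULL)
        {
            hr = (*ppXVP)->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER,
                                          (ULONG_PTR)pDeviceManager);
        }
        return hr;
    }

The Direct3D 11 path skips the MFT and calls ID3D11VideoContext::VideoProcessorBlt directly, but either way the work ends up on the same hardware video processor.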

I cannot find anything that does any decoding, unless it happens through some MF magic incantation that I have not yet stumbled across.

There is no decoding: the sink's video format list in StreamSink.cpp suggests as much by listing only uncompressed video formats. The renderer presents frames carried by Direct3D 11 textures, which in turn assumes that decoding, especially by a DXVA2-capable hardware decoder, has already produced decoded textures at the renderer's input.
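
For illustration only (this is not the sample's actual array; check StreamSink.cpp for the real list), the advertised types are all along these lines, i.e. raw frame formats rather than compressed bitstreams such as H.264:

    #include <mfapi.h>

    // Illustrative list of uncompressed video formats of the kind the stream
    // sink advertises; a compressed type like MFVideoFormat_H264 is never offered.
    GUID const* const g_exampleUncompressedFormats[] =
    {
        &MFVideoFormat_NV12,
        &MFVideoFormat_YUY2,
        &MFVideoFormat_RGB32,
    };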

Answered 2018-05-02T14:38:44.863