2

操作系统: Windows 8.1 64 位 - 完全更新

IDE: Visual Studio Professional 2013 - 版本 12.0.30110.00 更新 1 - 完全更新

我有一种情况,不是在编译期间而是在运行时出现以下异常。

parallel_for_each 调用的入口函数中引用的可写数据容器的数量 (17) 超过了所选加速器的限制 (8)。

发生这种情况的函数如下所示

/// Runs one training epoch of a three-layer network on the GPU via C++ AMP.
///
/// @param mainAccelView  accelerator view the kernel is queued on
/// @param activatorState activation-function parameters (captured by value into the kernel)
/// @param trainingState  learning-rate parameters (captured by value into the kernel)
/// @param avLayer1..3    per-layer weight matrices; read and updated on the GPU
/// @param avPredictors   input rows (read-only in the kernel)
/// @param avTargets      expected outputs (read-only in the kernel)
/// @param avErrors       per-epoch error buffer; avErrors[epoch] is written on the CPU
/// @param epoch          index of the epoch being run
///
/// NOTE(review): the kernel below captures 17 writable array_view objects,
/// which exceeds the 8-UAV limit of DirectX 11 accelerators (the runtime
/// exception described in the question). On DX11 hardware the per-layer
/// buffers must be packed into fewer writable containers, or the
/// accelerator must support DirectX 11.1 (64-UAV limit) — TODO confirm
/// against the target hardware.
void run_epoch(
    accelerator_view mainAccelView,
    ActivatorState activatorState,
    TrainingState trainingState,
    array_view<double, 2> avLayer1,
    array_view<double, 2> avLayer2,
    array_view<double, 2> avLayer3,
    array_view<const double, 2> avPredictors,
    array_view<const double, 2> avTargets,
    array_view<double> avErrors,
    int epoch
    ){
    // BUG FIX: the original body re-declared 'mainAccelView' here
    // (accelerator_view mainAccelView = accelerator().create_view(...)),
    // which is ill-formed C++ — a declaration in the outermost block of a
    // function may not redeclare a parameter — and discarded the caller's
    // view. The parameter is now used directly (passed to parallel_for_each
    // below) so the kernel runs on the accelerator view the caller chose.

    int noOfColumnsPredictors = AmpUtils::get_no_of_columns(avPredictors);
    int noOfRowsPredictors = AmpUtils::get_no_of_rows(avPredictors, noOfColumnsPredictors);

    int noOfColumnsLayer1 = AmpUtils::get_no_of_columns(avLayer1);
    int noOfColumnsLayer2 = AmpUtils::get_no_of_columns(avLayer2);
    int noOfColumnsLayer3 = AmpUtils::get_no_of_columns(avLayer3);

    int noOfRowsLayer1 = AmpUtils::get_no_of_rows(avLayer1, noOfColumnsLayer1);
    int noOfRowsLayer2 = AmpUtils::get_no_of_rows(avLayer2, noOfColumnsLayer2);
    int noOfRowsLayer3 = AmpUtils::get_no_of_rows(avLayer3, noOfColumnsLayer3);

    // Scratch buffers, one row per predictor row.
    // NOTE(review): array_view is normally constructed over existing host
    // data; these extent-only constructions are kept as posted — verify
    // against the real project code.
    array_view<double, 2> avOutputLayer1(noOfRowsPredictors, noOfRowsLayer1);
    array_view<double, 2> avOutputLayer2(noOfRowsPredictors, noOfRowsLayer2);
    array_view<double, 2> avOutputLayer3(noOfRowsPredictors, noOfRowsLayer3);

    array_view<double, 2> avErrorsLayer1(noOfRowsPredictors, noOfRowsLayer1);
    array_view<double, 2> avErrorsLayer2(noOfRowsPredictors, noOfRowsLayer2);
    array_view<double, 2> avErrorsLayer3(noOfRowsPredictors, noOfRowsLayer3);

    array_view<double, 2> avThresholdLayer1(noOfRowsPredictors, noOfRowsLayer1);
    array_view<double, 2> avThresholdLayer2(noOfRowsPredictors, noOfRowsLayer2);
    array_view<double, 2> avThresholdLayer3(noOfRowsPredictors, noOfRowsLayer3);

    array_view<double, 3> avWeightsLayer1(noOfRowsPredictors, noOfRowsLayer1, (noOfColumnsLayer1 - 1));
    array_view<double, 3> avWeightsLayer2(noOfRowsPredictors, noOfRowsLayer2, (noOfColumnsLayer2 - 1));
    array_view<double, 3> avWeightsLayer3(noOfRowsPredictors, noOfRowsLayer3, (noOfColumnsLayer3 - 1));

    array_view<double, 2> avErrorsTempBuffer(noOfRowsPredictors, noOfRowsLayer3);
    int errorTempBufferSize = avErrorsTempBuffer.extent.size();

    array_view<double> avEpochErrors(noOfRowsPredictors);

    try{
        // One kernel thread per predictor row, queued on the caller's view.
        parallel_for_each(mainAccelView, extent<1>(AmpUtils::get_no_of_rows(avPredictors)), [=](index<1> idx) restrict(cpu, amp){
            int predictorRow = idx[0];

            // step 1: compute
            // step 11: compute layer 1
            compute_layer(activatorState, avPredictors[predictorRow], avLayer1, avOutputLayer1, noOfColumnsLayer1, predictorRow);

            // step 12: compute layer 2
            compute_layer(activatorState, avPredictors[predictorRow], avLayer2, avOutputLayer2, noOfColumnsLayer2, predictorRow);

            // step 13: compute layer 3
            compute_layer(activatorState, avPredictors[predictorRow], avLayer3, avOutputLayer3, noOfColumnsLayer3, predictorRow);


            // step 2: calculate_error
            // step 21: calculate_error layer 3 (squared error into the temp
            // buffer, derivative-scaled error into avErrorsLayer3)
            for (int column = 0; column < noOfRowsLayer3; column++){
                double neuronError = avTargets[predictorRow][column] - avOutputLayer3[predictorRow][column];
                avErrorsTempBuffer[predictorRow][column] = neuronError * neuronError;
                avErrorsLayer3[predictorRow][column] = neuronError * AmpActivator::derivative2(activatorState, avOutputLayer3[predictorRow][column]);
            }

            // Sum the squared errors of this predictor row.
            double errorSum = 0.0;
            for (int column = 0; column < errorTempBufferSize; column++){
                errorSum += avErrorsTempBuffer[predictorRow][column];
            }

            avEpochErrors[predictorRow] = errorSum;

            // step 22: calculate_error layer 2
            calculate_error_layer(activatorState, avErrorsLayer2[predictorRow], avErrorsLayer3, avLayer3, avOutputLayer2[predictorRow], noOfRowsLayer3, noOfRowsLayer3);

            // step 23: calculate_error layer 1
            calculate_error_layer(activatorState, avErrorsLayer1[predictorRow], avErrorsLayer2, avLayer2, avOutputLayer1[predictorRow], noOfRowsLayer2, noOfRowsLayer2);


            // step 3: calculate_updates
            // step 31: calculate_updates layer 1
            calculate_updates_layer(trainingState, avErrorsLayer1[predictorRow], avPredictors[predictorRow], avThresholdLayer1[predictorRow], avWeightsLayer1[predictorRow], (noOfColumnsLayer1 - 1), noOfRowsLayer1);

            // step 32: calculate_updates layer 2
            calculate_updates_layer(trainingState, avErrorsLayer2[predictorRow], avPredictors[predictorRow], avThresholdLayer2[predictorRow], avWeightsLayer2[predictorRow], (noOfColumnsLayer2 - 1), noOfRowsLayer2);

            // step 33: calculate_updates layer 3
            calculate_updates_layer(trainingState, avErrorsLayer3[predictorRow], avPredictors[predictorRow], avThresholdLayer3[predictorRow], avWeightsLayer3[predictorRow], (noOfColumnsLayer3 - 1), noOfRowsLayer3);


            // step 4: update_network
            // step 41: update_network layer 1
            update_layer(avLayer1, avWeightsLayer1[predictorRow], avThresholdLayer1[predictorRow], noOfColumnsLayer1, noOfRowsLayer1);

            // step 42: update_network layer 2
            update_layer(avLayer2, avWeightsLayer2[predictorRow], avThresholdLayer2[predictorRow], noOfColumnsLayer2, noOfRowsLayer2);

            // step 43: update_network layer 3
            update_layer(avLayer3, avWeightsLayer3[predictorRow], avThresholdLayer3[predictorRow], noOfColumnsLayer3, noOfRowsLayer3);
        });

        // Bring the per-row errors back to the host and reduce them.
        avEpochErrors.synchronize();

        double epochErrorsSum = 0.0;
        for (int i = 0; i < (int)avEpochErrors.extent.size(); i++){
            epochErrorsSum += avEpochErrors[i];
        }

        avErrors[epoch] = epochErrorsSum;
    }
    // BUG FIX: catch by const reference — catching std::exception by value
    // slices derived exception types and copies the object.
    catch (const std::exception& e){
        std::wcout << "Exception Project::run_epoch: " << e.what() << std::endl;
    }
}

根据这篇 MSDN 帖子(见原问题链接),自 Windows 8 起,可写容器的最大数量应该已增加到 64 个。

我现在的问题是,是否有不同类型的可写容器,而我仍然只能使用最多 8 个某种类型的容器?

4

1 回答 1

0

严格来说,限制在于 UAV(Unordered Access View,无序访问视图)的数量。这与 DirectX 版本相关,而不是与 Windows 版本相关。

每个内核允许的可写 array_view/array/texture/writeonly_texture_view 对象数量有限。C++ AMP 对每个内核可写容器的数量有限制:具体来说,每个内核的可写 array_view + array + texture + writeonly_texture_view 的总数在 DirectX 11 上不应超过 8 个,在 DirectX 11.1 上不应超过 64 个。每个内核允许的只读 array_view/array/texture 对象总数为 128 个;为只读数据显式指定只读限定(例如 array_view<const T>),可以帮助您避免达到可写对象数量的上限。

来自本机代码中的并行编程

DX11.1 在 Win8 上受支持,并且已以某种有限的形式向后移植到 Win7。查看我的机器,它运行的是 Windows 8.1,但似乎使用的是 DX 11 而不是 11.1 驱动程序。DXDIAG.EXE会告诉你你在用什么。您需要确保您的卡支持 DX11.1 并且您安装了最新的驱动程序。

于 2014-03-13T18:20:58.783 回答