
I am trying to learn CUDA by solving some standard problems. For example, I am solving the two-dimensional diffusion equation with the following code, but my results differ from the reference results and I cannot figure out why.

//kernel definition
__global__ void diffusionSolver(double* A, double * old,int n_x,int n_y)
{

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;

    if(i*(n_x-i-1)*j*(n_y-j-1)!=0)
        A[i+n_y*j] = A[i+n_y*j] + (old[i-1+n_y*j]+old[i+1+n_y*j]+
                       old[i+(j-1)*n_y]+old[i+(j+1)*n_y] -4*old[i+n_y*j])/40;


}

int main()
{


    int i,j ,M;
    M = n_y ;
    phi = (double *) malloc( n_x*n_y* sizeof(double));
    phi_old = (double *) malloc( n_x*n_y* sizeof(double));
    dummy = (double *) malloc( n_x*n_y* sizeof(double));
    int iterationMax =10;
    //phase initialization
    for(j=0;j<n_y ;j++)
    {
        for(i=0;i<n_x;i++)
        {
            if((.4*n_x-i)*(.6*n_x-i)<0)
                phi[i+M*j] = -1;
            else 
                phi[i+M*j] = 1;

            phi_old[i+M*j] = phi[i+M*j];
        }
    }

    double *dev_phi;
    cudaMalloc((void **) &dev_phi, n_x*n_y*sizeof(double));

    dim3 threadsPerBlock(100,10);
    dim3 numBlocks(n_x*n_y / threadsPerBlock.x, n_x*n_y / threadsPerBlock.y);

    //start iterating 
    for(int z=0; z<iterationMax; z++)
    {
        //copy array on host to device
        cudaMemcpy(dev_phi, phi, n_x*n_y*sizeof(double),
                cudaMemcpyHostToDevice);

        //call kernel
        diffusionSolver<<<numBlocks, threadsPerBlock>>>(dev_phi, phi_old,n_x,n_y);

        //get updated array back on host
        cudaMemcpy(phi, dev_phi,n_x*n_y*sizeof(double), cudaMemcpyDeviceToHost);

        //old values will be assigned new values
        for(j=0;j<n_y ;j++)
        {
            for(i=0;i<n_x;i++)
            {
                phi_old[i+n_y*j] = phi[i+n_y*j];
            }
        }
    }

    return 0;
}

Can someone tell me whether there is anything wrong with this procedure? Any help would be much appreciated.


3 Answers


talonmies, brano and huseyin have already pointed out some of the mistakes in your code.

The diffusion (heat) equation is one of the classic examples of a partial differential equation that can be solved with CUDA. There is also a complete worked example in Chapter 7 of the book CUDA by Example.

For the reference of future users, I am providing a full working example below, including both CPU and GPU code. Rather than swapping pointers, as suggested by talonmies, I have condensed two Jacobi iterations into a single loop.

#include <iostream>
#include <cstdio>
#include <cmath>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include "Utilities.cuh"

#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16

/***********************************/
/* JACOBI ITERATION FUNCTION - GPU */
/***********************************/
__global__ void Jacobi_Iterator_GPU(const float * __restrict__ T_old, float * __restrict__ T_new, const int NX, const int NY)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x ;
    const int j = blockIdx.y * blockDim.y + threadIdx.y ;

                                //                         N 
    int P = i + j*NX;           // node (i,j)              |
    int N = i + (j+1)*NX;       // node (i,j+1)            |
    int S = i + (j-1)*NX;       // node (i,j-1)     W ---- P ---- E
    int E = (i+1) + j*NX;       // node (i+1,j)            |
    int W = (i-1) + j*NX;       // node (i-1,j)            |
                                //                         S 

    // --- Only update "interior" (not boundary) node points
    if (i>0 && i<NX-1 && j>0 && j<NY-1) T_new[P] = 0.25 * (T_old[E] + T_old[W] + T_old[N] + T_old[S]);
}

/***********************************/
/* JACOBI ITERATION FUNCTION - CPU */
/***********************************/
void Jacobi_Iterator_CPU(float * __restrict T, float * __restrict T_new, const int NX, const int NY, const int MAX_ITER)
{
    for(int iter=0; iter<MAX_ITER; iter=iter+2)
    {
        // --- Only update "interior" (not boundary) node points
        for(int j=1; j<NY-1; j++) 
            for(int i=1; i<NX-1; i++) {
                float T_E = T[(i+1) + NX*j];
                float T_W = T[(i-1) + NX*j];
                float T_N = T[i + NX*(j+1)];
                float T_S = T[i + NX*(j-1)];
                T_new[i+NX*j] = 0.25*(T_E + T_W + T_N + T_S);
            }

        for(int j=1; j<NY-1; j++) 
            for(int i=1; i<NX-1; i++) {
                float T_E = T_new[(i+1) + NX*j];
                float T_W = T_new[(i-1) + NX*j];
                float T_N = T_new[i + NX*(j+1)];
                float T_S = T_new[i + NX*(j-1)];
                T[i+NX*j] = 0.25*(T_E + T_W + T_N + T_S);
            }
    }
}

/******************************/
/* TEMPERATURE INITIALIZATION */
/******************************/
void Initialize(float * __restrict h_T, const int NX, const int NY)
{
    // --- Set left wall to 1
    for(int j=0; j<NY; j++) h_T[j * NX] = 1.0;
}


/********/
/* MAIN */
/********/
int main()
{
    const int NX = 256;         // --- Number of discretization points along the x axis
    const int NY = 256;         // --- Number of discretization points along the y axis

    const int MAX_ITER = 1;     // --- Number of Jacobi iterations

    // --- CPU temperature distributions
    float *h_T              = (float *)calloc(NX * NY, sizeof(float));
    float *h_T_old          = (float *)calloc(NX * NY, sizeof(float));
    Initialize(h_T,     NX, NY);
    Initialize(h_T_old, NX, NY);
    float *h_T_GPU_result   = (float *)malloc(NX * NY * sizeof(float));

    // --- GPU temperature distribution
    float *d_T;     gpuErrchk(cudaMalloc((void**)&d_T,      NX * NY * sizeof(float)));
    float *d_T_old; gpuErrchk(cudaMalloc((void**)&d_T_old,  NX * NY * sizeof(float)));

    gpuErrchk(cudaMemcpy(d_T,     h_T, NX * NY * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_T_old, d_T, NX * NY * sizeof(float), cudaMemcpyDeviceToDevice));

    // --- Grid size
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    dim3 dimGrid (iDivUp(NX, BLOCK_SIZE_X), iDivUp(NY, BLOCK_SIZE_Y));

    // --- Jacobi iterations on the host
    Jacobi_Iterator_CPU(h_T, h_T_old, NX, NY, MAX_ITER);

    // --- Jacobi iterations on the device
    for (int k=0; k<MAX_ITER; k=k+2) {
        Jacobi_Iterator_GPU<<<dimGrid, dimBlock>>>(d_T,     d_T_old, NX, NY);   // --- Update d_T_old     starting from data stored in d_T
        Jacobi_Iterator_GPU<<<dimGrid, dimBlock>>>(d_T_old, d_T    , NX, NY);   // --- Update d_T         starting from data stored in d_T_old
    }

    // --- Copy result from device to host
    gpuErrchk(cudaMemcpy(h_T_GPU_result, d_T, NX * NY * sizeof(float), cudaMemcpyDeviceToHost));

    // --- Calculate percentage root mean square error between host and device results
    float sum = 0., sum_ref = 0.;
    for (int j=0; j<NY; j++)
        for (int i=0; i<NX; i++) {
            sum     = sum     + (h_T_GPU_result[j * NX + i] - h_T[j * NX + i]) * (h_T_GPU_result[j * NX + i] - h_T[j * NX + i]);
            sum_ref = sum_ref + h_T[j * NX + i]                                * h_T[j * NX + i];
        }
    printf("Percentage root mean square error = %f\n", 100.*sqrt(sum / sum_ref));

    // --- Release host memory 
    free(h_T);
    free(h_T_old);
    free(h_T_GPU_result);

    // --- Release device memory
    gpuErrchk(cudaFree(d_T));
    gpuErrchk(cudaFree(d_T_old));

    return 0;
}

The Utilities.cu and Utilities.cuh files needed to run such an example are maintained on this GitHub page.
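If you do not want to pull in that file, the two helpers used above (gpuErrchk and iDivUp) are small. Their exact definitions live in the linked repository; the versions below are only a sketch of their usual form, assuming the common CUDA error-checking macro and a ceiling integer division, and are not the original code.

// --- Hypothetical stand-ins for the helpers declared in Utilities.cuh
#include <cstdio>
#include <cstdlib>

// --- Ceiling integer division: number of blocks needed to cover a elements with b threads each
__host__ __device__ int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }

// --- Abort with file/line information if a CUDA runtime call fails
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}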

Answered 2015-03-12T06:32:24.490

One big mistake you are making is that phi_old is passed to the kernel and used by the kernel, but it is a host pointer.
Allocate a dev_phi_old with cudaMalloc, set it to its initial values, and copy it to the GPU once before entering the z loop, as sketched below.
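A minimal sketch of that change, reusing the variable names from the question (phi, phi_old, n_x, n_y, iterationMax, numBlocks, threadsPerBlock) and meant to replace the allocation and the z loop inside the question's main, could look like this; it illustrates the pattern only and is not a complete program.

double *dev_phi, *dev_phi_old;
cudaMalloc((void **) &dev_phi,     n_x*n_y*sizeof(double));
cudaMalloc((void **) &dev_phi_old, n_x*n_y*sizeof(double));

// --- Copy both arrays to the device once, before the iteration loop
cudaMemcpy(dev_phi,     phi,     n_x*n_y*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_phi_old, phi_old, n_x*n_y*sizeof(double), cudaMemcpyHostToDevice);

for (int z = 0; z < iterationMax; z++)
{
    // --- Both arguments are now device pointers
    diffusionSolver<<<numBlocks, threadsPerBlock>>>(dev_phi, dev_phi_old, n_x, n_y);

    // --- Refresh the "old" values on the device for the next iteration
    cudaMemcpy(dev_phi_old, dev_phi, n_x*n_y*sizeof(double), cudaMemcpyDeviceToDevice);
}

// --- Copy the result back to the host only once, after the loop
cudaMemcpy(phi, dev_phi, n_x*n_y*sizeof(double), cudaMemcpyDeviceToHost);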

Answered 2012-08-17T07:11:56.510

Here:

A[i+n_y*j] = A[i+n_y*j] + (old[i-1+n_y*j]+old[i+1+n_y*j]+old[i+(j-1)*n_y]+old[i+(j+1)*n_y] -4*old[i+n_y*j])/40;

You are dividing by 40, an integer. This may give you a wrong diffusion rate; it can even result in no diffusion at all.

But A is a double array.

Divide by 40.0 (the diffusivity) instead and see if it works.

If this comes from Jos Stam's solver, it should be 4.0 rather than 40.

One more thing:

-4*old[i+n_y*j])/40;

Here you are multiplying by 4, an integer. This may also lead to integer casting of the whole term!

This:

-4.0*old[i+n_y*j])/40.0;

should reduce some of the errors.
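Whether or not truncation is the culprit in this exact expression (the numerator already contains doubles, so the integer constants are promoted), the general pitfall is easy to reproduce: division truncates when both operands are integers, while a single double operand promotes the other. A small standalone check, with made-up values rather than the question's data:

#include <cstdio>

int main()
{
    double old_val = 1.0;

    double pure_int   = 3 / 40;          // both operands int: truncates to 0
    double promoted   = old_val / 40;    // one operand double: 40 is promoted, result 0.025
    double explicit_d = old_val / 40.0;  // fully floating-point, same result but unambiguous

    printf("%f %f %f\n", pure_int, promoted, explicit_d);
    return 0;
}

Either way, spelling out 4.0 and 40.0, as suggested above, removes any ambiguity.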

Have a nice day.

Answered 2012-08-16T20:10:52.207