1

我正在做《CUDA C Programming Guide》第 35 页中的矩阵乘法示例,为了练习,我复制了代码并完成了缺失的代码。我了解程序的逻辑以及它应该如何工作,但我没有得到预期的结果。

这是我制作的完整代码,我不知道错误是我的还是来自示例?

编码:

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>    
#include <stdio.h>
#include <stdio.h>

using namespace std;
#define BLOCK_SIZE 16

typedef struct
{
    int width;
    int height;
    float *elements;
}Matrix;

__global__ void MatMulKernel(const Matrix,const Matrix, Matrix C);

void MatMul(const Matrix A,const Matrix B, Matrix C) 
{
    size_t size;
    //Matrix A creation y storage in device memory 
    Matrix d_A;
    d_A.width=A.width;
    d_A.height=A.height;
    size=A.height*A.width*sizeof(float);
    cudaMalloc(&d_A.elements,size);
    cudaMemcpy(d_A.elements,A.elements,size,cudaMemcpyHostToDevice);
    //Matrix B creation y storage in device memory 
    Matrix d_B;
    d_B.width=B.width;
    d_B.height=B.height;
    size=B.height*B.width*sizeof(float);
    cudaMalloc(&d_B.elements,size);
    cudaMemcpy(d_B.elements,B.elements,size,cudaMemcpyHostToDevice);
    //Matrix C creation y storage in device memory         
    Matrix d_C;
    d_C.width=C.width;
    d_C.height=C.height;
    size=C.height*C.width*sizeof(float);
    cudaMalloc(&d_C.elements,size);
    //        
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(B.width/dimBlock.x,A.height/dimBlock.y);
    MatMulKernel<<<dimGrid,dimBlock>>>(d_A,d_B,d_C);
    //Copy the result in the matrix C from the device to the host.        
    cudaMemcpy(C.elements,d_C.elements,size,cudaMemcpyDeviceToHost);  
    //edit the missing code.
    // for(int i=0;i<BLOCK_SIZE*BLOCK_SIZE;i++){cout<<C.elements[i]<<endl;}      
    // result in random numbers
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    float Cvalue=0;
    int row=blockIdx.y*blockDim.y+threadIdx.y;
    int col=blockIdx.x*blockDim.x+threadIdx.x;
    for(int e=0;e<A.width;++e)
    {
        Cvalue+=A.elements[row*A.width+e]*B.elements[e*B.width+col];
    }
    C.elements[row*C.width+col]=Cvalue;
}

int main()
{
    cout<<"Matrices"<<endl;
    //Declarationd of the A,B,C matrix´s
    float a[15][15];        
    float b[15][15];
    float c[15][15];
    //Fill the matrix whit some numbers.
    int cont0=0;
    for(int c=0;c<15;c++)
    {
        for(int v=0;v<15;v++)
        {
            a[v][c]=cont0;
            b[v][c]=cont0;
            cont0++;
        }
    }
    //Flatten the matrix for the passing to the kernel
    int offset=0;
    float a_t[256];
    float b_t[256];
    for(int y=0;y<15;y++)
    {                        
        for(int x=0;x<15;x++)
        {
            a_t[x+offset]=a[x][y];
            b_t[x+offset]=a[x][y];
        }
        offset=offset+15;
    }
    float t_C[256];
    //Completing the matrix format for the kernel.
    Matrix m_A;
    m_A.height=15;
    m_A.width=15;
    m_A.elements=a_t;
    Matrix m_B;
    m_B.height=15;
    m_B.width=15;
    m_B.elements=b_t;
    Matrix m_C;
    m_C.height=15;
    m_C.width=15;
    m_C.elements=t_C;
    //Passing the formated matrix to the kernel.
    MatMul(m_A,m_B,m_C);                
    cout<<"Final"<<endl;        
return 0;
}

程序编译并运行,但结果矩阵C.elements来自: cudaMemcpy(C.elements,d_C.elements,size,cudaMemcpyDeviceToHost); 是一个随机数。我试图将它用作指向数组的指针,但我没有从中得到任何东西,并且像数组一样对待它也不起作用。

如果有人能帮我完成这件事,我会很高兴。

4

2 回答 2

3

您的代码在内核中的数组索引和 CPU 上的初始化之间存在轻微的不匹配。这是@harrism建议的带有调试的更正代码:

    #include "cuda_runtime.h"
    #include "device_launch_parameters.h"
    #include "cuda_runtime.h"
    #include "device_launch_parameters.h"
    #include <iostream>
    #include <stdio.h>
    #include <stdio.h>

    using namespace std;
    #define BLOCK_SIZE 16

    typedef struct
    {
        int width;
        int height;
        float *elements;
    }Matrix;

    __global__ void MatMulKernel(const Matrix,const Matrix, Matrix C);

    void MatMul(const Matrix A,const Matrix B, Matrix C)
    {
        size_t size;
        //Matrix A creation y storage in device memory
        Matrix d_A;
        d_A.width=A.width;
        d_A.height=A.height;
        size=A.height*A.width*sizeof(float);
        cudaMalloc(&d_A.elements,size);
        cudaMemcpy(d_A.elements,A.elements,size,cudaMemcpyHostToDevice);
        //Matrix B creation y storage in device memory
        Matrix d_B;
        d_B.width=B.width;
        d_B.height=B.height;
        size=B.height*B.width*sizeof(float);
        cudaMalloc(&d_B.elements,size);
    cudaMemcpy(d_B.elements,B.elements,size,cudaMemcpyHostToDevice);
    //Matrix C creation y storage in device memory
    Matrix d_C;
    d_C.width=C.width;
    d_C.height=C.height;
    //cudaMalloc(&d_C,sizeof(Matrix));
    //cudaMemcpy(d_C,C,sizeof(Matrix),cudaMemcpyHostToDevice);
    size=C.height*C.width*sizeof(float);
    cudaMalloc(&d_C.elements,size);
    //
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(B.width/dimBlock.x,A.height/dimBlock.y);
    MatMulKernel<<<dimGrid,dimBlock>>>(d_A,d_B,d_C);
    //Copy the result in the matrix C from the device to the host.
    printf("error code: %s\n",cudaGetErrorString(cudaGetLastError()));
    cudaMemcpy(C.elements,d_C.elements,size,cudaMemcpyDeviceToHost);
    //
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
        //printf("%d\n",threadIdx.x);
    float Cvalue=0;
    int row=blockIdx.y*blockDim.y+threadIdx.y;
    int col=blockIdx.x*blockDim.x+threadIdx.x;
    for(int e=0;e<A.width;++e)
    {
        Cvalue+=A.elements[row*A.width+e]*B.elements[e*B.width+col];
    }
    C.elements[row*C.width+col]=Cvalue;
}

int print_matrix(Matrix A){
        printf("Matrix:\n");
        int i;
        for(i=0; i<A.width*A.height; i++){
                if(i%A.width==0) printf("\n");
                printf("%6.4f\t",A.elements[i]);
        }
        printf("\n");
}
int main()
{
    cout<<"Matrices"<<endl;
    //Declarationd of the A,B,C matrix.s
    float a[BLOCK_SIZE][BLOCK_SIZE];
    float b[BLOCK_SIZE][BLOCK_SIZE];
    float c[BLOCK_SIZE][BLOCK_SIZE];
    //Fill the matrix whit some numbers.
    int cont0=0;
    for(int c=0;c<BLOCK_SIZE;c++)
    {
        for(int v=0;v<BLOCK_SIZE;v++)
        {
            a[v][c]=cont0;
            b[v][c]=cont0;
            cont0++;
        }
    }
    //Flatten the matrix for the passing to the kernel
    int offset=0;
    float a_t[BLOCK_SIZE*BLOCK_SIZE];
    float b_t[BLOCK_SIZE*BLOCK_SIZE];
    for(int y=0;y<BLOCK_SIZE;y++)
    {
        for(int x=0;x<BLOCK_SIZE;x++)
        {
            a_t[x+offset]=a[x][y];
            b_t[x+offset]=a[x][y];
        }
        offset=offset+BLOCK_SIZE;
    }
    float t_C[BLOCK_SIZE*BLOCK_SIZE];
    //Completing the matrix format for the kernel.
    Matrix m_A;
    m_A.height=BLOCK_SIZE;
    m_A.width=BLOCK_SIZE;
    m_A.elements=a_t;
    Matrix m_B;
    m_B.height=BLOCK_SIZE;
    m_B.width=BLOCK_SIZE;
    m_B.elements=b_t;
    Matrix m_C;
    m_C.height=BLOCK_SIZE;
    m_C.width=BLOCK_SIZE;
    m_C.elements=t_C;
    //Passing the formated matrix to the kernel.
    print_matrix(m_A);
    print_matrix(m_B);
    MatMul(m_A,m_B,m_C);
    print_matrix(m_C);
    cout<<"Final"<<endl;
return 0;
}

检查输出。如果您看到结果错误,请检查系统上的内核错误,该错误会在输出中报告。

于 2012-09-13T08:19:46.123 回答
1

首先,请参阅此处了解如何为您的问题获得有用的答案。特别是,您应该始终检查 CUDA API 调用和内核启动的返回值。此外,运行cuda-memcheck通常对于检测这样的越界访问非常有帮助。

@harrism 问你怎么知道结果是错误的,因为你似乎没有对它做任何事情。

但更重要的是,您使用 16x16 线程块计算 15x15 矩阵,但您没有注意禁用越界线程。由于您正在尝试创建一个简单的示例,只需将矩阵大小增加到 16x16 - 如果您想处理奇数大小,那么您需要实现控制逻辑(或使用 cuBLAS!)。

于 2012-09-13T08:07:37.423 回答