0

内核update_umatrix无法启动,分析器显示它需要-100%!计算的时间。

这可能是一个简单的问题,但我已经花了两个星期的时间,但内核启动仍然无法根据 nsight 分析器启动,至少U矩阵没有更新并且包含全零(这是 FCM 的部分实现)。

我的 GPU 是 GeForce 330M,计算能力为1.2.

float *U;
float *V;
float *X;

__device__ float *U_d;
__device__ float *V_d;
__device__ float *X_d;

__global__ void update_umatrix(float *sqrerror,int C,int N,int S,float m)
{

    int i,j,k;
    int example_is_centroid;
    float summation, D_ki, D_kj;
    float newU;

    __shared__ float tmp_sqrerror[DIM];
    /* For each example in the dataset */
    k = threadIdx.x + blockIdx.x*blockDim.x;
    int local_offset = threadIdx.x;
    tmp_sqrerror[local_offset]=0;
        /* Special case: If Example is equal to a Cluster Centroid,
       then U=1.0 for that cluster and 0 for all others */
        if ( (example_is_centroid=is_example_centroid(k,S,C)) != -1 ) {
            for(int i=0; i<C; i++)
            {
            if ( i == example_is_centroid )
                U_d[k*C+i]=1.0;
            else
                U_d[k*C+i]=0.0;
            }
            return;
        }
    /* For each class */
    for(int i=0; i< C; i++)
    {
        summation=0;

        /* Calculate summation */
        for (j=0; j < C; j++) {
            D_ki=distance(X_d, V_d,k*DIM,i*S,S);
            D_kj=distance(X_d, V_d,k*DIM,j*S,S);
            summation += powf( D_ki / D_kj , (2.0/ (m-1)));
        }

        /* Weight is 1/sum */
        newU=1.0/summation;

        /* Add to the squareDifference */
        tmp_sqrerror[local_offset] += powf(U_d[k*C+i] - newU, 2);

        U_d[k*C+i]=newU;

    }
    __syncthreads();
    int t= blockDim.x/2;
    while(t>0)
    {
        if(k+t < N && threadIdx.x<t)
            tmp_sqrerror[local_offset] += tmp_sqrerror[local_offset+t];
        t/=2;
        __syncthreads();
    }

    if(threadIdx.x==0)
        sqrerror[blockIdx.x] = tmp_sqrerror[0];

}




int init()
{

float m = 2.0;
int C=2;
int S=2;
int N=340*340;
    int i,j;

    /* Allocate necessary storage */
    V=(float *)CALLOC(S*C, sizeof(float));

    U=(float *)CALLOC(C*N,sizeof(float));
    cudaGetErrorString(cudaMalloc(&U_d,N*C*sizeof(float)));
    cudaGetErrorString(cudaMalloc(&V_d,C*S*sizeof(float)));

    /* Place random values in V, then update U matrix based on it */
    srand48(seed);
    for (i=0; i < C; i++) {
        for (j=0; j < S; j++) {
            V[i*S+j]=drand48() * max_value[j];
        }
    }
    float *dummy;
    cudaMalloc(&dummy,N*sizeof(float));
    cudaGetErrorString(cudaMemcpyToSymbol(&V_d,V,C*S*sizeof(float),0,cudaMemcpyHostToDevice));
    /* Once values are populated in V, update the U Matrix for sane values */
    update_umatrix<<<(N+DIM-1)/DIM,DIM>>>(dummy,C,N,S,m);
    cudaGetErrorString(cudaGetLastError());
cudaDeviceSynchronize();

cudaGetErrorString(cudaMemcpyFromSymbol(U,&U_d,N*C*sizeof(float),cudaMemcpyDeviceToHost));
fprintf(stdout,"Initialization completed.\n");

    return 0;
}

如果某个 i 的 X[k] == V[i],则返回那个 i。否则,返回 -1

__device__ int is_example_centroid(int k,int S, int C)
{
    int  i,x;

    for (i=0; i < C; i++) {
        for (x=0; x < S; x++) {
            if ( X_d[k*DIM+x] != V_d[i*S+x] ) break;
        }
        if ( x == S )  /* X==V */
            return i;
    }
    return -1;
}

和距离函数:

__device__ float distance(float *v1, float *v2,int startV1,int startV2,int S)
{
    int x,i;
    float sum=0;

    for (x=startV1,i=startV2; x < startV1+DIM && i<startV2+S; x++, i++)
        sum += (v1[x] - v2[i]) * (v1[x] - v2[i]);

    return sqrt(sum);
}
4

1 回答 1

2

这行代码无效:

cudaGetErrorString(cudaMemcpyToSymbol(&V_d,V,C*S*sizeof(float),0,cudaMemcpyHostToDevice));

它会编译,但会在运行时抛出错误。由于您似乎已将其包裹在错误检查中,因此我只能假设您的错误检查已损坏。

您传递给的符号只能cudaMemcpyToSymbol是有效的符号。它不能是符号的地址、符号加上偏移量或类似的东西。

我也认为这行代码是不明智的,尽管我无法向自己证明它不起作用:

cudaGetErrorString(cudaMalloc(&V_d,C*S*sizeof(float)));

如果你愿意,你可以这样做,但我不确定它是否在做你想要的,或者 malloc 的区域是否可以从主机以任何方式访问。

如果您想要可变大小的动态设备分配,为什么不使用普通的 cudaMalloc 方法呢?为什么使用设备符号?我并不是说你不能让它以某种方式工作,但这不是做到这一点的方法。

EDIT responding to a question below: If you want to eliminate a function parameter and use a device variable instead, you can probably make it work but it seems like a lot of trouble to me, and to what end?

Anyway this is what I would do if I felt I really needed to do that:

#include <stdio.h>

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)




float *V, *d_V;

__device__ float *V_d;

__global__ void my_kernel(){

  printf("V[3] = %f\n", V_d[3]);

}

int main() {

  int C=2;
  int S=2;

  V=(float *)calloc(S*C, sizeof(float));
  V[0] = 0.0f;
  V[3] = 4.0f;
  cudaMalloc((void **)&d_V,C*S*sizeof(float));
  cudaCheckErrors("malloc");
  cudaMemcpy(d_V, V, C*S*sizeof(float), cudaMemcpyHostToDevice);
  cudaCheckErrors("memcpy");
  cudaMemcpyToSymbol(V_d,&d_V,sizeof(float *));
  cudaCheckErrors("symbol");
  my_kernel<<<1,1>>>();
  cudaDeviceSynchronize();
  cudaCheckErrors("kernel");

  return 0;
}
于 2013-04-07T23:01:20.053 回答