
I have a memory allocation problem that I can't understand. I am trying to allocate a char array on the GPU (I am guessing it is probably a memory fragmentation issue).

Here is my code:

#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda.h>

inline void gpuAssert(cudaError_t code, char *file, int line, 
                 int abort=1)
{  
   if (code != cudaSuccess) {
      printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

__global__ void calc(char *k,char *i)
{
       *i=*k;
}
int main()
{
        char *dev_o=0;
        char *i;
        i = (char*)malloc(10*sizeof(char));

        cudaMalloc((void**)&dev_o,10*sizeof(char)); //Line 31


        calc<<<1,1>>>("arun",dev_o);

        gpuErrchk(cudaMemcpy(&i,dev_o,10*sizeof(char),cudaMemcpyDeviceToHost));

        cudaFree(dev_o);

        printf("string : %s \n",i);

        return 0;
}

but I'm getting this output:

GPUassert: out of memory sample2.cu 31

In the same code, when I tried to allocate an integer on the GPU instead, it worked properly.

My GPU device information is:

--- General Information for device 0 ---
Name:GeForce GTX 460 SE
Compute capability:2.1
Clock rate:1296000
Device copy overlap:Enabled
Kernel execution timeout :Enabled
--- Memory Information for device 0 ---
Total global mem:1073283072
Total constant Mem:65536
Max mem pitch:2147483647
Texture Alignment:512
--- MP Information for device 0 ---
Multiprocessor count:6
Shared mem per mp:49152
Registers per mp:32768
Threads in warp:32
Max threads per block:1024
Max thread dimensions:(1024, 1024, 64)
Max grid dimensions:(65535, 65535, 65535)

Can anyone tell me what the problem is and how I can overcome it?


1 Answer


Several things are wrong in your code:

  1. cudaMemcpy(&i, ...) should be cudaMemcpy(i, ...) (see the short sketch after this list).
  2. Check the error returned by your kernel call as explained in this post. If you don't, the error will seem to appear later in your code:

    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    
  3. The char *k argument you pass to your kernel ("arun") is a host pointer. You should allocate another device array and copy your data to the device before calling your kernel.
  4. You were also not doing any parallel work in your calc() kernel, since you were not using the thread index threadIdx.x. This was probably just for testing, though.
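
To make point 1 concrete, here is the difference between the two calls in isolation (a minimal sketch, not a complete program; dev_o stands for a valid device allocation of at least 10 bytes, as in your code):

char *i = (char*)malloc(10*sizeof(char));   // i points to a 10-byte host buffer

// Wrong: &i is the address of the pointer variable itself, so this copy overwrites
// the pointer (and adjacent host memory) instead of filling the buffer it points to.
cudaMemcpy(&i, dev_o, 10*sizeof(char), cudaMemcpyDeviceToHost);

// Right: i is the address of the 10-byte buffer that should receive the data.
cudaMemcpy(i, dev_o, 10*sizeof(char), cudaMemcpyDeviceToHost);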

Here is what you would get if you fix these issues:

#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda.h>

inline void gpuAssert(cudaError_t code, const char *file, int line, 
                 int abort=1)
{  
   if (code != cudaSuccess) {
      printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

__global__ void calc(char* k, char *i)
{
       i[threadIdx.x] = k[threadIdx.x];
}

int main()
{
        const char* msg = "arun";

        char *dev_i, *dev_k;
        char *i, *k;

        k = (char*)malloc(10*sizeof(char));
        i = (char*)malloc(10*sizeof(char));

        snprintf(k, 10*sizeof(char), "%s", msg);  // copy the message into the host buffer

        gpuErrchk(cudaMalloc((void**)&dev_i, 10*sizeof(char)));
        gpuErrchk(cudaMalloc((void**)&dev_k, 10*sizeof(char)));

        gpuErrchk(cudaMemcpy(dev_k, k, 10*sizeof(char), cudaMemcpyHostToDevice));

        calc<<<1,5>>>(dev_k, dev_i);

        gpuErrchk(cudaPeekAtLastError());
        // Synchronization is done by the next synchronous cudaMemcpy call; otherwise
        // you would need cudaDeviceSynchronize() to detect execution errors.
        //gpuErrchk(cudaDeviceSynchronize());

        gpuErrchk(cudaMemcpy(i, dev_i, 10*sizeof(char), cudaMemcpyDeviceToHost));

        printf("string : %s\n", i);

        cudaFree(dev_i);
        cudaFree(dev_k);
        free(i);
        free(k);

        return 0;
}
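
With these changes, building and running the program (for example with nvcc sample2.cu -o sample2, assuming the file name from your error message) should print

string : arun

since the kernel now reads from a device copy of the string and the result is copied back into the buffer that i points to, rather than over the pointer itself.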