Cuda said that the shared memory can only be shared by data in the same block. But a block can only have at most 1024 threads. What if I have a huge matrix, and want to take average of them with maximized threads.
Takes this as an example. (I didn't use the maximized threads in one block, just as a demo)
#include <iostream>
#include <stdio.h>
__global__ void
kernel(int *a, int dimx, int dimy)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = iy * dimx + ix;
__shared__ int array[64];
a[idx] = a[idx] + 1;
array[idx] = a[idx];
__syncthreads();
int sum=0;
for(int i=0; i<dimx*dimy; i++)
{
sum += array[i];
}
int average = sum/(dimx*dimy+1.0f);
a[idx] = average;
}
int
main()
{
int dimx = 8;
int dimy = 8;
int num_bytes = dimx*dimy*sizeof(int);
int *d_a=0, *h_a=0; // device and host pointers
h_a = (int*)malloc(num_bytes);
for (int i=0; i < dimx*dimy; i++){
*(h_a+i) = i;
}
cudaMalloc( (void**)&d_a, num_bytes );
//cudaMemset( d_a, 0, num_bytes );
cudaMemcpy( d_a, h_a, num_bytes, cudaMemcpyHostToDevice);
dim3 grid, block;
block.x = 4;
block.y = 4;
grid.x = dimx / block.x;
grid.y = dimy / block.y;
kernel<<<grid, block>>>(d_a, dimx, dimy);
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
std::cout << "the array a is:" << std::endl;
for (int row = 0; row < dimy; row++)
{
for (int col =0; col < dimx; col++)
{
std::cout << h_a[row * dimx + col] << " ";
}
std::cout << std::endl;
}
free(h_a);
cudaFree(d_a);
}
I create four blocks, and want to the results to be average of all of them. Now the result is:
the array a is:
3 3 3 3 4 4 4 4
3 3 3 3 4 4 4 4
3 3 3 3 4 4 4 4
3 3 3 3 4 4 4 4
11 11 11 11 12 12 12 12
11 11 11 11 12 12 12 12
11 11 11 11 12 12 12 12
11 11 11 11 12 12 12 12
Each block has its own average, rather overall average. How could I take the average over all the blocks?
I'm new to Cuda. Any relevant answer is welcomed.