I have the following code, and I want to measure the running time of several matrix multiplications with different sizes. I started with a matrix size of 100 and worked my way up to 500, but when I try 1000 I get the error: Segmentation fault (core dumped), so I assume it has something to do with memory. I want to measure the running time even for matrices of size 5000 and maybe 10000. Can anyone help me solve my problem?
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define SIZE 1000

int main(void)
{
    /* three SIZE x SIZE matrices of int, all on the stack */
    int A[SIZE][SIZE], B[SIZE][SIZE], C[SIZE][SIZE] = {0};
    int i, j, k;

    srand(time(NULL));

    /* fill A and B with random values in [0, 99] */
    for(i = 0; i < SIZE; i++)
    {
        for(j = 0; j < SIZE; j++)
        {
            A[i][j] = rand() % 100;
            B[i][j] = rand() % 100;
        }
    }

    clock_t begin, end;
    double time_spent;

    begin = clock();

    /* naive triple-loop multiplication: C = A * B */
    for(i = 0; i < SIZE; i++)
        for(j = 0; j < SIZE; j++)
            for(k = 0; k < SIZE; k++)
                C[i][j] += A[i][k] * B[k][j];

    end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;

    printf("Elapsed time: %.2lf seconds.\n", time_spent);

    return 0;
}
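
Since I assume the three arrays are simply too big to fit on the stack, this is what I was thinking of trying instead: allocating the matrices on the heap with malloc and indexing them by hand as i * SIZE + j. It's only a rough sketch, and I'm not sure it is the right way to fix this:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define SIZE 1000

int main(void)
{
    /* allocate each matrix as one flat SIZE*SIZE block on the heap */
    int *A = malloc(sizeof(int) * SIZE * SIZE);
    int *B = malloc(sizeof(int) * SIZE * SIZE);
    int *C = calloc((size_t)SIZE * SIZE, sizeof(int)); /* zero-initialized */
    if (A == NULL || B == NULL || C == NULL)
    {
        fprintf(stderr, "Allocation failed.\n");
        return 1;
    }

    int i, j, k;
    srand(time(NULL));

    /* fill A and B with random values in [0, 99] */
    for(i = 0; i < SIZE; i++)
        for(j = 0; j < SIZE; j++)
        {
            A[i * SIZE + j] = rand() % 100;
            B[i * SIZE + j] = rand() % 100;
        }

    clock_t begin = clock();

    /* same naive triple-loop multiplication, with manual indexing */
    for(i = 0; i < SIZE; i++)
        for(j = 0; j < SIZE; j++)
            for(k = 0; k < SIZE; k++)
                C[i * SIZE + j] += A[i * SIZE + k] * B[k * SIZE + j];

    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;

    printf("Elapsed time: %.2lf seconds.\n", time_spent);

    free(A);
    free(B);
    free(C);
    return 0;
}

I went with one flat block per matrix rather than an array of row pointers, so element [i][j] becomes A[i * SIZE + j]. Is this a reasonable approach for sizes like 5000 or 10000, or is there a better way?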