
I'm trying to run the MPI matrix multiplication example, but I modified it to read the matrices from files, and of course things blow up.

Specifically, I'm getting this error:

Entering first MPI_Recv in p0 and recieving data from slave processor 1
Fatal error in MPI_Recv: Invalid count, error stack:
MPI_Recv(186): MPI_Recv(buf=0xbfd30930, count=-1807265191, MPI_FLOAT, src=0, tag=1, MPI_COMM_WORLD, status=0x804b080) failed
MPI_Recv(104): Negative count, value is -1807265191

Here is the modified code:

 MPI_Init(&argc, &argv);  
 MPI_Comm_rank(MPI_COMM_WORLD, &id);  
 MPI_Comm_size(MPI_COMM_WORLD, &p);  
 slaves = p-1;  //slaves=numworkers
 /*---------------------------- master ----------------------------*/  
 if(id == 0) 
   {  
  /* check the number of arguments */

    if(argc!=4)
    {
        printf("Invalid number of aguements!\n./program matrix_file1 matrix_file2 result_matrix_file\n");
        return -1;
    }

         /* read matrix A */
    printf("read matrix A from %s\n", argv[1]);
    read_matrix( argv[1],&a, &sa, &i, &j);

    if(i != j) 
    { 
        printf("ERROR: matrix A not square\n"); 
        return -1;
    }

        

    n = i;



  /* read matrix B */
     printf("read matrix B from %s\n", argv[2]);
     read_matrix(argv[2],&b, &sb, &i, &j);

     

    if(i != j) 
    {     
          printf("ERROR: matrix B not square\n"); 
          return -1; 
    }   

    if(n != i) 
    {   printf("ERROR: matrix A and B incompatible\n"); 
        return -1; 
    }



    if((n%p)!=0)
    {
        printf("ERROR: %d processor(s) cannot divide matrices %d x %d! \n", p,n,n); 
        return -1;
    }
    
    
 
        rows = n/slaves;
        offset=0;
        remainPart=n%slaves;


    for(dest=1;dest<=slaves;dest++)
    {
        

        if(remainPart>0)
        {
            originalRows=rows;
            ++rows;
            remainPart--;
             printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset);
            MPI_Send(&offset, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&rows, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&a[offset][0], rows*n, MPI_FLOAT,dest,1, MPI_COMM_WORLD);  
            MPI_Send(&b, n*n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD);  
            offset = offset + rows;   
            rows = originalRows;  

        }
        else
        {
             printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset);
            MPI_Send(&offset, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&rows, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&a[offset][0], rows*n, MPI_FLOAT,dest,1, MPI_COMM_WORLD);  
            MPI_Send(&b, n*n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD);  
            offset = offset + rows; 
        }
    }
    /* initialize matrix C */

    sc = (float*)malloc(n*n*sizeof(float));
    memset(sc, 0, n*n*sizeof(float));
    c = (float**)malloc(n*sizeof(float*));
    for(i=0; i<n; i++) c[i] = &sc[i*n];

    /* wait for results from all worker tasks */  
   for (k=1; k<=slaves; k++)      
   {              
    source = k;  
    printf("Entering first MPI_Recv in p0 and recieving data from slave processor %d\n", source);
    MPI_Recv(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering second MPI_Recv in p0\n"); 
    MPI_Recv(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status);
    printf("Entering third MPI_Recv in p0\n");  
    MPI_Recv(&c[offset][0], rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD, &status);  
   }     
    

   write_matrix(argv[3], sc, i, j);

    free(sc);
    free(c);
  }   



if(id>0)
{
      source = 0; 
        //printf("Entered first MPI_Recv for process %d\n", id); 
       MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        //printf("Entered second MPI_Recv for process %d\n", id);
       MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        //printf("Entered third MPI_Recv for process %d\n", id);
       MPI_Recv(&a, rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
        //printf("Entered fourth MPI_Recv for process %d\n", id);  
       MPI_Recv(&b, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);  
       /* Matrix multiplication */  
       for (k=0; k<n; k++)  
        for (l=0; l<rows; l++) {   
         for (m=0; m<n; m++)  
          c[l][k] = c[l][k] + a[l][m] * b[m][k];  
        }  


        //printf("Entered first MPI_send for process %d\n", id);
       MPI_Send(&offset, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);  
        //printf("Entered second MPI_send for process %d\n", id);
       MPI_Send(&rows, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);  
        //printf("Entered third MPI_send for process %d\n", id);
       MPI_Send(&c, rows*n, MPI_FLOAT, 0, 2, MPI_COMM_WORLD);  

        
}

 MPI_Finalize();
}
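
Side note: the master's own initialization of C shows the intended layout, row pointers (c) over one contiguous block (sc). If read_matrix allocates a and b the same way (an assumption; it isn't shown here), then the buffer argument of each send has to be the address of the first element of that block, not the address of the pointer variable, and the same question applies to the worker-side MPI_Recv(&a, ...) and MPI_Recv(&b, ...). A minimal sketch of what that would look like:

    #include <mpi.h>

    /* Sketch only, not the full fix: pass the matrix data, not the pointer.
     * Assumes a and b are float** row-pointer arrays backed by contiguous
     * blocks sa and sb -- the same layout the master builds for c and sc. */
    static void send_rows(float **m, int offset, int rows, int n, int dest)
    {
        /* &m[offset][0] is the first of rows*n contiguous floats; &m would be
         * the address of the local pointer variable, which is what the code
         * above passes for b (and what the workers pass for a, b and c). */
        MPI_Send(&m[offset][0], rows * n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD);
    }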

Before this I was mistakenly putting every process through that code instead of just the workers; I've fixed that, but I don't know where the random negative number is coming from, especially since the count that comes after the print statement

printf("Entering first MPI_Recv in p0 and recieving data from slave processor %d\n", source);
    MPI_Recv(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering second MPI_Recv in p0\n"); 
    MPI_Recv(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status);
    printf("Entering third MPI_Recv in p0\n");  
    MPI_Recv(&c[offset][0], rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD, &status);  

is nothing more than the original dimension n multiplied by the number of rows handed to the slave.
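
For what it's worth, the failing call reports src=0, tag=1, which matches the worker-side receive of a rather than rank 0's tag-2 receives, so the bad count is a worker's rows*n; the "Entering first MPI_Recv in p0" line just happens to be the output that got flushed first. A small debugging aid (a sketch, reusing the variables already in the worker block) is to prefix every debug line with the rank so interleaved output can be attributed:

    /* Sketch: tag debug output with the rank so interleaved stdout from
     * several processes can still be told apart. */
    printf("[rank %d] about to recv A: offset=%d rows=%d n=%d count=%d\n",
           id, offset, rows, n, rows * n);
    fflush(stdout);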

Update: OK, so part of the problem seems to be that while my arrays have space allocated for them in the master process, that is not the case in the slave processes.

Having realized that, I added buffers for the matrices carrying the transferred data, before the check of whether the processor is a worker. It clearly didn't work entirely as planned, though, even if the print statements don't show it.

float buffA[n][n];
float buffB[n][n];
float buffC[n][n];

for(l=0;l<n;l++)
    for(m=0;m<n;m++)
    {
        buffA[l][m]=a[l][m];
        buffB[l][m]=b[l][m];

                        //buffA[l][m]=sa[(i*n) + j];
                        //buffB[l][m]=sb[(i*n) + j];
        printf("buffA[%d][%d] =%f\n",l,m, buffA[l][m]);
        printf("buffB[%d][%d] =%f\n",l,m,buffB[l][m]);
    }

if(id>0)
{
        /*read_matrix( argv[1],&a, &sa, &i, &j);
        read_matrix(argv[2],&b, &sb, &i, &j);*/



        source = 0; 
        printf("Entered first MPI_Recv for process %d\n", id); 
       MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        printf ("offset =%d\n", offset);
       MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        printf ("row =%d\n", rows);
       MPI_Recv(&buffA[offset][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
        printf("buffA[offset][0] =%f\n", buffA[offset][0]); //they're not getting the matrices 
       MPI_Recv(&buffB, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);  
        //printf ("b=\n");

       /* Matrix multiplication */  
       for (k=0; k<n; k++)  
        for (l=0; l<rows; l++) {   
            //c[l][k]=0.0;
         for (m=0; m<n; m++)  
          buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k];  
            //printf("c[%d][%d]= %f\n", l,k, c[l][k]);
        }  


        //printf("Entered first MPI_send for process %d\n", id);
       MPI_Send(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered second MPI_send for process %d\n", id);
       MPI_Send(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered third MPI_send for process %d\n", id);
       MPI_Send(&buffC, rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD);  

        printf("Exit via MPI_send for process %d\n", id);
}

The error number changed too, though I'm not sure whether that means anything.

Fatal error in MPI_Recv: Invalid count, error stack:
MPI_Recv(186): MPI_Recv(buf=0xbf8e642c, count=-8, MPI_FLOAT, src=0, tag=1,MPI_COMM_WORLD, status=0x804c088) failed
MPI_Recv(104): Negative count, value is -8

OK, so now I've found that the dimension n was never transferred, which is what caused the initial random negative count. So I added a send and a receive for n. Now the last problem seems to be how to transfer dynamically allocated arrays with MPI. Still working on it.
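
One common way to handle that, sketched here on the assumption that only rank 0 knows n at the start: broadcast n before any rank sizes a buffer with it, and keep each matrix in a single contiguous block so that one send or receive covers the whole thing.

    #include <stdlib.h>
    #include <mpi.h>

    /* Sketch: rows x cols matrix in one contiguous, zero-initialized block,
     * with row pointers on top, so &m[0][0] can go straight into MPI calls. */
    static float **alloc_matrix(int rows, int cols)
    {
        float *block = calloc((size_t)rows * cols, sizeof *block);
        float **m = malloc((size_t)rows * sizeof *m);
        for (int i = 0; i < rows; i++)
            m[i] = block + (size_t)i * cols;
        return m;               /* error checking omitted for brevity */
    }

    /* Usage sketch: every rank learns n before allocating anything sized by it.
     *   MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
     *   float **bufB = alloc_matrix(n, n);
     *   MPI_Recv(&bufB[0][0], n*n, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
     */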

Update

It works. The current worker code is below; the multiplication is all over the place, but baby steps, I guess. xP

if(id>0)
{

        


        source = 0; 
        printf("Entered first MPI_Recv for process %d\n", id); 
       MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        printf ("offset =%d\n", offset);
       MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
         MPI_Recv(&n, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
        printf ("row =%d\nn=%d\n", rows,n);

        float buffA[rows][n];
        float buffB[n][n];
        float buffC[rows][n];

        
       MPI_Recv(&buffA[offset][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
        printf("buffA[offset][0] =%f\n", buffA[offset][0]); //they're not getting the matrices 
       MPI_Recv(&buffB, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);  
        //printf ("b=\n");

       /* Matrix multiplication */  
       for (k=0; k<n; k++)  
        for (l=0; l<rows; l++) {   
            //c[l][k]=0.0;
         for (m=0; m<n; m++)  
          //buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k];  
            //printf("c[%d][%d]= %f\n", l,k, c[l][k]);
            buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k];  

        }  


        //printf("Entered first MPI_send for process %d\n", id);
       MPI_Send(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered second MPI_send for process %d\n", id);
       MPI_Send(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered third MPI_send for process %d\n", id);
       MPI_Send(&buffC, rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD);  

        printf("Exit via MPI_send for process %d\n", id);
}

The result:

0.00 -0.00 -0.00 -0.00 -0.00 -0.00 0.00 0.00 
0.00 -0.00 -0.00 -0.00 -1.26 -1.26 -0.00 -1.26 
-0.00 -1.26 -0.00 0.00 -0.00 0.00 0.00 0.00 
-0.00 0.00 -0.00 -0.00 0.00 -0.00 -0.00 0.00 
0.00 0.00 0.00 0.00 -0.00 -1.26 -0.00 0.00 
-0.00 -0.00 0.00 35833769696167556769392596671120015360.00 0.00 0.00 -0.00 0.00 
-0.00 -0.00 0.00 -0.00 -0.00 0.00 0.00 0.00 
0.00 -nan -0.00 -0.00 -0.00 -0.00 -0.00 -0.00 
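
Two details in that worker block would account for output like this, assuming nothing else changed: the VLA buffC is never zeroed before being used as an accumulator, and the slab is received at &buffA[offset][0] even though buffA only has rows rows and the multiply loop reads it starting at row 0 (on the master side, MPI_Send(&b, ...) also still passes the pointer's address rather than the data, as noted earlier). A sketch of the receive-and-multiply step with those two things adjusted, reusing the variables from the code above:

    /* Sketch: drop-in for the worker's receive-and-multiply step; assumes
     * offset, rows, n and status exist as in the code above. */
    float buffA[rows][n], buffB[n][n], buffC[rows][n];

    /* receive this rank's slab at local row 0; offset is only needed again
     * when rank 0 reassembles the result matrix */
    MPI_Recv(&buffA[0][0], rows * n, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
    MPI_Recv(&buffB[0][0], n * n,    MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);

    for (l = 0; l < rows; l++)
        for (k = 0; k < n; k++) {
            buffC[l][k] = 0.0f;                 /* VLAs start uninitialized */
            for (m = 0; m < n; m++)
                buffC[l][k] += buffA[l][m] * buffB[m][k];
        }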

1 Answer


(Moved out of the comments, so this question can be answered)

Print statements are very unreliable in a distributed environment. There is no guarantee they arrive in order. GDB really isn't that bad. You don't need to attach to all the processes, just pick one. You can look at my answer here (stackoverflow.com/questions/17347778/...) to see how to do it.
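
For reference, one common pattern for grabbing a single rank with gdb (a generic sketch, not necessarily what the linked answer describes) is to have the chosen rank print its PID and spin until the debugger releases it:

    #include <stdio.h>
    #include <unistd.h>

    /* Sketch: the chosen rank waits here until a debugger attaches.
     * Attach with `gdb -p <pid>`, then `set var wait_for_gdb = 0` and `continue`. */
    if (id == 1) {                      /* pick whichever rank you want to inspect */
        volatile int wait_for_gdb = 1;
        printf("rank %d (pid %d) waiting for gdb\n", id, (int)getpid());
        fflush(stdout);
        while (wait_for_gdb)
            sleep(1);
    }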

answered 2013-07-26T13:08:31.807