
I am trying to parallelize a grayscale filter for BMP images, and my function gets stuck when it tries to send the data from the pixel array.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "mpi.h"

#define MASTER_TO_SLAVE_TAG 1 //tag for messages sent from master to slaves
#define SLAVE_TO_MASTER_TAG 10 //tag for messages sent from slaves to master

#pragma pack(1)

//pixel (and the header structs) are declared before struct image so that its members compile
typedef struct
{
        unsigned char R;
        unsigned char G;
        unsigned char B;
} pixel;

struct fileHeader {
       //blablabla...      
};

struct imageHeader {
       //blablabla...
};

struct image {
       struct fileHeader fh;
       struct imageHeader ih;
       pixel *array;
};


void grayScale_Parallel(struct image *im, int size, int rank)
{
     int i,j,lum,aux,r;
     pixel tmp;

     int total_pixels = (*im).ih.width * (*im).ih.height;
     int qty = total_pixels/(size-1);
     int rest = total_pixels % (size-1);
     MPI_Status status;

     //printf("\n%d\n", rank);

     if(rank == 0)
     {
         for(i=1; i<size; i++){
         j = i*qty - qty;
         aux = j;

         if(rest != 0 && i==size-1) {qty=qty+rest;} //to distribute the whole workload
         printf("\nj: %d  qty: %d  rest: %d\n", j, qty, rest);

         //it gets stuck here, it doesn't send the data
         MPI_Send(&(*im).array[j], qty*3, MPI_BYTE, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD);
         MPI_Send(&aux, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD);
         MPI_Send(&qty, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD);

         printf("\nSending to node=%d, sender node=%d\n", i, rank);
        }

     }
     else
     {
         MPI_Recv(&aux, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD, &status);
         MPI_Recv(&qty, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD, &status);

         pixel *arreglo = (pixel *)calloc(qty, sizeof(pixel));
         MPI_Recv(&arreglo[0], qty*3, MPI_BYTE, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &status);
         //PROCESS RECEIVED PIXELS...
         //SEND to ROOT PROCESS

     }


    if (rank==0){
        //RECEIVE DATA FROM ALL PROCESS
    }
}


int main(int argc, char *argv[])
{    
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Status status;

    int op=1;
    char filename_toload[50];
    int bright_number=0;
    struct image image2;

    if (rank==0)
    {
    printf("File to load: \n");
    scanf("%s", filename_toload);
    loadImage(&image2, filename_toload);
    }

    while(op != 0)
    {
        if (rank==0)
        {
        printf("Welcome to example program!\n\n");
        printf("\t1.- GrayScale Parallel Function\n");
        printf("\t2.- Call another Function\n");
        printf("\t0.- Exit\n\t");

        printf("\n\n\tEnter option:");
        scanf("%d", &op);
        }

        //Broadcast the user's choice to all other ranks
        MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);

        switch(op)
        {
            case 1:
                    grayScale_Parallel(&image2, size, rank);
                    MPI_Barrier(MPI_COMM_WORLD);
                    printf("GrayScale applied successfully!\n\n");
                    break;
            case 2:
                    function_blabla();
                    printf("Function called successfully\n\n");
                    break;
        }
    }

    MPI_Finalize();
    return 0;
}

I think the MPI_Send call cannot read the pixel array, but that is strange, because I can print the pixels.

Any ideas?


2 Answers


To elaborate on Soravux's answer, you should change the order of your MPI_Send calls as follows (note the changed MASTER_TO_SLAVE_TAGs) to avoid a deadlock:

MPI_Send(&aux, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD);
MPI_Send(&qty, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD);
MPI_Send(&(*im).array[j], qty*3, MPI_BYTE, i, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD);

These calls need to be matched by the following sequence of MPI_Recv calls:

MPI_Recv(&aux, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD,&status);
MPI_Recv(&qty, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD,&status);

pixel *arreglo = (pixel *)calloc(qty, sizeof(pixel));
MPI_Recv(&arreglo[0], qty*3, MPI_BYTE, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD,&status);
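
To show where this fits inside grayScale_Parallel, here is a rough sketch of the rest of the worker/root exchange. It is not part of your posted code: it assumes the common 30/59/11 integer luminance weights and that each worker returns its slice with SLAVE_TO_MASTER_TAG, which is roughly what the //PROCESS RECEIVED PIXELS and //RECEIVE DATA FROM ALL PROCESS placeholders would become:

//Worker side: convert the received slice to grayscale in place,
//then send it back to the root (assumed luminance approximation).
for (i = 0; i < qty; i++) {
    lum = (arreglo[i].R*30 + arreglo[i].G*59 + arreglo[i].B*11) / 100;
    arreglo[i].R = arreglo[i].G = arreglo[i].B = (unsigned char)lum;
}
MPI_Send(arreglo, qty*3, MPI_BYTE, 0, SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD);
free(arreglo);

and on rank 0, repeating the same offset/count arithmetic used when distributing:

//Root side: copy every worker's slice back into the original image.
if (rank == 0) {
    int base = total_pixels / (size-1);
    for (i = 1; i < size; i++) {
        int offset = (i-1) * base;
        int count  = (i == size-1) ? base + rest : base;
        MPI_Recv(&(*im).array[offset], count*3, MPI_BYTE, i,
                 SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD, &status);
    }
}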

Hope this answers your question.

answered 2013-11-13T13:20:14.627

The order in which you call MPI_Send and MPI_Recv matters. Since these functions are blocking, you must make sure your calls always happen in the same order on both sides. A call to MPI_Send will not return as long as its counterpart MPI_Recv (with the same tag) has not been executed on the destination. Otherwise this can lead to a deadlock.
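
For illustration only (this standalone example is not from the original code), here is a minimal two-rank program where the sends and the receives are posted in the same order with the same tags, so neither rank ends up waiting for a message the other side has not reached yet; run it with at least two processes:

#include <stdio.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int rank, header = 42, payload[4] = {1, 2, 3, 4};
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        //small message first, larger message second...
        MPI_Send(&header, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
        MPI_Send(payload, 4, MPI_INT, 1, 1, MPI_COMM_WORLD);
    } else if (rank == 1) {
        //...and the receives are posted in exactly the same order
        MPI_Recv(&header, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(payload, 4, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
        printf("header=%d first=%d\n", header, payload[0]);
    }

    MPI_Finalize();
    return 0;
}

With mismatched ordering, a large message sent first can block inside MPI_Send waiting for its matching receive while the receiver is itself waiting for a message that has not been sent yet, which is exactly the situation in the question.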

answered 2013-11-13T06:07:29.110