
I am having some trouble converting variables from void* to MPI_Aint. Here is part of the code:

C:
void myfunc_(MPI_Aint *out_ptr, ...)
...
void *ptr = mmap(...)
...
*out_ptr = (MPI_Aint) ptr;

Fortran:
#ifdef DOUBLE_PREC
  integer, parameter, public :: mytype = KIND(0.0D0)
  integer, parameter, public :: real_type = MPI_DOUBLE_PRECISION
#endif
INTEGER BSIZ, CORE_COMM, status
real(mytype), pointer :: SND
...
call myfunc(SND, BSIZ, real_type, CORE_COMM, status)

mmap works, but I get an error (there is no error when I comment out the last line):

...
mmap succeeded 0x7fab7b490000
...
*** Process received signal ***
Signal: Segmentation fault (11)
Signal code: Address not mapped (1)
Failing at address: (nil)

Any ideas that could help? Here is the complete C function code:

void myfunc_(MPI_Aint *out_ptr, MPI_Fint *nelem, MPI_Fint *type,
            MPI_Fint *comm, MPI_Fint *ret)
{
MPI_Comm world;
int mype;

world = MPI_Comm_f2c(*comm);
MPI_Comm_rank(world, &mype);

char filename[20];

#define POSIX_SHM

int i,j;

int world_rank = -1, world_size = -1;
int mpi_result = MPI_SUCCESS;

int color = -1;
int ranks_per_node = -1;
MPI_Comm IntraNodeComm;

int node_shmem_bytes; 

mpi_result = MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
assert(mpi_result==MPI_SUCCESS);
mpi_result = MPI_Comm_size(MPI_COMM_WORLD, &world_size);
assert(mpi_result==MPI_SUCCESS);

if (world_rank==0)
{
    char * env_char;
    int units = 1;
    int num_count = 0;
    env_char = getenv("NODE_SHARED_MEMORY");
    if (env_char!=NULL)
    {
        if      ( NULL != strstr(env_char,"G") ) units = 1000000000;
        else if ( NULL != strstr(env_char,"M") ) units = 1000000;
        else if ( NULL != strstr(env_char,"K") ) units = 1000;
        else                                     units = 1;

        num_count = strspn(env_char, "0123456789");
        memset( &env_char[num_count], ' ', strlen(env_char)-num_count);

        node_shmem_bytes = units * atoi(env_char);
        printf("%7d: NODE_SHARED_MEMORY = %d bytes \n", world_rank, node_shmem_bytes );
    }
    else
    {
        node_shmem_bytes = getpagesize();
        printf("%7d: NODE_SHARED_MEMORY = %d bytes \n", world_rank, node_shmem_bytes );
    }
}
mpi_result = MPI_Bcast( &node_shmem_bytes, 1, MPI_INT, 0, MPI_COMM_WORLD );
assert(mpi_result==MPI_SUCCESS);

int node_shmem_count = node_shmem_bytes/sizeof(double);
node_shmem_count = (int) *nelem;
node_shmem_bytes = node_shmem_count * sizeof(double) * 2;

fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);

IntraNodeComm = world;

int subcomm_rank = -1;
mpi_result = MPI_Comm_rank(IntraNodeComm, &subcomm_rank);
assert(mpi_result==MPI_SUCCESS);

sprintf(filename,"/foo_%d_%d_%d",*nelem,*type,*comm);

#if defined(POSIX_SHM)
int fd;
if (subcomm_rank==0)
    fd = shm_open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR );

mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

if (subcomm_rank!=0)
    fd = shm_open(filename, O_RDWR, S_IRUSR | S_IWUSR );

if (fd<0) printf("%7d: shm_open failed: %d \n", world_rank, fd);
else      printf("%7d: shm_open succeeded: %d \n", world_rank, fd);
#elif defined(DEV_SHM)
int fd = open("/dev/shm/foo", O_RDWR | O_CREAT, S_IRUSR | S_IWUSR );
if (fd<0) printf("%7d: open failed: %d \n", world_rank, fd);
else      printf("%7d: open succeeded: %d \n", world_rank, fd);
#else
int fd = -1;
printf("%7d: no file backing \n", world_rank);
#endif
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

if (fd>=0 && subcomm_rank==0)
{
    int rc = ftruncate(fd, node_shmem_bytes);
    if (rc==0) printf("%7d: ftruncate succeeded \n", world_rank);
    else       printf("%7d: ftruncate failed \n", world_rank);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

#ifdef __bgp__
double * ptr = NULL;
_BGP_Personality_t pers;
Kernel_GetPersonality(&pers, sizeof(pers));

if( BGP_Personality_processConfig(&pers) == _BGP_PERS_PROCESSCONFIG_SMP )
{
    printf("SMP mode => MAP_PRIVATE | MAP_ANONYMOUS \n");
    ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0 );
}
else
{
    if (node_shmem_bytes>pers.Kernel_Config.SharedMemMB)
    {
        printf("node_shmem_bytes (%d) greater than pers.Kernel_Config.SharedMemMB (%d) - allocating the latter \n", 
               node_shmem_bytes, pers.Kernel_Config.SharedMemMB );
        node_shmem_bytes = pers.Kernel_Config.SharedMemMB;
    }
    ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
}
#else
void *ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
#endif
if (ptr==NULL) printf("%7d: mmap failed \n", world_rank);
else           printf("%7d: mmap succeeded %p\n", world_rank,ptr);
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

mpi_result = MPI_Comm_size(IntraNodeComm, &ranks_per_node );
assert(mpi_result==MPI_SUCCESS);
if (0==subcomm_rank) printf("%7d: ranks_per_node = %d \n", world_rank, ranks_per_node);
fflush(stdout);

for (i=0; i<ranks_per_node; i++)
{
    if (i==subcomm_rank)
    {
        printf("%7d: subcomm_rank %d setting the buffer \n", world_rank, subcomm_rank );
        //for (j=0; j<node_shmem_count; j++ ) ptr[j] = (double)i;
        printf("%7d: memset succeeded \n", world_rank);

        int rc = msync(ptr, node_shmem_bytes, MS_INVALIDATE | MS_SYNC);
        if (rc==0) printf("%7d: msync succeeded, %p \n", world_rank, ptr);
        else       printf("%7d: msync failed \n", world_rank);
    }

    fflush(stdout);
    mpi_result = MPI_Barrier(MPI_COMM_WORLD);
    assert(mpi_result==MPI_SUCCESS);

    //printf("%7d: ptr = %lf ... %lf \n", world_rank, ptr[0], ptr[node_shmem_count-1]);
    fflush(stdout);

    mpi_result = MPI_Barrier(MPI_COMM_WORLD);
    assert(mpi_result==MPI_SUCCESS);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

if (ptr!=NULL)
{
    int rc = munmap(ptr, node_shmem_bytes);
    if (rc==0) printf("%7d: munmap succeeded %p, %d\n", world_rank,ptr, (MPI_Aint) ptr);
    else       printf("%7d: munmap failed \n", world_rank);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

#if defined(POSIX_SHM)
//if (fd>=0)
if (fd>=0 && subcomm_rank==0)
{
    int rc = -1;

    rc = shm_unlink(filename);
    if (rc==0) printf("%7d: shm_unlink succeeded %p\n", world_rank,ptr);
    else       printf("%7d: shm_unlink failed \n", world_rank);
}
#elif defined(DEV_SHM)
if (fd>=0 && subcomm_rank==0)
{
    int rc = -1;

    rc = ftruncate(fd, 0);
    if (rc==0) printf("%7d: ftruncate succeeded \n", world_rank);
    else       printf("%7d: ftruncate failed \n", world_rank);

    rc = close(fd);
    if (rc==0) printf("%7d: close succeeded \n", world_rank);
    else       printf("%7d: close failed \n", world_rank);
}
#endif
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

*out_ptr = (MPI_Aint) ptr;

}

2 Answers


I was going to write you a short comment, but it somehow grew a little bit...

The MPI standards body and the implementers have struggled with this C-to-Fortran memory passing problem for years. Why not reuse their efforts instead of rediscovering the fact that round wheels work better than square ones?

Just take a look at the MPI standard function MPI_ALLOC_MEM, which is supposed to allocate special memory inside MPI and return it to the user code. The MPI-2.2 standard defines its Fortran interface as:

MPI_ALLOC_MEM(SIZE, INFO, BASEPTR, IERROR)
    INTEGER INFO, IERROR
    INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR

The modern Fortran 2008 interface in MPI-3.0 uses ISO_C_BINDING and provides it as:

MPI_Alloc_mem(size, info, baseptr, ierror)
    USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR
    INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size
    TYPE(MPI_Info), INTENT(IN) :: info
    TYPE(C_PTR), INTENT(OUT) :: baseptr
    INTEGER, OPTIONAL, INTENT(OUT) :: ierror

The standard gives the following example of how to use the call:

USE mpi_f08
USE, INTRINSIC :: ISO_C_BINDING
TYPE(C_PTR) :: p
REAL, DIMENSION(:,:), POINTER :: a
INTEGER, DIMENSION(2) :: shape
INTEGER(KIND=MPI_ADDRESS_KIND) :: size
shape = (/100,100/)
size = 4 * shape(1) * shape(2)
CALL MPI_Alloc_mem(size,MPI_INFO_NULL,p,ierr)
CALL C_F_POINTER(p, a, shape)
...
a(3,5) = 2.71
...
CALL MPI_Free_mem(a, ierr)

Basically, the C_F_POINTER routine from ISO_C_BINDING binds a C pointer to a Fortran pointer, and the memory pointed to by the former then becomes accessible through the latter.

This is how Open MPI implements the F08 version of MPI_Alloc_mem:

subroutine MPI_Alloc_mem_f08(size,info,baseptr,ierror)
   use, intrinsic :: ISO_C_BINDING, only : C_PTR
   use :: mpi_f08_types, only : MPI_Info, MPI_ADDRESS_KIND
   use :: mpi_f08, only : ompi_alloc_mem_f
   implicit none
   INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: size
   TYPE(MPI_Info), INTENT(IN) :: info
   TYPE(C_PTR), INTENT(OUT) :: baseptr
   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
   integer :: c_ierror

   call ompi_alloc_mem_f(size,info%MPI_VAL,baseptr,c_ierror)
   if (present(ierror)) ierror = c_ierror

end subroutine MPI_Alloc_mem_f08

ompi_alloc_mem_f is a C function that interfaces the internal C implementation with Fortran:

void ompi_alloc_mem_f(MPI_Aint *size, MPI_Fint *info, char *baseptr, MPI_Fint *ierr)
{
    int ierr_c;
    MPI_Info c_info = MPI_Info_f2c(*info);

    ierr_c = MPI_Alloc_mem(*size, c_info, baseptr);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
}

So you can see that the TYPE(C_PTR) baseptr argument coming from Fortran is simply passed in as a pointer, by reference as usual. This is not very obvious here, because the MPI standard defines the last argument of MPI_Alloc_mem, through which the pointer to the allocated memory is returned, as void * although it is really a pointer to void passed by reference (i.e. void **). The dummy baseptr argument is in fact a void ** too, but is simply declared as char * for reasons :) The same function is used to implement the old Fortran interface, which is why the char *baseptr maps to an INTEGER(KIND=MPI_ADDRESS_KIND) actual argument.

The lesson is that although integers of kind MPI_ADDRESS_KIND are used in Fortran to store pointers and pointer differences, you should not use MPI_Aint as the pointer argument type in C; use a regular double pointer such as void ** instead.
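
To make this concrete, here is a minimal sketch of what the wrapper could look like with a double-pointer out argument. This is an illustration only, not the original code: the size is recomputed inline, and the shm_open/ftruncate part is replaced by an anonymous mapping just so the sketch compiles on its own.

#include <stddef.h>
#include <sys/mman.h>
#include <mpi.h>

void myfunc_(void **out_ptr, MPI_Fint *nelem, MPI_Fint *type,
             MPI_Fint *comm, MPI_Fint *ret)
{
    (void) type; (void) comm;  /* unused in this sketch */

    /* same sizing rule as in the original function */
    size_t nbytes = (size_t)(*nelem) * sizeof(double) * 2;

    /* anonymous mapping only to keep the sketch self-contained;
       the real code would keep its shm_open()/ftruncate() file descriptor */
    void *ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);

    *ret = (ptr == MAP_FAILED) ? 1 : 0;

    /* hand the pointer itself back through the double pointer;
       no cast to MPI_Aint is involved */
    *out_ptr = (ptr == MAP_FAILED) ? NULL : ptr;
}

On the Fortran side, the corresponding actual argument would then be a TYPE(C_PTR) variable that you bind to SND with C_F_POINTER, just like in the standard's MPI_Alloc_mem example above.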

Answered on 2012-11-15T11:30:17.897

I'm not sure whether the line that you can comment out to make the problem go away is this one:

*out_ptr = (MPI_Aint) ptr;

Your dereferencing is inconsistent.

ptr is a double * and cannot be converted directly to an MPI_Aint.

Maybe you want:

*out_ptr = *(MPI_Aint *)ptr;

if the caller is passing a pointer (as out_ptr) to an MPI_Aint in which you want to store the value *ptr. But given that you allocated node_shmem_bytes worth of data, that probably doesn't make sense. Or perhaps you meant:

out_ptr = (MPI_Aint *)ptr

which would set the (local to myfunc) copy of out_ptr to point at the block as MPI_Aint objects, but the caller would never see that. I don't know the Fortran -> C calling convention in use, but perhaps you want to pass a pointer to an MPI_Aint * into which the C code can place ptr?
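
Just to illustrate what each of the statements mentioned above actually does (a hypothetical helper for comparison, not a suggested fix):

#include <mpi.h>

static void illustrate(MPI_Aint *out_ptr, void *ptr)
{
    *out_ptr = (MPI_Aint) ptr;      /* stores the numeric address value in the caller's variable */
    *out_ptr = *(MPI_Aint *) ptr;   /* stores whatever the first sizeof(MPI_Aint) bytes of the buffer contain */
    out_ptr  = (MPI_Aint *) ptr;    /* rebinds only the local parameter; the caller never sees this */
    (void) out_ptr;                 /* silence the "set but not used" warning */
}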

Answered on 2012-11-14T18:28:00.290