(As discussed in the comments) I strongly recommend against using Fortran stream access for this. Standard Fortran I/O is only guaranteed to work when the file is accessed by a single process; in my own work I have seen files randomly corrupted when multiple processes tried to write to them at once, even though each process was writing to a different part of the file. MPI-I/O, or a library built on MPI-I/O such as HDF5 or NetCDF, is the only sensible way to achieve this. Below is a simple program illustrating the use of mpi_file_write_at_all:
ian@eris:~/work/stack$ cat at.f90
Program write_at

  Use mpi

  Implicit None

  Integer, Parameter :: n = 4

  Real, Dimension( 1:n ) :: a

  Real, Dimension( : ), Allocatable :: all_of_a

  Integer :: me, nproc
  Integer :: handle
  Integer :: i
  Integer :: error

  ! Set up MPI
  Call mpi_init( error )
  Call mpi_comm_size( mpi_comm_world, nproc, error )
  Call mpi_comm_rank( mpi_comm_world, me   , error )

  ! Provide some data
  a = [ ( i, i = n * me, n * ( me + 1 ) - 1 ) ]

  ! Open the file
  Call mpi_file_open( mpi_comm_world, 'stuff.dat', &
       mpi_mode_create + mpi_mode_wronly, mpi_info_null, handle, error )

  ! Describe how the processes will view the file - in this case
  ! simply a stream of mpi_real
  Call mpi_file_set_view( handle, 0_mpi_offset_kind, &
       mpi_real, mpi_real, 'native', &
       mpi_info_null, error )

  ! Write the data using a collective routine - generally the most efficient,
  ! but as it is collective all processes within the communicator must call the routine
  Call mpi_file_write_at_all( handle, Int( me * n, mpi_offset_kind ), &
       a, Size( a ), mpi_real, mpi_status_ignore, error )

  ! Close the file
  Call mpi_file_close( handle, error )

  ! Read the file on rank zero using Fortran stream access to check the data
  If( me == 0 ) Then
     Open( 10, file = 'stuff.dat', access = 'stream' )
     Allocate( all_of_a( 1:n * nproc ) )
     Read( 10, pos = 1 ) all_of_a
     Write( *, * ) all_of_a
  End If

  ! Shut down MPI
  Call mpi_finalize( error )

End Program write_at
ian@eris:~/work/stack$ mpif90 --version
GNU Fortran (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0
Copyright (C) 2017 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
ian@eris:~/work/stack$ mpif90 -Wall -Wextra -fcheck=all -std=f2008 at.f90
ian@eris:~/work/stack$ mpirun -np 2 ./a.out
0.00000000 1.00000000 2.00000000 3.00000000 4.00000000 5.00000000 6.00000000 7.00000000
ian@eris:~/work/stack$ mpirun -np 5 ./a.out
0.00000000 1.00000000 2.00000000 3.00000000 4.00000000 5.00000000 6.00000000 7.00000000 8.00000000 9.00000000 10.0000000 11.0000000 12.0000000 13.0000000 14.0000000 15.0000000 16.0000000 17.0000000 18.0000000 19.0000000
ian@eris:~/work/stack$
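For completeness, the check could also be done with MPI-I/O itself rather than Fortran stream access on rank zero. Below is a minimal sketch of the read-side counterpart using the collective mpi_file_read_at_all, assuming the same stuff.dat layout as above and the same number of processes as were used to write it (the program name read_at and the variable n_local are just illustrative, not part of the original answer):

Program read_at

  Use mpi

  Implicit None

  Real, Dimension( : ), Allocatable :: a

  Integer :: me, nproc
  Integer :: handle
  Integer :: n_local
  Integer :: error

  Call mpi_init( error )
  Call mpi_comm_size( mpi_comm_world, nproc, error )
  Call mpi_comm_rank( mpi_comm_world, me   , error )

  ! Each process reads back the chunk it wrote above -
  ! 4 reals per process, matching the writer
  n_local = 4
  Allocate( a( 1:n_local ) )

  ! Open the file read-only this time
  Call mpi_file_open( mpi_comm_world, 'stuff.dat', &
       mpi_mode_rdonly, mpi_info_null, handle, error )

  ! Same view as the writer - a simple stream of mpi_real
  Call mpi_file_set_view( handle, 0_mpi_offset_kind, &
       mpi_real, mpi_real, 'native', &
       mpi_info_null, error )

  ! Collective read; the offset is in units of the etype (mpi_real)
  ! because of the view set above
  Call mpi_file_read_at_all( handle, Int( me * n_local, mpi_offset_kind ), &
       a, Size( a ), mpi_real, mpi_status_ignore, error )

  Call mpi_file_close( handle, error )

  Write( *, * ) 'Rank ', me, ' read ', a

  Call mpi_finalize( error )

End Program read_at

If the reader cannot assume the writer's process count, the per-rank chunk size could instead be derived from the file size (e.g. via mpi_file_get_size) rather than hard-coded as above.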