For some reason, MPI-IO is only writing the data from one of my processes to the file. I open the file with MPI_File_open, set each process's view with MPI_File_set_view, and write the data out with MPI_File_write_all. When I run the code, everything appears to execute fine with no errors, but the output file is wrong: the first part of the CSV is garbage (VS Code just shows NULL repeated over and over when I open the file), and the rest of the file is the output of the second process's block (I'm using a block decomposition across two processes). I can't figure out why my program isn't writing the values correctly (or at least the first process's values), so I thought I'd ask here.
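To show what I think the I/O pattern should look like, here's a stripped-down sketch of what I'm going for, based on the usual textbook examples where every rank writes fixed-length records into its own contiguous block of a shared file. Everything in it is a placeholder (RECORD_LEN, NRECORDS, the filename, the record contents), not my real code:

/* Sketch only: fixed-length records, one contiguous block per rank.
 * RECORD_LEN, NRECORDS, and "sketch.csv" are made-up placeholders. */
#include <stdio.h>
#include <mpi.h>

#define RECORD_LEN 16
#define NRECORDS   4

int main(int argc, char** argv) {
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "sketch.csv",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

    /* Each rank's view starts where the previous rank's block ends. */
    MPI_Offset disp = (MPI_Offset)rank * NRECORDS * RECORD_LEN;
    MPI_File_set_view(fh, disp, MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);

    char record[RECORD_LEN + 1];
    for (int i = 0; i < NRECORDS; i++) {
        /* Pad every record to exactly RECORD_LEN bytes so offsets line up. */
        snprintf(record, sizeof record, "rank%d,iter%02d,xx\n", rank, i);
        MPI_File_write_all(fh, record, RECORD_LEN, MPI_CHAR, MPI_STATUS_IGNORE);
    }

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}

My real code follows the same idea, except that each record is a CSV line built at runtime.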
Here's my actual code, with the parts that aren't relevant to the problem at hand omitted:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <mpi.h>
int main(int argc, char** argv) {
    int iproc, nproc;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &iproc);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    // Inputs:
    int depth = 3;
    float p_x_error = 0.05;
    float p_z_error = 0.05;
    int max_char[] = {128, 640, 328};
    int i_max_char = max_char[depth % 3];
    int num_data_qubits = depth * depth;
    int data_qubit_x_error[depth + 2][depth + 2];
    int data_qubit_z_error[depth + 2][depth + 2];
    int ancilla_qubit_value[depth + 1][depth + 1];

    // Parallel block decomposition variables
    int total_num_iter = pow(4, num_data_qubits);        // Total number of outer loop iterations
    int block_size = floor(total_num_iter / nproc);      // Number of iterations per process (block)
    if (total_num_iter % nproc > 0) { block_size += 1; } // Add 1 if blocks don't divide evenly

    int iter_first = iproc * block_size;
    int iter_last = iter_first + block_size;

    MPI_Status status;
    MPI_File fh;
    char buf[i_max_char];

    // Output:
    MPI_File_open(MPI_COMM_SELF, "testfile.csv", MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, iproc * block_size * strlen(buf) * sizeof(char), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
    if (iproc == 0) {
        printf("Block size: %d\n", block_size);
    }
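    /* My reasoning for the view offset above: each rank writes block_size
     * lines, and every line should fit within the maximum line length, so
     * rank i's block ought to start past everything rank i-1 writes.
     * (That's an assumption on my part; it may be exactly what goes wrong.) */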
    for (int i = iter_first; i < iter_last; i++) {
        // A bunch of stuff happens where values are written to the 2d arrays listed above

        char label_list[i_max_char];
        strcpy(label_list, "\n");
        char anc_name[3];

        // Output the ancilla qubit values in proper format
        int ancilla_value;
        for (int k = 1; k < depth; k++) {
            if (k % 2 == 0) {
                ancilla_value = (ancilla_qubit_value[depth][k] == 1) ? -1 : 1;
                sprintf(anc_name, "%d,", ancilla_value);
                strcat(label_list, anc_name);
            }
            for (int j = depth - 1; j > 0; j--) {
                if (k == 1 && j % 2 == 0) {
                    ancilla_value = (ancilla_qubit_value[j][k-1] == 1) ? -1 : 1;
                    sprintf(anc_name, "%d,", ancilla_value);
                    strcat(label_list, anc_name);
                } else if (k == (depth - 1) && j % 2 == 1) {
                    ancilla_value = (ancilla_qubit_value[j][k+1] == 1) ? -1 : 1;
                    sprintf(anc_name, "%d,", ancilla_value);
                    strcat(label_list, anc_name);
                }
                ancilla_value = (ancilla_qubit_value[j][k] == 1) ? -1 : 1;
                sprintf(anc_name, "%d,", ancilla_value);
                strcat(label_list, anc_name);
            }
            if (k % 2 == 1) {
                ancilla_value = (ancilla_qubit_value[0][k] == 1) ? -1 : 1;
                sprintf(anc_name, "%d,", ancilla_value);
                strcat(label_list, anc_name);
            }
        }

        // For printing label list:
        strcat(label_list, "\"[");
        char qubit_name[6];
        int first = 1;
        for (int k = 1; k < depth + 1; k++) {
            for (int j = depth; j > 0; j--) {
                if (data_qubit_x_error[j][k] == 1) {
                    if (first == 1) {
                        first = 0;
                    } else {
                        strcat(label_list, ", ");
                    }
                    sprintf(qubit_name, "'X%d%d'", (k-1), (depth-j));
                    strcat(label_list, qubit_name);
                }
                if (data_qubit_z_error[j][k] == 1) {
                    if (first == 1) {
                        first = 0;
                    } else {
                        strcat(label_list, ", ");
                    }
                    sprintf(qubit_name, "'Z%d%d'", (k-1), (depth-j));
                    strcat(label_list, qubit_name);
                }
            }
        }
        strcat(label_list, "]\"");

        MPI_File_write_all(fh, label_list, strlen(label_list) * sizeof(char), MPI_CHAR, MPI_STATUS_IGNORE);
    }

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}
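In case it's relevant: I compile with mpicc and run on two ranks (e.g. mpirun -np 2 ./a.out), which is the two-block decomposition mentioned above. What I expected was rank 0's lines in the first half of testfile.csv and rank 1's lines in the second half; what I actually get is the repeated NULL glyphs (presumably 0x00 bytes) in the first half, and only rank 1's block after that.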