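// x-direction halo exchange: each rank swaps its outermost interior slices
// with the ghost layers of its left and right neighbours. Messages moving
// in +x carry tag_xp, messages moving in -x carry tag_xm.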
int tag_xp = 0;
int tag_xm = 1;
int mpi_error;
MPI_Status status;
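
// Exchange with the left neighbour; the received slice fills the i = 0 ghost layer.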
if (SubDomain_.my_left_ != MPI_PROC_NULL /* && SubDomain_.need_communication_[0] == 1*/) {
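    // Pack the first interior slice (i = 1); absent (e.g. solid) cells contribute zeros.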
    std::vector<double> send_data;
    for (int k = 0; k < SubDomain_.my_Nz_; k++) {
        for (int j = 0; j < SubDomain_.my_Ny_; j++) {
            if (SubDomain_.m_lattice[1][j][k] == nullptr) {
                for (int dir = 0; dir < _nLatNodes; dir++) {
                    send_data.push_back(0.0);
                }
            }
            else {
                for (int dir = 0; dir < _nLatNodes; dir++) {
                    send_data.push_back(SubDomain_.m_lattice[1][j][k]->m_distributions[dir]);
                }
            }
        }
    }
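    // A combined send/receive avoids the deadlock two separate blocking calls
    // could cause; the tags must mirror the right-hand exchange below.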
//log_data("send", myrank_, SubDomain_.my_left_, iter, send_data);
std::vector<double> recv_data(send_data.size());
mpi_error = MPI_Sendrecv(send_data.data(), send_data.size(), MPI_DOUBLE, SubDomain_.my_left_, tag_xm,
recv_data.data(), recv_data.size(), MPI_DOUBLE, SubDomain_.my_left_, tag_xp,
MPI_COMM_WORLD, &status);
if (mpi_error != MPI_SUCCESS) {
std::cerr << "MPI_Sendrecv failed with error code: " << mpi_error << std::endl;
}
//log_data("recv", SubDomain_.my_left_, myrank_, iter, recv_data);
    int index = 0;
    for (int k = 0; k < SubDomain_.my_Nz_; k++) {
        for (int j = 0; j < SubDomain_.my_Ny_; j++) {
            // Guard against unallocated ghost cells, mirroring the nullptr
            // check on the send side; index advances either way so the
            // buffer stays aligned.
            if (SubDomain_.m_lattice[0][j][k] != nullptr) {
                for (int dir = 0; dir < _nLatNodes; dir++) {
                    SubDomain_.m_lattice[0][j][k]->m_distributions[dir] = recv_data[index + dir];
                }
            }
            index += _nLatNodes;
        }
    }
    // Log the lattice state after retrieving data (null-safe, since ghost
    // cells may be unallocated)
    std::vector<double> lattice_data;
    for (int k = 0; k < SubDomain_.my_Nz_; k++) {
        for (int j = 0; j < SubDomain_.my_Ny_; j++) {
            for (int dir = 0; dir < _nLatNodes; dir++) {
                lattice_data.push_back(SubDomain_.m_lattice[0][j][k] != nullptr
                                           ? SubDomain_.m_lattice[0][j][k]->m_distributions[dir]
                                           : 0.0);
            }
        }
    }
    //log_data("post-retrieve", SubDomain_.my_left_, myrank_, iter, lattice_data);
}
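
// Exchange with the right neighbour; the received slice fills the i = my_Nx_ - 1 ghost layer.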
if (SubDomain_.my_right_ != MPI_PROC_NULL /* && SubDomain_.need_communication_[1] == 1*/) {
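    // Pack the last interior slice (i = my_Nx_ - 2); absent cells contribute zeros.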
    std::vector<double> send_data;
    for (int k = 0; k < SubDomain_.my_Nz_; k++) {
        for (int j = 0; j < SubDomain_.my_Ny_; j++) {
            if (SubDomain_.m_lattice[SubDomain_.my_Nx_ - 2][j][k] == nullptr) {
                for (int dir = 0; dir < _nLatNodes; dir++) {
                    send_data.push_back(0.0);
                }
            }
            else {
                for (int dir = 0; dir < _nLatNodes; dir++) {
                    send_data.push_back(SubDomain_.m_lattice[SubDomain_.my_Nx_ - 2][j][k]->m_distributions[dir]);
                }
            }
        }
    }
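    // Tags are swapped relative to the left-hand exchange so that each
    // message matches its counterpart on the neighbouring rank.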
//log_data("send", myrank_, SubDomain_.my_right_, iter, send_data);
std::vector<double> recv_data(send_data.size());
mpi_error = MPI_Sendrecv(send_data.data(), send_data.size(), MPI_DOUBLE, SubDomain_.my_right_, tag_xp,
recv_data.data(), recv_data.size(), MPI_DOUBLE, SubDomain_.my_right_, tag_xm,
MPI_COMM_WORLD, &status);
if (mpi_error != MPI_SUCCESS) {
std::cerr << "MPI_Sendrecv failed with error code: " << mpi_error << std::endl;
}
//log_data("recv", SubDomain_.my_right_, myrank_, iter, recv_data);
    int index = 0;
    for (int k = 0; k < SubDomain_.my_Nz_; k++) {
        for (int j = 0; j < SubDomain_.my_Ny_; j++) {
            // Guard against unallocated ghost cells, mirroring the nullptr
            // check on the send side; index advances either way so the
            // buffer stays aligned.
            if (SubDomain_.m_lattice[SubDomain_.my_Nx_ - 1][j][k] != nullptr) {
                for (int dir = 0; dir < _nLatNodes; dir++) {
                    SubDomain_.m_lattice[SubDomain_.my_Nx_ - 1][j][k]->m_distributions[dir] = recv_data[index + dir];
                }
            }
            index += _nLatNodes;
        }
    }
    // Log the lattice state after retrieving data (null-safe, since ghost
    // cells may be unallocated)
    std::vector<double> lattice_data;
    for (int k = 0; k < SubDomain_.my_Nz_; k++) {
        for (int j = 0; j < SubDomain_.my_Ny_; j++) {
            for (int dir = 0; dir < _nLatNodes; dir++) {
                lattice_data.push_back(SubDomain_.m_lattice[SubDomain_.my_Nx_ - 1][j][k] != nullptr
                                           ? SubDomain_.m_lattice[SubDomain_.my_Nx_ - 1][j][k]->m_distributions[dir]
                                           : 0.0);
            }
        }
    }
    //log_data("post-retrieve", SubDomain_.my_right_, myrank_, iter, lattice_data);
}