I'm currently working on a lattice Boltzmann code (D3Q27) that uses MPI for parallelization. I've set up a 3D MPI topology for the communication, and the snippet below handles the exchange with the right and left neighbours; the communication between front-back and up-down uses the same structure.

    #include <mpi.h>
    #include <vector>

    void Simulation::Communicate(int iter)
    {
        int tag_xp = 0;
        int tag_xm = 1;
        int tag_yp = 2;
        int tag_ym = 3;
        int tag_zp = 4;
        int tag_zm = 5;

        MPI_Status status;

        // Exchange with the right neighbour: pack the last interior x-layer
        // (my_Nx_ - 2), then receive into the right ghost layer (my_Nx_ - 1).
        if (SubDomain_.my_right_ != MPI_PROC_NULL)
        {
            std::vector<double> send_data;
            for (int k = 0; k < SubDomain_.my_Nz_; k++)
            {
                for (int j = 0; j < SubDomain_.my_Ny_; j++)
                {
                    if (SubDomain_.lattice_[SubDomain_.my_Nx_ - 2][j][k] == nullptr)
                    {
                        for (int dir = 0; dir < _nLatNodes; dir++)
                            send_data.push_back(0.0);
                    }
                    else
                    {
                        for (int dir = 0; dir < _nLatNodes; dir++)
                            send_data.push_back(SubDomain_.lattice_[SubDomain_.my_Nx_ - 2][j][k]->m_distributions[dir]);
                    }
                }
            }

            std::vector<double> recv_data(send_data.size());
            MPI_Sendrecv(send_data.data(), static_cast<int>(send_data.size()), MPI_DOUBLE,
                         SubDomain_.my_right_, tag_xp,
                         recv_data.data(), static_cast<int>(recv_data.size()), MPI_DOUBLE,
                         SubDomain_.my_right_, tag_xm,
                         MPI_COMM_WORLD, &status);

            int index = 0;
            for (int k = 0; k < SubDomain_.my_Nz_; k++)
            {
                for (int j = 0; j < SubDomain_.my_Ny_; j++)
                {
                    for (int dir = 0; dir < _nLatNodes; dir++)
                    {
                        SubDomain_.lattice_[SubDomain_.my_Nx_ - 1][j][k]->m_distributions[dir] = recv_data[index];
                        index++;
                    }
                }
            }
        }

        // Exchange with the left neighbour: pack the first interior x-layer (x = 1),
        // then receive into the left ghost layer (x = 0). Mirrors the block above.
        if (SubDomain_.my_left_ != MPI_PROC_NULL)
        {
            std::vector<double> send_data;
            for (int k = 0; k < SubDomain_.my_Nz_; k++)
            {
                for (int j = 0; j < SubDomain_.my_Ny_; j++)
                {
                    if (SubDomain_.lattice_[1][j][k] == nullptr)
                    {
                        for (int dir = 0; dir < _nLatNodes; dir++)
                            send_data.push_back(0.0);
                    }
                    else
                    {
                        for (int dir = 0; dir < _nLatNodes; dir++)
                            send_data.push_back(SubDomain_.lattice_[1][j][k]->m_distributions[dir]);
                    }
                }
            }

            std::vector<double> recv_data(send_data.size());
            MPI_Sendrecv(send_data.data(), static_cast<int>(send_data.size()), MPI_DOUBLE,
                         SubDomain_.my_left_, tag_xm,
                         recv_data.data(), static_cast<int>(recv_data.size()), MPI_DOUBLE,
                         SubDomain_.my_left_, tag_xp,
                         MPI_COMM_WORLD, &status);

            int index = 0;
            for (int k = 0; k < SubDomain_.my_Nz_; k++)
            {
                for (int j = 0; j < SubDomain_.my_Ny_; j++)
                {
                    for (int dir = 0; dir < _nLatNodes; dir++)
                    {
                        SubDomain_.lattice_[0][j][k]->m_distributions[dir] = recv_data[index];
                        index++;
                    }
                }
            }
        }
    }
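For context, below is a minimal, self-contained sketch of how neighbour ranks such as my_right_ and my_left_ are typically obtained from a 3D Cartesian topology. The helper name get_neighbours, the axis-to-direction mapping, and reorder = 0 are assumptions for illustration, not taken from the code above; with non-periodic dimensions, MPI_Cart_shift returns MPI_PROC_NULL at the domain boundaries, which is exactly what the guards in Communicate() test for.

    #include <mpi.h>

    // Sketch only (assumed setup): obtain the six neighbour ranks of the calling
    // process on a non-periodic 3D process grid. Boundary neighbours come back
    // as MPI_PROC_NULL, matching the checks in Communicate().
    void get_neighbours(int& left, int& right, int& front, int& back, int& down, int& up)
    {
        int nprocs;
        MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

        int dims[3]    = {0, 0, 0};   // let MPI factor the process count into a 3D grid
        int periods[3] = {0, 0, 0};   // non-periodic in x, y and z
        MPI_Dims_create(nprocs, 3, dims);

        // reorder = 0 keeps the Cartesian ranks identical to the MPI_COMM_WORLD
        // ranks, consistent with the MPI_Sendrecv calls above using MPI_COMM_WORLD.
        MPI_Comm cart;
        MPI_Cart_create(MPI_COMM_WORLD, 3, dims, periods, 0, &cart);

        MPI_Cart_shift(cart, 0, 1, &left,  &right);   // x direction: left / right
        MPI_Cart_shift(cart, 1, 1, &front, &back);    // y direction: front / back
        MPI_Cart_shift(cart, 2, 1, &down,  &up);      // z direction: down / up
    }

The pair returned for direction 0 would feed my_left_ and my_right_ in the exchange above; the other two pairs would feed the front-back and up-down exchanges that follow the same structure.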