20 #ifdef ENABLE_MULTI_INSTANCE
21 int Communicator_impl::m_n_instance = 1;
22 int Communicator_impl::m_instance_id = 0;
25 int Communicator_impl::m_global_rank = 0;
26 int Communicator_impl::m_global_size = 1;
39 int is_initialized = 0;
40 MPI_Initialized(&is_initialized);
43 fprintf(stderr,
"Communicator: MPI already initialized. skip.\n");
46 int required = MPI_THREAD_FUNNELED;
49 MPI_Init_thread(pargc, pargv, required, &provided);
51 if (provided < required) {
52 fprintf(stderr,
"Communicator: MPI not supporting sufficient thread level. exiting.\n");
56 MPI_Init(pargc, pargv);
60 MPI_Comm_size(MPI_COMM_WORLD, &m_global_size);
61 MPI_Comm_rank(MPI_COMM_WORLD, &m_global_rank);
68 MPI_Comm_dup(MPI_COMM_WORLD, &
m_comm);
78 return MPI_Finalize();
87 #ifdef ENABLE_MULTI_INSTANCE
88 if ((ninstance == 0) || (m_global_size % ninstance != 0)) {
89 printf(
"%s: invalid number of instance: %d\n",
"Communicator::init", ninstance);
93 m_n_instance = ninstance;
95 int gsize = m_global_size / ninstance;
96 m_instance_id = m_global_rank / gsize;
98 MPI_Comm_split(MPI_COMM_WORLD, m_instance_id, 0 , &
m_comm);
103 MPI_Comm_dup(MPI_COMM_WORLD, &
m_comm);
121 MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
149 bool Communicator_impl::is_primary_master()
151 return m_global_rank == 0;
156 #ifdef ENABLE_MULTI_INSTANCE
158 int Communicator_impl::self_global()
160 return m_global_rank;
165 int Communicator_impl::world_id()
167 return m_instance_id;
176 return MPI_Barrier(
m_comm);
181 #ifdef ENABLE_MULTI_INSTANCE
183 int Communicator_impl::sync_global()
186 return MPI_Barrier(MPI_COMM_WORLD);
195 return MPI_Bcast(data, size, MPI_BYTE, sender,
m_comm);
207 assert(ipm == 1 || ipm == -1);
210 memcpy(recv_buf, send_buf, size);
226 send_buf, size, MPI_BYTE, p_send, tag_send,
227 recv_buf, size, MPI_BYTE, p_recv, tag_recv,
239 if (send_to == recv_from) {
240 memcpy(recv_buf, send_buf, size);
242 if (
self() == recv_from)
243 MPI_Send(send_buf, size, MPI_BYTE, send_to, tag,
m_comm);
245 if (
self() == send_to)
246 MPI_Recv(recv_buf, size, MPI_BYTE, recv_from, tag,
m_comm, &status);
259 return MPI_Allreduce((
void *)send_buf, (
void *)recv_buf, count, type, op,
Layout::m_sub_comm[pattern]);
276 MPI_Bcast((
void *)&size,
sizeof(
size_t), MPI_BYTE, sender,
m_comm);
279 char *buf =
new char[size + 1];
280 memset(buf,
'\0', size + 1);
283 data.copy(buf, size, 0);
287 int retv = MPI_Bcast((
void *)buf, size, MPI_BYTE, sender,
m_comm);
312 #ifdef ENABLE_MULTI_INSTANCE
313 printf(
"global_rank=%2d/%2d: ngrid=%d, grid_id=%d: grid_rank=%2d/%2d\n",
314 m_global_rank, m_global_size,
315 m_n_instance, m_instance_id,
318 printf(
"grid_rank=%2d/%2d\n",
static int send_1to1(size_t size, void *recv_buf, void *send_buf, int send_to, int recv_from, int tag)
static int broadcast(size_t size, void *data, int sender)
static int layout_setup()
layout_setup() – sets up the logical communication layout.
static void abort()
terminates the communicator.
static int self()
rank within small world.
static int sync()
synchronize within small world.
static int setup(int ninstance=1)
static int status()
for debugging.
static bool is_primary()
info about rank
static int reduce(int count, void *recv_buf, void *send_buf, int type, int op, int pattern)
static int init(int *pargc, char ***pargv)
static int m_comm
declared as int instead of MPI_Comm m_comm;
static int * m_grid_dims
grid dimensions in directions.
static int broadcast_string(int count, string &data, int sender)
for specific datatypes
static double get_time()
for obtaining a time interval using the clock count.
static int * m_ipe_up
rank of upward neighbour in directions.
static int tag(int rank, int idir, int ipm)
static int * m_ipe_dn
rank of downward neighbour in directions.
static int exchange(size_t size, void *recv_buf, void *send_buf, int idir, int ipm, int tag)
static std::terminate_handler default_handler