Bridge++  Version 1.5.4
communicator_mpi.cpp
#include <unistd.h> // for usleep
#include "layout.h"

// exception handler for uncaught throw.
static std::terminate_handler default_handler = std::set_terminate(Communicator::abort);

// define static members
#ifdef ENABLE_MULTI_INSTANCE
int Communicator_impl::m_n_instance  = 1; // number of instances
int Communicator_impl::m_instance_id = 0; // id of present instance
#endif

int Communicator_impl::m_global_rank = 0;
int Communicator_impl::m_global_size = 1;
int Communicator_impl::m_grid_rank = 0;
int Communicator_impl::m_grid_size = 1;

MPI_Comm Communicator_impl::m_comm;

char Communicator_impl::default_grid_map[16] = "xyzt";
//====================================================================
// class methods
int Communicator_impl::init(int *pargc, char ***pargv)
{
  LOG;

  int is_initialized = 0;
  MPI_Initialized(&is_initialized);

  if (is_initialized) {
    fprintf(stderr, "Communicator: MPI already initialized. skip.\n");
  } else {
#ifdef USE_OPENMP
    int required = MPI_THREAD_FUNNELED;
    int provided;

    MPI_Init_thread(pargc, pargv, required, &provided);

    if (provided < required) {
      fprintf(stderr, "Communicator: MPI not supporting sufficient thread level. exiting.\n");
      exit(EXIT_FAILURE);
    }
#else
    MPI_Init(pargc, pargv);
#endif
  }

  MPI_Comm_size(MPI_COMM_WORLD, &m_global_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &m_global_rank);

  // grid size and rank equal to global ones for a moment until layout is set.
  m_grid_size = m_global_size;
  m_grid_rank = m_global_rank;

  //- initialize m_comm, thanks to Aoyama-san.
  MPI_Comm_dup(MPI_COMM_WORLD, &m_comm);

  return EXIT_SUCCESS;
}


//====================================================================
int Communicator_impl::finalize()
{
  LOG;
  return MPI_Finalize();
}


//====================================================================
int Communicator_impl::setup(int ninstance)
{
  LOG;

#ifdef ENABLE_MULTI_INSTANCE
  if ((ninstance == 0) || (m_global_size % ninstance != 0)) {
    printf("%s: invalid number of instance: %d\n", "Communicator::init", ninstance);
    abort();
  }

  m_n_instance = ninstance;

  int gsize = m_global_size / ninstance;
  m_instance_id = m_global_rank / gsize;

  MPI_Comm_split(MPI_COMM_WORLD, m_instance_id, 0 /* key */, &m_comm);
#else
// m_n_instance = 1;
// m_instance_id = 0;

  MPI_Comm_dup(MPI_COMM_WORLD, &m_comm);
#endif

  MPI_Comm_size(m_comm, &m_grid_size);
  MPI_Comm_rank(m_comm, &m_grid_rank);

  Layout::layout_setup();

  status();

  return EXIT_SUCCESS;
}


//====================================================================
int Communicator_impl::setup(const std::vector<int>& lattice_size,
                             std::vector<int>& grid_size,
                             int ninstance)
{
  LOG;

#ifdef ENABLE_MULTI_INSTANCE
  if ((ninstance == 0) || (m_global_size % ninstance != 0)) {
    printf("%s: invalid number of instance: %d\n", "Communicator::init", ninstance);
    abort();
  }

  m_n_instance = ninstance;

  int gsize = m_global_size / ninstance;
  m_instance_id = m_global_rank / gsize;

  MPI_Comm_split(MPI_COMM_WORLD, m_instance_id, 0 /* key */, &m_comm);
#else
// m_n_instance = 1;
// m_instance_id = 0;

  MPI_Comm_dup(MPI_COMM_WORLD, &m_comm);
#endif

  MPI_Comm_size(m_comm, &m_grid_size);
  MPI_Comm_rank(m_comm, &m_grid_rank);

  Communicator_impl::Layout::layout_setup(lattice_size, grid_size);

  status();

  return EXIT_SUCCESS;
}


//====================================================================
void Communicator_impl::abort()
{
  LOG;
  MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  // unreached.
}


//====================================================================
// information
bool Communicator_impl::is_primary()
{
  return m_grid_rank == 0;
}


//====================================================================
int Communicator_impl::self()
{
  return m_grid_rank;
}


//====================================================================
int Communicator_impl::size()
{
  return m_grid_size;
}


//====================================================================
bool Communicator_impl::is_primary_master()
{
  return m_global_rank == 0;
}


//====================================================================
#ifdef ENABLE_MULTI_INSTANCE

int Communicator_impl::self_global()
{
  return m_global_rank;
}


//====================================================================
int Communicator_impl::world_id()
{
  return m_instance_id;
}


#endif

//====================================================================
// synchronize
int Communicator_impl::sync()
{
  LOG;
  return MPI_Barrier(m_comm);
}

//====================================================================
// sync w/o (possible) busy wait
int Communicator_impl::sync_usleep()
{
  LOG;
  char dummy = '\0';
  const int interval = 100000; // polling interval in microseconds (100 ms)
  MPI_Request req;
  MPI_Ibcast(&dummy, 1, MPI_BYTE, 0, m_comm, &req);

  MPI_Status status;
  int finished = 0;

  while (!finished) {
    usleep(interval);
    MPI_Test(&req, &finished, &status);
  }
  return finished;
}


//====================================================================
#ifdef ENABLE_MULTI_INSTANCE

int Communicator_impl::sync_global()
{
  LOG;
  return MPI_Barrier(MPI_COMM_WORLD);
}


#endif

//====================================================================
// data transfer: base cases
int Communicator_impl::Base::broadcast(size_t size, void *data, int sender)
{
  LOG;
  return MPI_Bcast(data, size, MPI_BYTE, sender, m_comm);
}


//====================================================================
int Communicator_impl::Base::exchange(size_t size, void *recv_buf, void *send_buf, int idir, int ipm, int itag)
{
  LOG;

  MPI_Status status;
  int p_send, p_recv;

  assert(ipm == 1 || ipm == -1);

  if (Layout::m_grid_dims[idir] == 1) { // no need to transfer
    memcpy(recv_buf, send_buf, size);
    return EXIT_SUCCESS;
  }

  if (ipm == 1) { // downward shift
    p_send = Layout::m_ipe_dn[idir];
    p_recv = Layout::m_ipe_up[idir];
  } else { // upward shift
    p_send = Layout::m_ipe_up[idir];
    p_recv = Layout::m_ipe_dn[idir];
  }

  int tag_send = Layout::tag(self(), idir, -ipm);
  int tag_recv = Layout::tag(p_recv, idir, -ipm);

  return MPI_Sendrecv(
    send_buf, size, MPI_BYTE, p_send, tag_send,
    recv_buf, size, MPI_BYTE, p_recv, tag_recv,
    m_comm, &status);
}


//====================================================================
int Communicator_impl::Base::send_1to1(size_t size, void *recv_buf, void *send_buf, int send_to, int recv_from, int tag)
{
  LOG;

  MPI_Status status;

  if (send_to == recv_from) {
    memcpy(recv_buf, send_buf, size);
  } else {
    if (self() == recv_from)
      MPI_Send(send_buf, size, MPI_BYTE, send_to, tag, m_comm);

    if (self() == send_to)
      MPI_Recv(recv_buf, size, MPI_BYTE, recv_from, tag, m_comm, &status);
  }

  // sync should be taken outside.

  return EXIT_SUCCESS;
}


//====================================================================
int Communicator_impl::Base::reduce(int count, void *recv_buf, void *send_buf, MPI_Datatype type, MPI_Op op, int pattern)
{
  LOG;
  return MPI_Allreduce((void *)send_buf, (void *)recv_buf, count, type, op, Layout::m_sub_comm[pattern]);
}


//====================================================================
// data transfer for specific datatypes
int Communicator_impl::broadcast_string(int count, string& data, int sender)
{
  LOG;
  assert(count == 1);

  size_t size = 0;

  // broadcast string length.
  if (Communicator::self() == sender) {
    size = data.size();
  }
  MPI_Bcast((void *)&size, sizeof(size_t), MPI_BYTE, sender, m_comm);

  // allocate buffer. pack data at sender.
  char *buf = new char[size + 1];
  memset(buf, '\0', size + 1);

  if (Communicator::self() == sender) {
    data.copy(buf, size, 0);
  }

  // do broadcast.
  int retv = MPI_Bcast((void *)buf, size, MPI_BYTE, sender, m_comm);

  if (Communicator::self() != sender) {
    data = string(buf);
  }

  delete [] buf;

  return retv;
}


//====================================================================
// info
double Communicator_impl::get_time()
{
  return MPI_Wtime();
}


//====================================================================
// debug
int Communicator_impl::status()
{
#ifdef DEBUG
#ifdef ENABLE_MULTI_INSTANCE
  printf("global_rank=%2d/%2d: ngrid=%d, grid_id=%d: grid_rank=%2d/%2d\n",
         m_global_rank, m_global_size,
         m_n_instance, m_instance_id,
         m_grid_rank, m_grid_size);
#else
  printf("grid_rank=%2d/%2d\n",
         m_grid_rank, m_grid_size);
#endif
#endif

  return EXIT_SUCCESS;
}


//====================================================================
//============================================================END=====
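
As a rough orientation, the sketch below shows how the class methods in this listing might be driven by an application. It is not part of communicator_mpi.cpp: the header name, the call sequence, and the lattice and grid sizes are illustrative assumptions; only the Communicator_impl member functions themselves come from the file above.

// Hypothetical driver, for illustration only.
#include "communicator_mpi.h" // assumed header declaring Communicator_impl
#include <vector>

int main(int argc, char *argv[])
{
  // wraps MPI_Init (or MPI_Init_thread when USE_OPENMP is defined)
  Communicator_impl::init(&argc, &argv);

  // assumed 8^3 x 16 lattice on a 1 x 1 x 1 x 2 process grid;
  // the grid sizes must multiply to the number of ranks per instance.
  std::vector<int> lattice_size = { 8, 8, 8, 16 };
  std::vector<int> grid_size    = { 1, 1, 1, 2 };
  Communicator_impl::setup(lattice_size, grid_size, 1); // single instance

  if (Communicator_impl::is_primary()) {
    // rank 0 of the grid communicator, e.g. for I/O
  }

  Communicator_impl::sync();     // MPI_Barrier on the duplicated/split communicator
  Communicator_impl::finalize(); // MPI_Finalize
  return 0;
}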