/*!
  @file   fieldIO_Binary_Parallel.cpp
  @brief  FieldIO_Binary_Parallel: file I/O of space-time distributed data via MPI I/O (Bridge++ 1.5.4)
*/

// this code only makes sense in an MPI environment.
#ifdef USE_MPI

#include "fieldIO_Binary_Parallel.h"
#include <new>  // std::nothrow, used for the checked buffer allocations below

const std::string FieldIO_Binary_Parallel::class_name = "FieldIO_Binary_Parallel";

//====================================================================
FieldIO_Binary_Parallel::FieldIO_Binary_Parallel(const IO_Format::Format *format)
  : FieldIO(format),
    m_is_initialized(false),
    m_nvol(0), m_nin_file(0), m_nex_file(0)
{}

//====================================================================
FieldIO_Binary_Parallel::~FieldIO_Binary_Parallel()
{
  finalize();
}

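// read_file: collective read via MPI I/O. Every rank opens the same file,
// installs a file view (m_type_tiled) that exposes only its own subdomain
// of the global lattice, and reads its block with a single collective call.
// The buffer arrives in file order; it is unpacked into field order through
// m_format->file_to_field(). Data on disk is big-endian, so little-endian
// hosts byte-swap after reading.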
//====================================================================
void FieldIO_Binary_Parallel::read_file(Field *u, const std::string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::read_file";

  if (!u) {
    vout.crucial(m_vl, "Warning at %s: field is null.\n", _function_name);
    return;
  }

  initialize(u);

  MPI_File fh;
  int ret;

  // fetch data from file into buffer: buffer is in file order.
  // nothrow form, so that an allocation failure yields a null pointer
  // (plain new would throw) and the check below is meaningful.
  double *buf = new (std::nothrow) double [m_nin_file * m_nvol * m_nex_file];
  if (!buf) {
    vout.crucial(m_vl, "Error at %s: allocate buffer failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_open failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_set_view failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_read_all(fh, (void *)buf, m_nvol * m_nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_read_all failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_close failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  if (!is_bigendian()) {
    convert_endian(buf, sizeof(double), m_nvol * m_nin_file * m_nex_file);
  }

  // unpack buffer
  double *p = buf;

  for (int j = 0; j < m_nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < m_nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);

        u->set(s, isite, t, *p++);
      }
    }
  }

  delete [] buf;

  finalize();
}


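// write_file: the inverse of read_file. The field is packed into a buffer
// in file order, byte-swapped to big-endian on little-endian hosts, and
// written collectively through the same tiled file view.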
//====================================================================
void FieldIO_Binary_Parallel::write_file(Field *u, const std::string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::write_file";

  if (!u) {
    vout.crucial(m_vl, "Warning at %s: field is null.\n", _function_name);
    return;  // nothing to write; bail out, as read_file does.
  }

  initialize(u);

  double *buf = new (std::nothrow) double [m_nin_file * m_nvol * m_nex_file];
  if (!buf) {
    vout.crucial(m_vl, "Error at %s: allocate buffer failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  // pack buffer
  double *p = buf;

  for (int j = 0; j < m_nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < m_nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);

        *p++ = u->cmp(s, isite, t);
      }
    }
  }

  if (!is_bigendian()) {
    convert_endian(buf, sizeof(double), m_nin_file * m_nvol * m_nex_file);
  }

  MPI_File fh;
  int ret;

  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_open failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_set_view failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_write_all(fh, (void *)buf, m_nvol * m_nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_write_all failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_close failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  delete [] buf;

  finalize();
}


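// initialize: builds the two MPI datatypes that describe the file layout.
//  - m_type_vector: one lattice site's data in file order, i.e. nin_file
//    doubles as a contiguous block of bytes.
//  - m_type_tiled: an ndim-dimensional subarray of m_type_vector elements
//    selecting this rank's local block (local_dims at offset starts) out of
//    the global lattice (global_dims), in Fortran order (x fastest).
// Worked example with hypothetical sizes: a 16^3 x 32 lattice on a 2x2x2x2
// process grid gives local_dims = {8,8,8,16}; the rank at grid position
// (1,0,1,0) then reads/writes at starts = {8,0,8,0}.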
//====================================================================
int FieldIO_Binary_Parallel::initialize(const Field *v)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::initialize";

  // N.B. the trivial layout returns 0 for nin/nex_file; fall back to the field's own sizes.
  const int nin_file = m_format->nin() ? m_format->nin() : v->nin();
  const int nex_file = m_format->nex() ? m_format->nex() : v->nex();
  const int nvol     = v->nvol();

  // check if the layout has already been generated and is recyclable.
  if (m_is_initialized &&
      (nin_file == m_nin_file) &&
      (nex_file == m_nex_file) &&
      (nvol == m_nvol))
  {
    vout.detailed(m_vl, "%s: layout recycled.\n", _function_name);
    return EXIT_SUCCESS;
  }

  // first, clean up the pre-existing layout, if any.
  clear_layout();

  // local parameters
  m_nin_file = nin_file;
  m_nex_file = nex_file;

  const int ndim = CommonParameters::Ndim();

  int *global_dims = new int[ndim];
  global_dims[0] = CommonParameters::Lx();
  global_dims[1] = CommonParameters::Ly();
  global_dims[2] = CommonParameters::Lz();
  global_dims[3] = CommonParameters::Lt();

  int *local_dims = new int[ndim];
  local_dims[0] = CommonParameters::Nx();
  local_dims[1] = CommonParameters::Ny();
  local_dims[2] = CommonParameters::Nz();
  local_dims[3] = CommonParameters::Nt();

  m_nvol = 1;
  for (int i = 0; i < ndim; ++i) {
    m_nvol *= local_dims[i];
  }

  int *grid_pos = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    grid_pos[i] = Communicator::ipe(i);
  }

  // global offset of this rank's local block, in lattice sites.
  int *starts = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    starts[i] = local_dims[i] * grid_pos[i];
  }

  int ret = 0;

  // m_type_vector: one site's data in file order (m_nin_file doubles).
  ret = MPI_Type_contiguous(sizeof(double) * m_nin_file, MPI_BYTE, &m_type_vector);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_Type_contiguous failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_Type_commit(&m_type_vector);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_Type_commit failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  // m_type_tiled: this rank's local block within the global lattice, one m_type_vector per site.
  ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts, MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_Type_create_subarray failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_Type_commit(&m_type_tiled);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_Type_commit failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  delete [] starts;
  delete [] grid_pos;
  delete [] local_dims;
  delete [] global_dims;

  // initialization done.
  m_is_initialized = true;

  vout.detailed(m_vl, "%s: layout initialized.\n", _function_name);

  return EXIT_SUCCESS;
}


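// clear_layout: frees the committed MPI datatypes and resets the cached
// geometry, so a later initialize() can rebuild the layout for a field of
// a different shape.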
//====================================================================
int FieldIO_Binary_Parallel::clear_layout()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::clear_layout";

  if (m_is_initialized) {
    m_nin_file = 0;
    m_nex_file = 0;
    m_nvol     = 0;

    int ret = 0;

    ret = MPI_Type_free(&m_type_vector);
    if (ret) {
      vout.crucial(m_vl, "%s: MPI_Type_free for m_type_vector failed.\n", _function_name);
      exit(EXIT_FAILURE);
    }

    ret = MPI_Type_free(&m_type_tiled);
    if (ret) {
      vout.crucial(m_vl, "%s: MPI_Type_free for m_type_tiled failed.\n", _function_name);
      exit(EXIT_FAILURE);
    }

    m_is_initialized = false;
  }

  return EXIT_SUCCESS;
}


//====================================================================
int FieldIO_Binary_Parallel::finalize()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::finalize";

  clear_layout();

  vout.detailed(m_vl, "%s: finalize (via MPI I/O) done.\n", class_name.c_str());

  return EXIT_SUCCESS;
}


//====================================================================
#endif
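
// Usage sketch (hypothetical driver, not part of this file). All ranks must
// call read_file()/write_file() collectively; `fmt` stands for an
// IO_Format::Format instance and the Field is assumed to be sized to match:
//
//   FieldIO_Binary_Parallel fieldio(fmt);
//   Field u(...);                              // local field on this rank
//   fieldio.read_file(&u, "config_in.bin");    // collective read
//   fieldio.write_file(&u, "config_out.bin");  // collective write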

//====================================================================
//============================================================END=====