Bridge++  Ver. 1.3.x
fieldIO_Binary_Parallel.cpp
// this code only makes sense in MPI environment.
#ifdef USE_MPI

#include "fieldIO_Binary_Parallel.h"

const std::string FieldIO_Binary_Parallel::class_name = "FieldIO_Binary_Parallel";

//====================================================================
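// read_file: collective read of a field from a single binary file via
// MPI-IO.  Every rank participates and receives its own local subvolume.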
void FieldIO_Binary_Parallel::read_file(Field *u, string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::read_file";

  initialize();

  MPI_File fh;
  int      ret;

  int nin_file = m_format->nin();
  int nex_file = m_format->nex();

  // buffer holds this rank's subvolume in file order.
  double *buf = new double [nin_file * nex_file * m_nvol];
  if (!buf) {
    vout.crucial(m_vl, "%s: allocate buffer failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

#ifdef USE_BGNET
  ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
#else
  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
#endif
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_open failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

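  // set the file view: displacement 0, etype m_type_vector (one site's
  // nin_file doubles), filetype m_type_tiled (this rank's subarray of the
  // global lattice).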
  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_set_view failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

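  // collective read: m_nvol * nex_file site vectors land in buf in file order.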
  ret = MPI_File_read_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_read_all failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_close failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

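  // the file stores big-endian data; byte-swap on little-endian hosts.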
  if (!is_bigendian()) {
    convert_endian(buf, sizeof(double), m_nvol * nin_file * nex_file);
  }

  // unpack buffer: copy each file element (i,j) into its field slot (s,t).
  double *p = buf;

  for (int j = 0; j < nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);

        u->set(s, isite, t, *p++);
      }
    }
  }

  delete [] buf;

  finalize();
}


//====================================================================
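// write_file: mirror image of read_file; pack the local subvolume into file
// order, byte-swap if needed, then write collectively through the same view.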
void FieldIO_Binary_Parallel::write_file(Field *u, string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::write_file";

  initialize();

  int nin_file = m_format->nin();
  int nex_file = m_format->nex();

  // buffer holds this rank's subvolume in file order.
  double *buf = new double [nin_file * nex_file * m_nvol];
  if (!buf) {
    vout.crucial(m_vl, "%s: allocate buffer failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  // pack buffer: gather each file element (i,j) from its field slot (s,t).
  double *p = buf;

  for (int j = 0; j < nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);

        *p++ = u->cmp(s, isite, t);
      }
    }
  }

  if (!is_bigendian()) {
    convert_endian(buf, sizeof(double), nin_file * nex_file * m_nvol);
  }

  MPI_File fh;
  int      ret;

#ifdef USE_BGNET
  ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
#else
  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
#endif
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_open failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_set_view failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_write_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_write_all failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_File_close failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  delete [] buf;

  finalize();
}


//====================================================================
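// initialize: build the MPI derived datatypes that describe the file layout.
// Guarded by m_is_initialized; read_file/write_file call this on entry and
// finalize() on exit.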
int FieldIO_Binary_Parallel::initialize()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::initialize";

  if (m_is_initialized) return EXIT_SUCCESS;

  int nin_file = m_format->nin();
  int nex_file = m_format->nex();

  const int ndim = CommonParameters::Ndim();

  // global lattice extent in each direction.
  int *global_dims = new int[ndim];
  global_dims[0] = CommonParameters::Lx();
  global_dims[1] = CommonParameters::Ly();
  global_dims[2] = CommonParameters::Lz();
  global_dims[3] = CommonParameters::Lt();

  // local (per-rank) lattice extent in each direction.
  int *local_dims = new int[ndim];
  local_dims[0] = CommonParameters::Nx();
  local_dims[1] = CommonParameters::Ny();
  local_dims[2] = CommonParameters::Nz();
  local_dims[3] = CommonParameters::Nt();

  m_nvol = 1;
  for (int i = 0; i < ndim; ++i) {
    m_nvol *= local_dims[i];
  }

  // logical coordinate of this rank in the process grid.
  int *grid_pos = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    grid_pos[i] = Communicator::ipe(i);
  }

  // offset of this rank's subvolume within the global lattice.
  int *starts = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    starts[i] = local_dims[i] * grid_pos[i];
  }

  int ret = 0;

  // etype: one site's worth of data, nin_file doubles, handled as raw bytes.
  ret = MPI_Type_contiguous(sizeof(double) * nin_file, MPI_BYTE, &m_type_vector);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_contiguous failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  ret = MPI_Type_commit(&m_type_vector);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_commit failed.\n", _function_name);
    return EXIT_FAILURE;
  }

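  // filetype: this rank's local block within the ndim-dimensional global
  // array of site vectors.  MPI_ORDER_FORTRAN makes the first dimension (x)
  // run fastest, matching the lexicographic site ordering.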
  ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts, MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_create_subarray failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  ret = MPI_Type_commit(&m_type_tiled);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_commit failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  m_is_initialized = true;

  delete [] starts;
  delete [] grid_pos;
  delete [] local_dims;
  delete [] global_dims;

  vout.detailed(m_vl, "%s via MPI I/O initialize done.\n", class_name.c_str());

  return EXIT_SUCCESS;
}


//====================================================================
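// finalize: release the MPI datatypes built by initialize().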
int FieldIO_Binary_Parallel::finalize()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::finalize";

  if (!m_is_initialized) return EXIT_SUCCESS;

  int ret;

  ret = MPI_Type_free(&m_type_tiled);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_free failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  ret = MPI_Type_free(&m_type_vector);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_free failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  m_is_initialized = false;

  vout.detailed(m_vl, "%s via MPI I/O finalize done.\n", class_name.c_str());

  return EXIT_SUCCESS;
}


//====================================================================
#endif


//====================================================================
//============================================================END=====
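A minimal usage sketch (not part of this file): the constructor argument, the
Field constructor signature, and the file names below are illustrative
assumptions rather than documented Bridge++ API; only the read_file/write_file
calls reflect the code above.

  // every MPI rank executes this; read_file/write_file are collective.
  FieldIO_Binary_Parallel fieldio(format);       // 'format' is a hypothetical IO_Format::Format
  Field field(nin, nvol, nex);                   // assumed sized to match the chosen format
  fieldio.read_file(&field, "config.bin");       // each rank receives its local subvolume
  fieldio.write_file(&field, "config_out.bin");  // packs and writes it back collectively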