Bridge++  Ver. 1.1.x
fieldIO_Binary_Parallel.cpp
static const char rcsid[] = "$Id: fieldIO_Binary_Parallel.cpp 929 2013-07-12 05:43:23Z aoym $";

// this code only makes sense in an MPI environment.
#ifdef USE_MPI

#include "fieldIO_Binary_Parallel.h"
//====================================================================
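// Read a Field from a single binary file using collective MPI I/O.
// Every rank opens the same file, restricts its file view to its own local
// subvolume (the subarray filetype built in initialize()), and reads its
// portion with one MPI_File_read_all call.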
void FieldIO_Binary_Parallel::read_file(Field *u, string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::read_file";

  initialize();

  MPI_File fh;
  int      ret;

  int nin_file = m_format->nin();
  int nex_file = m_format->nex();

// Field::element_type *buf = new Field::element_type [m_nvol*m_nvector];
  double *buf = new double [nin_file * nex_file * m_nvol];
  if (!buf) {
    vout.general(m_vl, "%s: allocate buffer failed.\n", _function_name);
    return;
  }

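  // open the file collectively on the world communicator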
  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_open failed.\n", _function_name);
    return;
  }

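  // file view: the elementary type is one site vector (m_type_vector); the
  // subarray filetype m_type_tiled exposes only the block owned by this rank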
  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_set_view failed.\n", _function_name);
    return;
  }

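  // collective read: m_nvol site vectors for each of the nex_file external indices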
  ret = MPI_File_read_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_read_all failed.\n", _function_name);
    return;
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_close failed.\n", _function_name);
    return;
  }

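  // the file stores big-endian data; convert to host byte order if needed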
  if (!is_bigendian()) {
    // convert_endian(buf, sizeof(Field::element_type), m_nvol*m_nvector);
    convert_endian(buf, sizeof(double), m_nvol * nin_file * nex_file);
  }

  // unpack buffer
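  // m_format->file_to_field() maps each (i, j) pair of the file layout to the
  // corresponding in-site / external index (s, t) of the Field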
  double *p = buf;

  for (int j = 0; j < nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);

        u->set(s, isite, t, *p++);
      }
    }
  }

  delete [] buf;
}


//====================================================================
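// Write a Field to a single binary file using collective MPI I/O; the mirror
// of read_file().  Each rank packs its local subvolume into a buffer, converts
// the byte order if necessary, and writes it through the same subarray file
// view with one MPI_File_write_all call.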
void FieldIO_Binary_Parallel::write_file(Field *u, string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::write_file";

  initialize();

  int nin_file = m_format->nin();
  int nex_file = m_format->nex();

  // Field::element_type *buf = new Field::element_type [m_nvol*m_nvector];
  double *buf = new double [nin_file * nex_file * m_nvol];
  if (!buf) {
    vout.general(m_vl, "%s: allocate buffer failed.\n", _function_name);
    return;
  }

  // pack buffer
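  // gather Field components into file order; the index translation is the
  // same m_format->file_to_field() mapping used when reading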
  double *p = buf;

  for (int j = 0; j < nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);

        *p++ = u->cmp(s, isite, t);
      }
    }
  }

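  // convert to the file's big-endian byte order before writing, if needed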
  if (!is_bigendian()) {
    // convert_endian(buf, sizeof(Field::element_type), m_nvol*m_nvector);
    convert_endian(buf, sizeof(double), nin_file * nex_file * m_nvol);
  }

  MPI_File fh;
  int      ret;

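  // create/open the file, set the same subarray view as in read_file(),
  // and write the packed buffer collectively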
  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_open failed.\n", _function_name);
    return;
  }

  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_set_view failed.\n", _function_name);
    return;
  }

  ret = MPI_File_write_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_write_all failed.\n", _function_name);
    return;
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.general(m_vl, "%s: MPI_File_close failed.\n", _function_name);
    return;
  }

  delete [] buf;
}


//====================================================================
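// One-time setup of the MPI derived datatypes used for the file view:
//   m_type_vector -- the nin_file doubles belonging to a single lattice site,
//   m_type_tiled  -- this rank's local subvolume within the global lattice.
// Also computes the local site count m_nvol.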
int FieldIO_Binary_Parallel::initialize()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::initialize";

  if (m_is_initialized) return EXIT_SUCCESS;

  int nin_file = m_format->nin();
  int nex_file = m_format->nex();

  const int ndim = CommonParameters::Ndim();

  int *global_dims = new int[ndim];
  global_dims[0] = CommonParameters::Lx();
  global_dims[1] = CommonParameters::Ly();
  global_dims[2] = CommonParameters::Lz();
  global_dims[3] = CommonParameters::Lt();

  int *local_dims = new int[ndim];
  local_dims[0] = CommonParameters::Nx();
  local_dims[1] = CommonParameters::Ny();
  local_dims[2] = CommonParameters::Nz();
  local_dims[3] = CommonParameters::Nt();

  m_nvol = 1;
  for (int i = 0; i < ndim; ++i) {
    m_nvol *= local_dims[i];
  }

  int *grid_pos = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    grid_pos[i] = Communicator::ipe(i);
  }

  int *starts = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    starts[i] = local_dims[i] * grid_pos[i];
  }

  int ret = 0;

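  // m_type_vector: the nin_file doubles of one site, described as raw bytes so
  // that MPI applies no data-representation conversion (byte order is handled
  // explicitly in read_file()/write_file())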
// MPI_Datatype m_type_vector;
// ret = MPI_Type_contiguous(sizeof(Field::element_type)*nin_file, MPI_BYTE, &m_type_vector);
  ret = MPI_Type_contiguous(sizeof(double) * nin_file, MPI_BYTE, &m_type_vector);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_contiguous failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  ret = MPI_Type_commit(&m_type_vector);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_commit failed.\n", _function_name);
    return EXIT_FAILURE;
  }

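  // m_type_tiled: the local_dims block starting at 'starts' within the
  // global_dims lattice, in Fortran order (x fastest), one m_type_vector per site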
// MPI_Datatype m_type_tiled;
  ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts, MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_create_subarray failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  ret = MPI_Type_commit(&m_type_tiled);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_commit failed.\n", _function_name);
    return EXIT_FAILURE;
  }

  m_is_initialized = true;

  delete [] starts;
  delete [] grid_pos;
  delete [] local_dims;
  delete [] global_dims;

  vout.detailed(m_vl, "FieldIO_Binary_Parallel via MPI I/O: initialization done.\n");

  return EXIT_SUCCESS;
}
#endif