static const char rcsid[] =
  "$Id: fieldIO_Binary_Parallel.cpp 929 2013-07-12 05:43:23Z aoym $";
static const char _function_name[] = "FieldIO_Binary_Parallel::read_file";
double *buf = new double [nin_file * nex_file * m_nvol];
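// Scratch buffer for the collective read: nin_file doubles per site, for each
// of the nex_file external components, over the m_nvol locally owned sites.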
ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()),
                    MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled,
                        const_cast<char *>("native"), MPI_INFO_NULL);
ret = MPI_File_read_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
ret = MPI_File_close(&fh);
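// Unpack the buffer into the field; s and t (set in the elided code) are
// presumably the field's internal/external indices corresponding to the
// file's component indices (i, j).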
for (int j = 0; j < nex_file; ++j) {
  for (int isite = 0; isite < m_nvol; ++isite) {
    for (int i = 0; i < nin_file; ++i) {
      u->set(s, isite, t, *p++);
static const char _function_name[] = "FieldIO_Binary_Parallel::write_file";
double *buf = new double [nin_file * nex_file * m_nvol];
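// Mirror of read_file: pack the local field data into a contiguous buffer
// before handing it to MPI-IO.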
for (int j = 0; j < nex_file; ++j) {
  for (int isite = 0; isite < m_nvol; ++isite) {
    for (int i = 0; i < nin_file; ++i) {
      *p++ = u->cmp(s, isite, t);
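// Bring the packed data to the file's byte order before writing (presumably
// a no-op when host and file byte order already match).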
convert_endian(buf, sizeof(double), nin_file * nex_file * m_nvol);
ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()),
                    MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled,
                        const_cast<char *>("native"), MPI_INFO_NULL);
ret = MPI_File_write_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
vout.general(m_vl, "%s: MPI_File_write_all failed.\n", _function_name);
ret = MPI_File_close(&fh);
int FieldIO_Binary_Parallel::initialize()
static const char _function_name[] = "FieldIO_Binary_Parallel::initialize";
if (m_is_initialized) return EXIT_SUCCESS;
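// The derived MPI datatypes are built only once and reused by every
// subsequent read_file/write_file call.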
int *global_dims = new int[ndim];
int *local_dims = new int[ndim];
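// global_dims holds the full lattice extent and local_dims the per-rank
// sublattice extent in each of the ndim directions; the product of the local
// extents, accumulated into m_nvol below, is the number of sites on this rank.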
for (int i = 0; i < ndim; ++i) {
  m_nvol *= local_dims[i];
}
int *grid_pos = new int[ndim];
for (int i = 0; i < ndim; ++i) {
int *starts = new int[ndim];
for (int i = 0; i < ndim; ++i) {
  starts[i] = local_dims[i] * grid_pos[i];
}
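// starts[] is this rank's offset, in sites, within the global lattice along
// each direction, derived from its position in the process grid.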
ret = MPI_Type_contiguous(sizeof(double) * nin_file, MPI_BYTE, &m_type_vector);
vout.general(m_vl, "%s: MPI_Type_contiguous failed.\n", _function_name);
ret = MPI_Type_commit(&m_type_vector);
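// m_type_vector now describes one site record (nin_file doubles) as an opaque
// block of bytes; it serves as the elementary type of the file view.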
ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts,
                               MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
vout.general(m_vl, "%s: MPI_Type_create_subarray failed.\n", _function_name);
ret = MPI_Type_commit(&m_type_tiled);
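// m_type_tiled is the subarray of site records belonging to this rank, laid
// out in Fortran order within the global lattice, so all ranks can read and
// write disjoint tiles of a single shared file.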
m_is_initialized = true;
delete [] local_dims;
delete [] global_dims;
vout.detailed(m_vl, "FieldIO_Binary_Parallel via MPI I/O initialize done.\n");