// FieldIO_Binary_Parallel::read_file (excerpt)
static const char _function_name[] = "FieldIO_Binary_Parallel::read_file";
// ...
double *buf = new double[nin_file * nex_file * m_nvol];
// ...
ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
// ...
ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
// ...
ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
// ...
ret = MPI_File_read_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
// ...
ret = MPI_File_close(&fh);
// ...
// unpack the I/O buffer into the Field object
for (int j = 0; j < nex_file; ++j) {
  for (int isite = 0; isite < m_nvol; ++isite) {
    for (int i = 0; i < nin_file; ++i) {
      // ...
      u->set(s, isite, t, *p++);
    }
  }
}
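/*
 * A minimal, self-contained sketch of the collective-read pattern used above:
 * open the file on all ranks, install a per-rank file view, issue one
 * MPI_File_read_all, and close. The subarray filetype that read_file installs
 * via m_type_tiled is replaced by a plain displacement here for brevity; the
 * file name and element count are illustrative, not part of the original.
 */
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int count = 1024;                 // doubles per rank (illustrative)
  std::vector<double> buf(count, 0.0);

  MPI_File fh;
  int ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>("config.bin"),
                          MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
  if (ret != MPI_SUCCESS) MPI_Abort(MPI_COMM_WORLD, ret);

  // each rank sees only its own contiguous slice of the file
  MPI_Offset disp = static_cast<MPI_Offset>(rank) * count * sizeof(double);
  ret = MPI_File_set_view(fh, disp, MPI_DOUBLE, MPI_DOUBLE,
                          const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret != MPI_SUCCESS) MPI_Abort(MPI_COMM_WORLD, ret);

  // every rank participates in the same collective read
  ret = MPI_File_read_all(fh, buf.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);
  if (ret != MPI_SUCCESS) MPI_Abort(MPI_COMM_WORLD, ret);

  MPI_File_close(&fh);

  if (rank == 0) std::printf("buf[0] = %f\n", buf[0]);

  MPI_Finalize();
  return 0;
}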
// FieldIO_Binary_Parallel::write_file (excerpt)
static const char _function_name[] = "FieldIO_Binary_Parallel::write_file";
// ...
double *buf = new double[nin_file * nex_file * m_nvol];
// ...
// pack the Field object into the I/O buffer
for (int j = 0; j < nex_file; ++j) {
  for (int isite = 0; isite < m_nvol; ++isite) {
    for (int i = 0; i < nin_file; ++i) {
      // ...
      *p++ = u->cmp(s, isite, t);
    }
  }
}
// ...
convert_endian(buf, sizeof(double), nin_file * nex_file * m_nvol);
// ...
ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
// ...
ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
// ...
ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
// ...
ret = MPI_File_write_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
if (ret) {
  vout.crucial(m_vl, "%s: MPI_File_write_all failed.\n", _function_name);
  // ...
}
// ...
ret = MPI_File_close(&fh);
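/*
 * A sketch of the kind of endian conversion convert_endian() performs before
 * the buffer hits disk: if the host is little-endian, each element's bytes
 * are reversed in place so the file keeps a fixed byte order. The big-endian
 * file convention is an assumption suggested by the presence of
 * is_bigendian(); the helper names below are illustrative, not the FieldIO
 * API.
 */
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// runtime check of the host byte order (analogous to FieldIO::is_bigendian)
static bool host_is_bigendian()
{
  const std::uint32_t probe = 0x01020304u;
  return *reinterpret_cast<const unsigned char *>(&probe) == 0x01;
}

// reverse the bytes of each of nmemb elements of width size, in place
static void swap_bytes(void *buf, std::size_t size, std::size_t nmemb)
{
  unsigned char *p = static_cast<unsigned char *>(buf);
  for (std::size_t n = 0; n < nmemb; ++n, p += size) {
    std::reverse(p, p + size);
  }
}

int main()
{
  double data[3] = { 1.0, 2.0, 3.0 };

  // convert to the fixed on-disk byte order before writing, and back after
  // reading; on a big-endian host both calls are no-ops
  if (!host_is_bigendian()) swap_bytes(data, sizeof(double), 3);
  // ... write data ... read data ...
  if (!host_is_bigendian()) swap_bytes(data, sizeof(double), 3);

  std::printf("%s host, data[0] = %f\n",
              host_is_bigendian() ? "big-endian" : "little-endian", data[0]);
  return 0;
}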
// FieldIO_Binary_Parallel::initialize (excerpt)
int FieldIO_Binary_Parallel::initialize()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::initialize";

  if (m_is_initialized) return EXIT_SUCCESS;
  // ...
  int *global_dims = new int[ndim];
  // ...
  int *local_dims = new int[ndim];
  // ...
  for (int i = 0; i < ndim; ++i) {
    m_nvol *= local_dims[i];
  }
  // ...
  int *grid_pos = new int[ndim];  // logical coordinates of this process in the process grid
  for (int i = 0; i < ndim; ++i) {
    // ...
  }
  // ...
  int *starts = new int[ndim];    // offset of the local block within the global lattice
  for (int i = 0; i < ndim; ++i) {
    starts[i] = local_dims[i] * grid_pos[i];
  }
  // ...
  // elemental type: one site's worth of data (nin_file doubles) as raw bytes
  ret = MPI_Type_contiguous(sizeof(double) * nin_file, MPI_BYTE, &m_type_vector);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_contiguous failed.\n", _function_name);
    // ...
  }
  ret = MPI_Type_commit(&m_type_vector);
  // ...
  // filetype: this rank's local subvolume tiled into the global lattice
  ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts, MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
  if (ret) {
    vout.general(m_vl, "%s: MPI_Type_create_subarray failed.\n", _function_name);
    // ...
  }
  ret = MPI_Type_commit(&m_type_tiled);
  // ...
  m_is_initialized = true;
  // ...
  delete [] local_dims;
  delete [] global_dims;
  // ...
  vout.detailed(m_vl, "FieldIO_Binary_Parallel via MPI I/O initialize done.\n");
  // ...
}
// FieldIO_Binary_Parallel::finalize (excerpt)
int FieldIO_Binary_Parallel::finalize()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::finalize";

  if (!m_is_initialized) return EXIT_SUCCESS;
  // ...
  ret = MPI_Type_free(&m_type_tiled);
  // ...
  ret = MPI_Type_free(&m_type_vector);
  // ...
  m_is_initialized = false;
  // ...
}
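/*
 * A small sketch of the cleanup pattern finalize() follows: derived datatypes
 * created with MPI_Type_commit are released with MPI_Type_free, which resets
 * the handle to MPI_DATATYPE_NULL, so a guard flag (m_is_initialized above)
 * or a null check keeps the teardown idempotent. Names are illustrative.
 */
#include <mpi.h>
#include <cassert>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);

  MPI_Datatype type_vector = MPI_DATATYPE_NULL;
  MPI_Type_contiguous(18, MPI_DOUBLE, &type_vector);
  MPI_Type_commit(&type_vector);

  // release the type; the handle comes back as MPI_DATATYPE_NULL
  MPI_Type_free(&type_vector);
  assert(type_vector == MPI_DATATYPE_NULL);

  // guarding on the handle makes repeated teardown harmless
  if (type_vector != MPI_DATATYPE_NULL) MPI_Type_free(&type_vector);

  MPI_Finalize();
  return 0;
}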