// excerpt: FieldIO_Binary_Parallel::read_file (MPI I/O reader)
static const char _function_name[] = "FieldIO_Binary_Parallel::read_file";

// I/O buffer for the local portion of the field, stored in file order.
double *buf = new double [nin_file * nex_file * m_nvol];

// Two open variants appear in the source: one passes MPI_COMM_WORLD directly,
// the other the communicator wrapped by Communicator_impl::world().
ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()),
                    MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);

ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()),
                    MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);

// The file view restricts each rank to its own tile of the global lattice
// (m_type_tiled), accessed in units of per-site records (m_type_vector).
ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled,
                        const_cast<char *>("native"), MPI_INFO_NULL);

// Collective read of m_nvol * nex_file per-site records into the local buffer.
ret = MPI_File_read_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);

ret = MPI_File_close(&fh);

// Unpack the buffer into the Field; p walks buf in file order, and the mapping
// from file indices (i, j) to field indices (s, t) is elided in this excerpt.
for (int j = 0; j < nex_file; ++j) {
  for (int isite = 0; isite < m_nvol; ++isite) {
    for (int i = 0; i < nin_file; ++i) {
      // ...
      u->set(s, isite, t, *p++);
    }
  }
}
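The triple loop consumes buf sequentially, which fixes the layout of a per-rank record in the file view: the in-site index runs fastest, then the site index, then the external index. The small helper below captures that layout; file_buffer_index is a name introduced here for illustration and is not part of the class.

// Sketch of the index bookkeeping implied by the unpacking loop above.
#include <cstddef>

static inline size_t file_buffer_index(int i, int isite, int j,
                                       int nin_file, int nvol)
{
  // buf is consumed as: i (fastest) -> isite -> j (slowest)
  return static_cast<size_t>(i)
         + static_cast<size_t>(nin_file) * (static_cast<size_t>(isite)
         + static_cast<size_t>(nvol) * static_cast<size_t>(j));
}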
// excerpt: FieldIO_Binary_Parallel::write_file (MPI I/O writer)
static const char _function_name[] = "FieldIO_Binary_Parallel::write_file";

double *buf = new double [nin_file * nex_file * m_nvol];

// Pack the Field into the buffer in file order; p walks buf, and the mapping
// from field indices (s, t) to file indices (i, j) is elided in this excerpt.
for (int j = 0; j < nex_file; ++j) {
  for (int isite = 0; isite < m_nvol; ++isite) {
    for (int i = 0; i < nin_file; ++i) {
      // ...
      *p++ = u->cmp(s, isite, t);
    }
  }
}

// Convert the buffer to the file byte order where it differs from the machine's.
convert_endian(buf, sizeof(double), nin_file * nex_file * m_nvol);

// As in read_file, two open variants appear in the source.
ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()),
                    MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()),
                    MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled,
                        const_cast<char *>("native"), MPI_INFO_NULL);

// Collective write of the local tile.
ret = MPI_File_write_all(fh, (void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);

// on error, the failure is reported through the verbose output channel:
vout.crucial(m_vl, "%s: MPI_File_write_all failed.\n", _function_name);

ret = MPI_File_close(&fh);
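convert_endian(buf, sizeof(double), nin_file * nex_file * m_nvol) is called before the write so that the buffer matches the byte order of the file. The sketch below shows one generic way to implement such a conversion, keyed on the byte order of the machine; it assumes a big-endian on-disk format and is not the library's actual implementation.

// Generic byte-swap sketch, not the library's convert_endian(); it assumes the
// on-disk format is big-endian and swaps in place on little-endian machines.
#include <cstddef>
#include <algorithm>

static bool machine_is_bigendian()
{
  const unsigned int one = 1;
  return *reinterpret_cast<const unsigned char *>(&one) == 0;
}

static void byteswap_in_place(void *buf, size_t size, size_t nmemb)
{
  unsigned char *p = static_cast<unsigned char *>(buf);
  for (size_t k = 0; k < nmemb; ++k, p += size)
    for (size_t b = 0; b < size / 2; ++b)
      std::swap(p[b], p[size - 1 - b]);
}

static void convert_endian_sketch(void *buf, size_t size, size_t nmemb)
{
  if (!machine_is_bigendian()) byteswap_in_place(buf, size, nmemb);
}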
// excerpt: FieldIO_Binary_Parallel::initialize (builds the MPI derived datatypes)
int FieldIO_Binary_Parallel::initialize()

static const char _function_name[] = "FieldIO_Binary_Parallel::initialize";

if (m_is_initialized) return EXIT_SUCCESS;

// Global and local lattice extents in each of the ndim directions
// (how they are filled is elided in this excerpt).
int *global_dims = new int[ndim];
int *local_dims  = new int[ndim];

// m_nvol accumulates the local volume as the product of the local extents.
for (int i = 0; i < ndim; ++i) {
  m_nvol *= local_dims[i];
}

// Logical coordinate of this rank in the process grid (cf. ipe(dir));
// the assignment is elided in this excerpt.
int *grid_pos = new int[ndim];
for (int i = 0; i < ndim; ++i) {
  // ...
}

// Offset of this rank's tile within the global lattice.
int *starts = new int[ndim];
for (int i = 0; i < ndim; ++i) {
  starts[i] = local_dims[i] * grid_pos[i];
}

// One per-site record: the nin_file doubles stored per site, treated as raw bytes.
ret = MPI_Type_contiguous(sizeof(double) * nin_file, MPI_BYTE, &m_type_vector);
// on error:
vout.general(m_vl, "%s: MPI_Type_Contiguous failed.\n", _function_name);

ret = MPI_Type_commit(&m_type_vector);

// Subarray type selecting this rank's tile of the global, Fortran-ordered lattice.
ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts,
                               MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
// on error:
vout.general(m_vl, "%s: MPI_Type_create_subarray failed.\n", _function_name);

ret = MPI_Type_commit(&m_type_tiled);

m_is_initialized = true;

delete [] local_dims;
delete [] global_dims;

vout.detailed(m_vl, "FieldIO_Binary_Parallel via MPI I/O initialize done.\n");
// excerpt: FieldIO_Binary_Parallel::finalize (releases the MPI derived datatypes)
int FieldIO_Binary_Parallel::finalize()

static const char _function_name[] = "FieldIO_Binary_Parallel::finalize";

if (!m_is_initialized) return EXIT_SUCCESS;

ret = MPI_Type_free(&m_type_tiled);
ret = MPI_Type_free(&m_type_vector);

m_is_initialized = false;
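As a closing illustration, a round-trip consistency check is sketched below. read_file, write_file and cmp are the members listed at the end of this page; how the two Field objects are sized, and the file name, are assumptions made for illustration.

// Hypothetical round-trip check (sketch): write a field, read it back, and
// compare element by element. Field sizing is assumed to be done by the caller.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <string>

void roundtrip_check(FieldIO_Binary_Parallel &fieldio,
                     Field &u_out, Field &u_in,     // same dimensions assumed
                     int nin, int nvol, int nex)
{
  const std::string fname("roundtrip_test.dat");    // illustrative file name

  fieldio.write_file(&u_out, fname);
  fieldio.read_file(&u_in, fname);

  double max_diff = 0.0;
  for (int jex = 0; jex < nex; ++jex)
    for (int site = 0; site < nvol; ++site)
      for (int jin = 0; jin < nin; ++jin)
        max_diff = std::max(max_diff,
                            std::fabs(u_in.cmp(jin, site, jex) - u_out.cmp(jin, site, jex)));

  std::printf("max |read back - written| = %e\n", max_diff);
}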
Referenced members:
  Field (container of Field-type object):
    void set(const int jin, const int site, const int jex, double v)
    double cmp(const int jin, const int site, const int jex) const
  void read_file(Field *v, string filename)
  void write_file(Field *v, string filename)
  static void convert_endian(void *buf, size_t size, size_t nmemb)
  static bool is_bigendian()        (check if machine byte order is big-endian)
  static int ipe(const int dir)     (logical coordinate of current proc)
  void crucial(const char *format, ...)
  void general(const char *format, ...)
  void detailed(const char *format, ...)
  const IO_Format::Format *m_format
  static const std::string class_name
  Bridge::VerboseLevel m_vl