FieldIO_Binary_Parallel::FieldIO_Binary_Parallel(const IO_Format::Format *format)
  : FieldIO(format),
    m_is_initialized(false),
    m_nvol(0), m_nin_file(0), m_nex_file(0)
{}

FieldIO_Binary_Parallel::~FieldIO_Binary_Parallel()
{
  finalize();
}
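
// Note on the I/O scheme: every rank takes part in the collective MPI-IO calls
// below and transfers only its own sublattice. initialize() builds two derived
// datatypes from the lattice geometry: m_type_vector, a single site record of
// m_nin_file doubles, and m_type_tiled, the subarray of the global lattice owned
// by this rank. read_file()/write_file() install these as the file view and then
// issue one collective call each, so no explicit per-rank file offsets are needed.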

void FieldIO_Binary_Parallel::read_file(Field *u, string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::read_file";

  initialize(u);  // set up the layout and MPI datatypes for this field's shape

  double *buf = new double [m_nin_file * m_nvol * m_nex_file];
  if (!buf) {
    vout.crucial(m_vl, "Error at %s: allocate buffer failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  int ret;
  MPI_File fh;

#ifdef USE_BGNET  // build-time switch in the original; the macro name here is assumed
  ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
#else
  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
#endif
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_open failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_set_view failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_read_all(fh, (void *)buf, m_nvol * m_nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_read_all failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_close failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  if (!is_bigendian()) {
    // the file is stored big-endian; byte-swap in place on little-endian hosts
    convert_endian(buf, sizeof(double), m_nvol * m_nin_file * m_nex_file);
  }
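
  // The buffer now holds this rank's data in file order: the in-file component
  // index runs fastest, then the local site index, then the external index.
  // The loop below scatters it into the Field, translating in-file indices
  // (i, j) into in-field indices (s, t) through m_format.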

  double *p = buf;

  for (int j = 0; j < m_nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < m_nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);  // index mapping via m_format; exact signature assumed
        u->set(s, isite, t, *p++);
      }
    }
  }

  delete [] buf;
}
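
// Sketch of typical use (not part of this file; the concrete format object and
// field shape are assumptions). Every rank constructs the same object and calls
// read_file()/write_file() collectively:
//
//   const IO_Format::Format *fmt = /* format chosen by the caller */;
//   FieldIO_Binary_Parallel fio(fmt);
//   Field field(nin, nvol, nex);               // local field, same shape on all ranks
//   fio.read_file(&field, "config.bin");       // collective read
//   fio.write_file(&field, "config_copy.bin"); // collective write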

void FieldIO_Binary_Parallel::write_file(Field *u, string filename)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::write_file";

  initialize(u);

  double *buf = new double [m_nin_file * m_nvol * m_nex_file];
  if (!buf) {
    vout.crucial(m_vl, "Error at %s: allocate buffer failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  double *p = buf;

  for (int j = 0; j < m_nex_file; ++j) {
    for (int isite = 0; isite < m_nvol; ++isite) {
      for (int i = 0; i < m_nin_file; ++i) {
        int s, t;
        m_format->file_to_field(s, t, i, j);  // index mapping via m_format; exact signature assumed
        *p++ = u->cmp(s, isite, t);
      }
    }
  }

  if (!is_bigendian()) {
    // convert to the big-endian file byte order before writing
    convert_endian(buf, sizeof(double), m_nin_file * m_nvol * m_nex_file);
  }

  int ret;
  MPI_File fh;

#ifdef USE_BGNET  // build-time switch in the original; the macro name here is assumed
  ret = MPI_File_open(MPI_COMM_WORLD, const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
#else
  ret = MPI_File_open(Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
#endif
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_open failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_set_view(fh, 0, m_type_vector, m_type_tiled, const_cast<char *>("native"), MPI_INFO_NULL);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_set_view failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_write_all(fh, (void *)buf, m_nvol * m_nex_file, m_type_vector, MPI_STATUS_IGNORE);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_write_all failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_File_close(&fh);
  if (ret) {
    vout.crucial(m_vl, "Error at %s: MPI_File_close failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  delete [] buf;
}

int FieldIO_Binary_Parallel::initialize(const Field *v)
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::initialize";

  // in-file record shape as prescribed by the chosen IO format
  int nin_file = m_format->nin();
  int nex_file = m_format->nex();
  int nvol = v->nvol();

  // reuse the current layout if it already matches this field's shape
  if (m_is_initialized
      && (nvol == m_nvol)
      && (nin_file == m_nin_file)
      && (nex_file == m_nex_file)) {
    return EXIT_SUCCESS;
  }

  clear_layout();  // shape changed: rebuild the layout from scratch

  m_nin_file = nin_file;
  m_nex_file = nex_file;

  const int ndim = CommonParameters::Ndim();  // space-time dimensionality

  // global_dims[] holds the global lattice extents and local_dims[] the
  // per-rank extents, in site units (the assignments are elided in this listing)
  int *global_dims = new int[ndim];

  int *local_dims = new int[ndim];

  m_nvol = 1;
  for (int i = 0; i < ndim; ++i) {
    m_nvol *= local_dims[i];
  }

  // logical coordinate of this rank in the process grid
  int *grid_pos = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    grid_pos[i] = Communicator::ipe(i);  // loop body reconstructed
  }

  // offset of this rank's block inside the global lattice, per direction
  int *starts = new int[ndim];
  for (int i = 0; i < ndim; ++i) {
    starts[i] = local_dims[i] * grid_pos[i];
  }
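
  // Example with assumed numbers: for a 16^3 x 32 global lattice split over a
  // 2 x 2 x 2 x 4 process grid, local_dims = {8, 8, 8, 8}, and the rank at
  // grid_pos = {1, 0, 1, 2} gets starts = {8, 0, 8, 16}. All of these counts
  // are in site records, not bytes, since the subarray is built on top of
  // m_type_vector below.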

  int ret;

  // one lattice-site record: m_nin_file doubles, described as raw bytes so that
  // byte order stays under the control of convert_endian()
  ret = MPI_Type_contiguous(sizeof(double) * m_nin_file, MPI_BYTE, &m_type_vector);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_Type_contiguous failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_Type_commit(&m_type_vector);

  // this rank's tile within the global lattice of site records; Fortran order
  // so that the first index runs fastest, matching the lexicographic site ordering
  ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts, MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
  if (ret) {
    vout.crucial(m_vl, "%s: MPI_Type_create_subarray failed.\n", _function_name);
    exit(EXIT_FAILURE);
  }

  ret = MPI_Type_commit(&m_type_tiled);
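
  // Taken together: one repetition of m_type_tiled spans one global-lattice
  // slab of site records, the file holds m_nex_file such slabs, and each rank
  // reads or writes only its m_nvol * m_nin_file * m_nex_file doubles of the
  // (global volume) * m_nin_file * m_nex_file doubles in the file.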

  delete [] starts;
  delete [] grid_pos;
  delete [] local_dims;
  delete [] global_dims;

  m_is_initialized = true;
  return EXIT_SUCCESS;
}

int FieldIO_Binary_Parallel::clear_layout()
{
  const char _function_name[] = "FieldIO_Binary_Parallel::clear_layout";

  if (m_is_initialized) {
    int ret;

    ret = MPI_Type_free(&m_type_vector);
    if (ret) {
      vout.crucial(m_vl, "%s: MPI_Type_free for type_vector failed.\n", _function_name);
      exit(EXIT_FAILURE);
    }

    ret = MPI_Type_free(&m_type_tiled);
    if (ret) {
      vout.crucial(m_vl, "%s: MPI_Type_free for type_tiled failed.\n", _function_name);
      exit(EXIT_FAILURE);
    }

    m_is_initialized = false;
  }
  return EXIT_SUCCESS;
}

int FieldIO_Binary_Parallel::finalize()
{
  static const char _function_name[] = "FieldIO_Binary_Parallel::finalize";
  clear_layout();  // release the MPI datatypes, if any
  return EXIT_SUCCESS;
}