// ILDG metadata XML template written as the payload of the "ildg-format"
// LIME record (see write_file below).
// printf-style placeholders: %zu -> the <precision> entry, four %d -> the
// global lattice extents lx/ly/lz/lt.
// NOTE(review): fragmentary view -- the closing XML tags and terminating
// semicolon of this string literal lie outside the visible lines.
29 const char ildg_metadata_template[] =
30 "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
31 "<ildgFormat xmlns=\"http://www.lqcd.org/ildg\"\n"
32 " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
33 " xsi:schemaLocation=\"http://www.lqcd.org/ildg http://www.lqcd.org/ildg/filefmt.xsd\">\n"
34 " <version> 1.0 </version>\n"
35 " <field> su3gauge </field>\n"
36 " <precision> %zu </precision>\n"
37 " <lx> %d </lx> <ly> %d </ly> <lz> %d </lz> <lt> %d </lt>\n"
// LIME record-header constants (cf. the LIME file-format specification):
//   LIME_MAGIC : magic number identifying a LIME record header
//                (checked against header.magic in read_lime_header).
//   MB_MASK    : "message begin" bit of the header bitfield -- starts a new
//                message in traverse().
//   ME_MASK    : "message end" bit -- closes the current message.
42 #define LIME_MAGIC ((uint32_t)0x456789ab)
43 #define MB_MASK ((uint16_t)0x8000)
44 #define ME_MASK ((uint16_t)0x4000)
// Per-record bookkeeping collected while traversing a LIME file.
// NOTE(review): the members of LIME_record_info (type, length, offset, ...)
// are declared in lines outside this view -- see their uses in traverse()
// and read_record_content().
56 struct LIME_record_info
// A LIME "message" is an ordered list of records; a LIME file is an
// ordered list of messages.
63 typedef std::list<LIME_record_info> LIME_message_info;
64 typedef std::list<LIME_message_info> LIME_file_info;
// Scan the whole file and collect its record/message layout into file_info.
66 int traverse(MPI_File& fh, LIME_file_info& file_info);
// Read one LIME record header at the current file position.
67 int read_lime_header(MPI_File& fh, LIME_header& header);
// Read `length` bytes of record payload starting at `offset` into buf.
68 int read_lime_content(MPI_File& fh,
const MPI_Offset offset,
char *buf,
const size_t length);
// Find the payload offset of the first record whose type matches `type`;
// stores it in pos. Returns nonzero when found, 0 otherwise.
69 int find_record_offset(
const LIME_file_info& file_info,
const char *
type, MPI_Offset& pos);
// Log a summary of all messages/records found in the file.
71 int report_file_info(
const LIME_file_info& file_info);
// Write a LIME record header; returns the number of bytes written
// (sizeof(LIME_header)) -- presumably 0 on failure, confirm at definition.
73 size_t write_lime_header(MPI_File& fh,
const char *
type,
const size_t length,
const uint16_t flag);
// Write a complete record (header + payload + zero padding to the next
// 8-byte boundary); returns the total number of bytes written.
74 size_t write_lime_record(MPI_File& fh,
const char *
type,
const char *content,
const size_t length,
const uint16_t flag);
// ---- FieldIO_LIME_Parallel::read_file ----------------------------------
// Reads a gauge configuration from a LIME/ILDG file using collective
// MPI-IO. NOTE(review): fragmentary view -- the if(...) guards preceding
// each vout.crucial call, the declarations of ret/fh/pos/s/t/p, and the
// closing braces lie between/outside the visible lines.
82 static const char _function_name[] =
"FieldIO_LIME_Parallel::read_file";
// Flat I/O buffer for the local subvolume: nin_file components per site,
// nex_file external degrees of freedom, m_nvol local sites.
93 double *buf =
new double [nin_file * nex_file * m_nvol];
95 vout.
crucial(
m_vl,
"Error at %s: allocate buffer failed.\n", _function_name);
// Open the file collectively on the world communicator, read-only.
99 ret = MPI_File_open(
Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
101 vout.
crucial(
m_vl,
"Error at %s: MPI_File_open failed.\n", _function_name);
// Walk the LIME records to learn the file layout, then report it.
109 LIME_file_info file_info;
111 traverse(fh, file_info);
113 report_file_info(file_info);
// Locate the payload offset of the ILDG binary-data record.
115 if (!find_record_offset(file_info,
"ildg-binary-data", pos)) {
116 vout.
crucial(
m_vl,
"Error at %s: binary data record not found.\n", _function_name);
// Collective file view: m_type_tiled maps this rank's subarray of the
// global lattice onto the per-site vector type m_type_vector (both built
// in initialize()).
123 ret = MPI_File_set_view(fh, pos, m_type_vector, m_type_tiled, const_cast<char *>(
"native"), MPI_INFO_NULL);
125 vout.
crucial(
m_vl,
"Error at %s: MPI_File_set_view failed.\n", _function_name);
// Collective read of this rank's subvolume.
129 ret = MPI_File_read_all(fh, (
void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
131 vout.
crucial(
m_vl,
"Error at %s: MPI_File_read_all failed.\n", _function_name);
135 ret = MPI_File_close(&fh);
137 vout.
crucial(
m_vl,
"Error at %s: MPI_File_close failed.\n", _function_name);
// Convert the on-disk byte order to host order where they differ.
143 convert_endian(buf,
sizeof(
double), m_nvol * nin_file * nex_file);
// Scatter the flat buffer into the Field object; the i/j -> s/t index
// translation happens in lines outside this view.
149 for (
int j = 0; j < nex_file; ++j) {
150 for (
int isite = 0; isite < m_nvol; ++isite) {
151 for (
int i = 0; i < nin_file; ++i) {
155 u->
set(s, isite, t, *p++);
// ---- FieldIO_LIME_Parallel::write_file ---------------------------------
// Writes a gauge configuration to a LIME/ILDG file using collective
// MPI-IO. NOTE(review): fragmentary view -- if(...) guards before each
// vout.crucial, the metadata/data_length/pos declarations, and closing
// braces lie between/outside the visible lines.
169 static const char _function_name[] =
"FieldIO_LIME_Parallel::write_file";
177 double *buf =
new double [nin_file * nex_file * m_nvol];
179 vout.
crucial(
m_vl,
"Error at %s: allocate buffer failed.\n", _function_name);
// Gather the Field data into the flat, file-ordered buffer.
188 for (
int j = 0; j < nex_file; ++j) {
189 for (
int isite = 0; isite < m_nvol; ++isite) {
190 for (
int i = 0; i < nin_file; ++i) {
194 *p++ = u->
cmp(s, isite, t);
// Convert host byte order to the on-disk representation where they differ.
201 convert_endian(buf,
sizeof(
double), nin_file * nex_file * m_nvol);
207 ret = MPI_File_open(
Communicator_impl::world(), const_cast<char *>(filename.c_str()), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
209 vout.
crucial(
m_vl,
"Error at %s: MPI_File_open failed.\n", _function_name);
// Fill in the ILDG metadata XML (precision and lattice extents).
218 sprintf(metadata, ildg_metadata_template,
// Write the "ildg-format" record (message begin) and then the header of
// the "ildg-binary-data" record (message end); pos accumulates the file
// offset at which the binary payload starts.
225 pos += write_lime_record(fh,
"ildg-format", metadata, strlen(metadata), MB_MASK);
228 pos += write_lime_header(fh,
"ildg-binary-data", data_length, ME_MASK);
235 ret = MPI_File_set_view(fh, pos, m_type_vector, m_type_tiled, const_cast<char *>(
"native"), MPI_INFO_NULL);
237 vout.
crucial(
m_vl,
"Error at %s: MPI_File_set_view failed.\n", _function_name);
// Collective write of this rank's subvolume.
241 ret = MPI_File_write_all(fh, (
void *)buf, m_nvol * nex_file, m_type_vector, MPI_STATUS_IGNORE);
243 vout.
crucial(
m_vl,
"Error at %s: MPI_File_write_all failed.\n", _function_name);
// LIME requires record payloads to be zero-padded to an 8-byte boundary;
// append the padding after the binary data when needed.
248 if (data_length % 8 > 0) {
249 size_t padding_size = (8 - data_length % 8) % 8;
251 const char blank[8] =
"";
252 ret = MPI_File_write_at(fh, pos + data_length, const_cast<char *>(blank), padding_size, MPI_BYTE, MPI_STATUS_IGNORE);
// NOTE(review): `vl` here, not `m_vl` as in every other call in this
// function -- possibly a typo or a shadowing local; confirm.
254 vout.
crucial(
vl,
"Error at %s: write padding failed.\n", _function_name);
258 vout.
general(
m_vl,
"%s: padding %lu bytes added.\n", _function_name, padding_size);
261 ret = MPI_File_close(&fh);
263 vout.
crucial(
m_vl,
"Error at %s: MPI_File_close failed.\n", _function_name);
// ---- FieldIO_LIME_Parallel::initialize ---------------------------------
// One-time construction of the MPI derived datatypes used for tiled
// collective I/O: m_type_vector (one site's worth of doubles) and
// m_type_tiled (this rank's subarray of the global lattice).
// NOTE(review): fragmentary view -- the lattice-size queries that fill
// global_dims/local_dims/grid_pos and several if(ret) guards lie between
// the visible lines.
274 int FieldIO_LIME_Parallel::initialize()
276 static const char _function_name[] =
"FieldIO_LIME_Parallel::initialize";
// Idempotent: repeated calls are no-ops once the types are committed.
281 if (m_is_initialized)
return EXIT_SUCCESS;
286 if ((nin_file == 0) || (nex_file == 0)) {
294 int *global_dims =
new int[ndim];
300 int *local_dims =
new int[ndim];
// Local volume = product of the per-rank lattice extents.
307 for (
int i = 0; i < ndim; ++i) {
308 m_nvol *= local_dims[i];
311 int *grid_pos =
new int[ndim];
312 for (
int i = 0; i < ndim; ++i) {
// Starting global coordinate of this rank's tile in each dimension.
316 int *starts =
new int[ndim];
317 for (
int i = 0; i < ndim; ++i) {
318 starts[i] = local_dims[i] * grid_pos[i];
// Contiguous type spanning the nin_file doubles stored per site.
325 ret = MPI_Type_contiguous(
sizeof(
double) * nin_file, MPI_BYTE, &m_type_vector);
327 vout.
general(
m_vl,
"%s: MPI_Type_Contiguous failed.\n", _function_name);
331 ret = MPI_Type_commit(&m_type_vector);
// Column-major (first index fastest) subarray describing this rank's
// tile of the global lattice, in units of m_type_vector.
338 ret = MPI_Type_create_subarray(ndim, global_dims, local_dims, starts, MPI_ORDER_FORTRAN, m_type_vector, &m_type_tiled);
340 vout.
general(
m_vl,
"%s: MPI_Type_create_subarray failed.\n", _function_name);
344 ret = MPI_Type_commit(&m_type_tiled);
350 m_is_initialized =
true;
// NOTE(review): grid_pos and starts are also new[]'d above, but only
// local_dims and global_dims are deleted in the visible lines -- confirm
// the remaining delete[]s exist in the omitted lines.
354 delete [] local_dims;
355 delete [] global_dims;
357 vout.
detailed(
m_vl,
"FieldIO_LIME_Parallel via MPI I/O initialize done.\n");
// ---- FieldIO_LIME_Parallel::finalize -----------------------------------
// Releases the MPI derived datatypes created by initialize().
// NOTE(review): fragmentary view -- the if(ret) guards after each
// MPI_Type_free are outside the visible lines.
364 int FieldIO_LIME_Parallel::finalize()
366 static const char _function_name[] =
"FieldIO_LIME_Parallel::finalize";
// Nothing to release unless initialize() completed.
368 if (!m_is_initialized)
return EXIT_SUCCESS;
372 ret = MPI_Type_free(&m_type_tiled);
378 ret = MPI_Type_free(&m_type_vector);
384 m_is_initialized =
false;
// Read one LIME record header from the current file position into `header`,
// verifying both the byte count read and the magic number.
// NOTE(review): fragmentary view -- return-value conventions and the
// declarations of status/count are outside the visible lines.
395 int read_lime_header(MPI_File& fh, LIME_header& header)
399 int ret = MPI_File_read(fh, (
void *)&header,
sizeof(LIME_header), MPI_BYTE, &status);
// A short read means EOF or a truncated/corrupt file.
403 MPI_Get_count(&status, MPI_BYTE, &count);
404 if (count !=
sizeof(LIME_header)) {
// NOTE(review): header fields are read raw from disk; any endian
// conversion of header.magic must happen in the omitted lines before
// this comparison -- confirm.
420 if (header.magic != LIME_MAGIC) {
// Walk the file record-by-record from the beginning, grouping records into
// messages via the MB ("message begin") / ME ("message end") bits of the
// header bitfield. NOTE(review): fragmentary view -- the surrounding read
// loop and the declarations of header/pos are outside the visible lines.
430 int traverse(MPI_File& fh, LIME_file_info& file_info)
433 MPI_File_seek(fh, 0, MPI_SEEK_SET);
437 LIME_message_info message_info;
442 int stat = read_lime_header(fh, header);
// Advance past the header just read; pos now points at the payload.
460 pos +=
sizeof(LIME_header);
462 LIME_record_info record_info;
// Raw copy of the header into the record info -- presumably
// LIME_record_info's leading members mirror LIME_header's layout
// (declaration not visible here; confirm).
464 memcpy((
void *)&record_info, (
void *)&header,
sizeof(LIME_record_info));
465 record_info.offset = pos;
// Skip the payload plus its zero padding to the next 8-byte boundary.
468 size_t padding_size = (8 - header.length % 8) % 8;
475 pos += header.length + padding_size;
476 MPI_File_seek(fh, pos, MPI_SEEK_SET);
// MB bit starts a fresh message; ME bit closes it out into file_info.
479 if ((header.bitfield & MB_MASK) == MB_MASK) {
480 message_info.clear();
483 message_info.push_back(record_info);
485 if ((header.bitfield & ME_MASK) == ME_MASK) {
486 file_info.push_back(message_info);
// Search file_info for the first record whose type matches `type` and
// return its payload offset through `pos`. Returns 1 if found, 0 if not.
// NOTE(review): fragmentary view -- the match body that sets pos and
// is_found is outside the visible lines.
496 int find_record_offset(
const LIME_file_info& file_info,
const char *
type, MPI_Offset& pos)
498 bool is_found =
false;
// Outer loop over messages, inner loop over records within each message.
500 for (LIME_file_info::const_iterator p = file_info.begin(); p != file_info.end(); ++p) {
501 for (LIME_message_info::const_iterator q = p->begin(); q != p->end(); ++q) {
// NOTE(review): strncmp over strlen(type) chars is a prefix match, so
// e.g. "ildg-binary" would also match "ildg-binary-data"; fine if type
// names are used consistently, but worth confirming.
502 if (strncmp(q->type, type, strlen(type)) == 0) {
510 return is_found ? 1 : 0;
// Locate the first record of the given type and read its whole payload
// into `content`. NOTE(review): fragmentary view -- the code that copies
// *q into `info` and sets is_found, plus any buffer cleanup, lies outside
// the visible lines.
515 int read_record_content(MPI_File& fh,
const LIME_file_info& file_info,
const char *type, std::string& content)
517 bool is_found =
false;
518 LIME_record_info info;
// Same message/record iteration and prefix-match as find_record_offset.
520 for (LIME_file_info::const_iterator p = file_info.begin(); p != file_info.end(); ++p) {
521 for (LIME_message_info::const_iterator q = p->begin(); q != p->end(); ++q) {
522 if (strncmp(q->type, type, strlen(type)) == 0) {
// +1 leaves room for a terminating NUL for the std::string ctor below.
// NOTE(review): no delete[] of buf and no explicit buf[info.length]='\0'
// are visible in this view -- confirm both exist in the omitted lines.
535 char *buf =
new char [info.length + 1];
536 MPI_File_read_at(fh, info.offset, buf, info.length, MPI_BYTE, &status);
// Verify the full payload arrived.
539 MPI_Get_count(&status, MPI_BYTE, &count);
541 if (count != info.length) {
542 vout.
crucial(
vl,
"Error at %s: read error. content length mismatch.\n", __func__);
546 content = std::string(buf);
// Log a human-readable summary of every message/record found in the file.
// NOTE(review): the per-record output statements are outside this view.
553 int report_file_info(
const LIME_file_info& file_info)
559 for (LIME_file_info::const_iterator p = file_info.begin(); p != file_info.end(); ++p) {
562 for (LIME_message_info::const_iterator q = p->begin(); q != p->end(); ++q) {
// Compose and write a LIME record header for a record of `length` payload
// bytes of type `type`, with `flag` carrying the MB/ME message-boundary
// bits. Returns sizeof(LIME_header) on success.
// NOTE(review): fragmentary view -- the declarations of header/status,
// any endian conversion of the header fields before writing, and the
// error-return path are outside the visible lines.
577 size_t write_lime_header(MPI_File& fh,
const char *type,
const size_t length,
const uint16_t flag)
// Zero-fill first so unused type bytes and reserved fields are 0.
581 memset(&header, 0,
sizeof(LIME_header));
583 header.magic = LIME_MAGIC;
584 header.version = (uint16_t)1;
585 header.bitfield = flag;
// NOTE(review): strncpy with exactly 128 leaves header.type without a
// NUL terminator when strlen(type) >= 128; the memset above only covers
// shorter names.
586 strncpy(header.type, type, 128);
587 header.length = length;
597 int ret = MPI_File_write(fh, (
void *)&header,
sizeof(LIME_header), MPI_BYTE, &status);
604 return sizeof(LIME_header);
// Write a complete LIME record: header, payload, then zero padding to the
// next 8-byte boundary. Returns the total number of bytes written.
// NOTE(review): fragmentary view -- the error-return branches and the
// declaration of status are outside the visible lines.
609 size_t write_lime_record(MPI_File& fh,
const char *type,
const char *content,
const size_t length,
const uint16_t flag)
// Zero-initialized scratch used as the padding source.
611 const char blank[8] =
"";
// A zero return from write_lime_header presumably signals failure --
// its error path is not visible here; confirm.
613 if (write_lime_header(fh, type, length, flag) == 0) {
617 const size_t padding_size = (8 - length % 8) % 8;
620 int ret = MPI_File_write(fh, const_cast<char *>(content), length, MPI_BYTE, &status);
626 if (padding_size > 0) {
627 ret = MPI_File_write(fh, const_cast<char *>(blank), padding_size, MPI_BYTE, &status);
634 return sizeof(LIME_header) + length + padding_size;
static const std::string class_name
void detailed(const char *format,...)
void set(const int jin, const int site, const int jex, double v)
void general(const char *format,...)
Container of a Field-type object.
double cmp(const int jin, const int site, const int jex) const
void write_file(Field *v, const std::string filename)
write data to file.
static int broadcast(size_t size, void *data, int sender)
static int ipe(const int dir)
logical coordinate of the current process.
static bool is_bigendian()
static MPI_Comm & world()
retrieves current communicator.
const IO_Format::Format * m_format
void crucial(const char *format,...)
void read_file(Field *v, const std::string filename)
read data from file.
static void convert_endian(void *buf, size_t size, size_t nmemb)
check if machine byte order is big-endian.
static bool is_primary()
check if the present node is primary in the small communicator.
Bridge::VerboseLevel m_vl