22 #if defined USE_GROUP_SU3
23 #include "fopr_Wilson_impl_SU3.inc"
24 #elif defined USE_GROUP_SU2
25 #include "fopr_Wilson_impl_SU2.inc"
26 #elif defined USE_GROUP_SU_N
27 #include "fopr_Wilson_impl_SU_N.inc"
37 const string str_vlevel = params.
get_string(
"verbose_level");
68 for (
int mu = 0; mu <
m_Ndim; ++mu) {
74 assert(bc.size() ==
m_Ndim);
81 for (
int mu = 0; mu <
m_Ndim; ++mu) {
139 }
else if (
m_repr ==
"Chiral") {
160 (this->*
m_gm5)(v, f);
170 const double *v1 = f.
ptr(0);
171 double *v2 = w.
ptr(0);
182 int is =
m_Nvol * ith / nth;
183 int ns =
m_Nvol * (ith + 1) / nth - is;
185 for (
int site = is; site < is + ns; ++site) {
187 for (
int icc = 0; icc < Nvc; icc++) {
188 int in = Nvc * Nd * site;
189 v2[icc + id1 + in] = v1[icc + id3 + in];
190 v2[icc + id2 + in] = v1[icc + id4 + in];
191 v2[icc + id3 + in] = v1[icc + id1 + in];
192 v2[icc + id4 + in] = v1[icc + id2 + in];
204 const double *v1 = f.
ptr(0);
205 double *v2 = w.
ptr(0);
216 int is =
m_Nvol * ith / nth;
217 int ns =
m_Nvol * (ith + 1) / nth - is;
219 for (
int site = is; site < is + ns; ++site) {
221 for (
int icc = 0; icc < Nvc; icc++) {
222 int in = Nvc * Nd * site;
223 v2[icc + id1 + in] = v1[icc + id1 + in];
224 v2[icc + id2 + in] = v1[icc + id2 + in];
225 v2[icc + id3 + in] = -v1[icc + id3 + in];
226 v2[icc + id4 + in] = -v1[icc + id4 + in];
234 const int mu,
const int nu)
251 (this->*
m_csw)(v, w);
258 assert(w.
nex() == 1);
262 int Ndf = 2 * Nc * Nc;
273 const double *w2 = w.
ptr(0);
274 double *v2 = v.
ptr(0);
287 int is =
m_Nvol * ith / nth;
288 int ns =
m_Nvol * (ith + 1) / nth - is;
290 for (
int site = is; site < is + ns; ++site) {
291 int iv = Nvc * Nd * site;
294 for (
int ic = 0; ic < Nc; ++ic) {
297 int icg = ic * Nvc + ig;
299 v2[icr + id1 + iv] = 0.0;
300 v2[ici + id1 + iv] = 0.0;
301 v2[icr + id2 + iv] = 0.0;
302 v2[ici + id2 + iv] = 0.0;
304 v2[icr + id3 + iv] = 0.0;
305 v2[ici + id3 + iv] = 0.0;
306 v2[icr + id4 + iv] = 0.0;
307 v2[ici + id4 + iv] = 0.0;
310 v2[icr + id1 + iv] -= mult_uv_i(&Bx[icg], &w2[id2 + iv], Nc);
311 v2[ici + id1 + iv] += mult_uv_r(&Bx[icg], &w2[id2 + iv], Nc);
312 v2[icr + id2 + iv] -= mult_uv_i(&Bx[icg], &w2[id1 + iv], Nc);
313 v2[ici + id2 + iv] += mult_uv_r(&Bx[icg], &w2[id1 + iv], Nc);
315 v2[icr + id3 + iv] -= mult_uv_i(&Bx[icg], &w2[id4 + iv], Nc);
316 v2[ici + id3 + iv] += mult_uv_r(&Bx[icg], &w2[id4 + iv], Nc);
317 v2[icr + id4 + iv] -= mult_uv_i(&Bx[icg], &w2[id3 + iv], Nc);
318 v2[ici + id4 + iv] += mult_uv_r(&Bx[icg], &w2[id3 + iv], Nc);
321 v2[icr + id1 + iv] += mult_uv_r(&By[icg], &w2[id2 + iv], Nc);
322 v2[ici + id1 + iv] += mult_uv_i(&By[icg], &w2[id2 + iv], Nc);
323 v2[icr + id2 + iv] -= mult_uv_r(&By[icg], &w2[id1 + iv], Nc);
324 v2[ici + id2 + iv] -= mult_uv_i(&By[icg], &w2[id1 + iv], Nc);
326 v2[icr + id3 + iv] += mult_uv_r(&By[icg], &w2[id4 + iv], Nc);
327 v2[ici + id3 + iv] += mult_uv_i(&By[icg], &w2[id4 + iv], Nc);
328 v2[icr + id4 + iv] -= mult_uv_r(&By[icg], &w2[id3 + iv], Nc);
329 v2[ici + id4 + iv] -= mult_uv_i(&By[icg], &w2[id3 + iv], Nc);
332 v2[icr + id1 + iv] -= mult_uv_i(&Bz[icg], &w2[id1 + iv], Nc);
333 v2[ici + id1 + iv] += mult_uv_r(&Bz[icg], &w2[id1 + iv], Nc);
334 v2[icr + id2 + iv] += mult_uv_i(&Bz[icg], &w2[id2 + iv], Nc);
335 v2[ici + id2 + iv] -= mult_uv_r(&Bz[icg], &w2[id2 + iv], Nc);
337 v2[icr + id3 + iv] -= mult_uv_i(&Bz[icg], &w2[id3 + iv], Nc);
338 v2[ici + id3 + iv] += mult_uv_r(&Bz[icg], &w2[id3 + iv], Nc);
339 v2[icr + id4 + iv] += mult_uv_i(&Bz[icg], &w2[id4 + iv], Nc);
340 v2[ici + id4 + iv] -= mult_uv_r(&Bz[icg], &w2[id4 + iv], Nc);
343 v2[icr + id1 + iv] += mult_uv_i(&Ex[icg], &w2[id2 + iv], Nc);
344 v2[ici + id1 + iv] -= mult_uv_r(&Ex[icg], &w2[id2 + iv], Nc);
345 v2[icr + id2 + iv] += mult_uv_i(&Ex[icg], &w2[id1 + iv], Nc);
346 v2[ici + id2 + iv] -= mult_uv_r(&Ex[icg], &w2[id1 + iv], Nc);
348 v2[icr + id3 + iv] -= mult_uv_i(&Ex[icg], &w2[id4 + iv], Nc);
349 v2[ici + id3 + iv] += mult_uv_r(&Ex[icg], &w2[id4 + iv], Nc);
350 v2[icr + id4 + iv] -= mult_uv_i(&Ex[icg], &w2[id3 + iv], Nc);
351 v2[ici + id4 + iv] += mult_uv_r(&Ex[icg], &w2[id3 + iv], Nc);
354 v2[icr + id1 + iv] -= mult_uv_r(&Ey[icg], &w2[id2 + iv], Nc);
355 v2[ici + id1 + iv] -= mult_uv_i(&Ey[icg], &w2[id2 + iv], Nc);
356 v2[icr + id2 + iv] += mult_uv_r(&Ey[icg], &w2[id1 + iv], Nc);
357 v2[ici + id2 + iv] += mult_uv_i(&Ey[icg], &w2[id1 + iv], Nc);
359 v2[icr + id3 + iv] += mult_uv_r(&Ey[icg], &w2[id4 + iv], Nc);
360 v2[ici + id3 + iv] += mult_uv_i(&Ey[icg], &w2[id4 + iv], Nc);
361 v2[icr + id4 + iv] -= mult_uv_r(&Ey[icg], &w2[id3 + iv], Nc);
362 v2[ici + id4 + iv] -= mult_uv_i(&Ey[icg], &w2[id3 + iv], Nc);
365 v2[icr + id1 + iv] += mult_uv_i(&Ez[icg], &w2[id1 + iv], Nc);
366 v2[ici + id1 + iv] -= mult_uv_r(&Ez[icg], &w2[id1 + iv], Nc);
367 v2[icr + id2 + iv] -= mult_uv_i(&Ez[icg], &w2[id2 + iv], Nc);
368 v2[ici + id2 + iv] += mult_uv_r(&Ez[icg], &w2[id2 + iv], Nc);
370 v2[icr + id3 + iv] -= mult_uv_i(&Ez[icg], &w2[id3 + iv], Nc);
371 v2[ici + id3 + iv] += mult_uv_r(&Ez[icg], &w2[id3 + iv], Nc);
372 v2[icr + id4 + iv] += mult_uv_i(&Ez[icg], &w2[id4 + iv], Nc);
373 v2[ici + id4 + iv] -= mult_uv_r(&Ez[icg], &w2[id4 + iv], Nc);
376 v2[icr + id1 + iv] *= kappa_cSW;
377 v2[ici + id1 + iv] *= kappa_cSW;
378 v2[icr + id2 + iv] *= kappa_cSW;
379 v2[ici + id2 + iv] *= kappa_cSW;
381 v2[icr + id3 + iv] *= kappa_cSW;
382 v2[ici + id3 + iv] *= kappa_cSW;
383 v2[icr + id4 + iv] *= kappa_cSW;
384 v2[ici + id4 + iv] *= kappa_cSW;
394 assert(w.
nex() == 1);
398 int Ndf = 2 * Nc * Nc;
409 const double *w2 = w.
ptr(0);
410 double *v2 = v.
ptr(0);
423 int is =
m_Nvol * ith / nth;
424 int ns =
m_Nvol * (ith + 1) / nth - is;
426 for (
int site = is; site < is + ns; ++site) {
427 int iv = Nvc * Nd * site;
430 for (
int ic = 0; ic < Nc; ++ic) {
433 int icg = ic * Nvc + ig;
435 v2[icr + id1 + iv] = 0.0;
436 v2[ici + id1 + iv] = 0.0;
437 v2[icr + id2 + iv] = 0.0;
438 v2[ici + id2 + iv] = 0.0;
440 v2[icr + id3 + iv] = 0.0;
441 v2[ici + id3 + iv] = 0.0;
442 v2[icr + id4 + iv] = 0.0;
443 v2[ici + id4 + iv] = 0.0;
446 v2[icr + id1 + iv] -= mult_uv_i(&Bx[icg], &w2[id2 + iv], Nc);
447 v2[ici + id1 + iv] += mult_uv_r(&Bx[icg], &w2[id2 + iv], Nc);
448 v2[icr + id2 + iv] -= mult_uv_i(&Bx[icg], &w2[id1 + iv], Nc);
449 v2[ici + id2 + iv] += mult_uv_r(&Bx[icg], &w2[id1 + iv], Nc);
451 v2[icr + id3 + iv] -= mult_uv_i(&Bx[icg], &w2[id4 + iv], Nc);
452 v2[ici + id3 + iv] += mult_uv_r(&Bx[icg], &w2[id4 + iv], Nc);
453 v2[icr + id4 + iv] -= mult_uv_i(&Bx[icg], &w2[id3 + iv], Nc);
454 v2[ici + id4 + iv] += mult_uv_r(&Bx[icg], &w2[id3 + iv], Nc);
457 v2[icr + id1 + iv] += mult_uv_r(&By[icg], &w2[id2 + iv], Nc);
458 v2[ici + id1 + iv] += mult_uv_i(&By[icg], &w2[id2 + iv], Nc);
459 v2[icr + id2 + iv] -= mult_uv_r(&By[icg], &w2[id1 + iv], Nc);
460 v2[ici + id2 + iv] -= mult_uv_i(&By[icg], &w2[id1 + iv], Nc);
462 v2[icr + id3 + iv] += mult_uv_r(&By[icg], &w2[id4 + iv], Nc);
463 v2[ici + id3 + iv] += mult_uv_i(&By[icg], &w2[id4 + iv], Nc);
464 v2[icr + id4 + iv] -= mult_uv_r(&By[icg], &w2[id3 + iv], Nc);
465 v2[ici + id4 + iv] -= mult_uv_i(&By[icg], &w2[id3 + iv], Nc);
468 v2[icr + id1 + iv] -= mult_uv_i(&Bz[icg], &w2[id1 + iv], Nc);
469 v2[ici + id1 + iv] += mult_uv_r(&Bz[icg], &w2[id1 + iv], Nc);
470 v2[icr + id2 + iv] += mult_uv_i(&Bz[icg], &w2[id2 + iv], Nc);
471 v2[ici + id2 + iv] -= mult_uv_r(&Bz[icg], &w2[id2 + iv], Nc);
473 v2[icr + id3 + iv] -= mult_uv_i(&Bz[icg], &w2[id3 + iv], Nc);
474 v2[ici + id3 + iv] += mult_uv_r(&Bz[icg], &w2[id3 + iv], Nc);
475 v2[icr + id4 + iv] += mult_uv_i(&Bz[icg], &w2[id4 + iv], Nc);
476 v2[ici + id4 + iv] -= mult_uv_r(&Bz[icg], &w2[id4 + iv], Nc);
479 v2[icr + id1 + iv] += mult_uv_i(&Ex[icg], &w2[id4 + iv], Nc);
480 v2[ici + id1 + iv] -= mult_uv_r(&Ex[icg], &w2[id4 + iv], Nc);
481 v2[icr + id2 + iv] += mult_uv_i(&Ex[icg], &w2[id3 + iv], Nc);
482 v2[ici + id2 + iv] -= mult_uv_r(&Ex[icg], &w2[id3 + iv], Nc);
484 v2[icr + id3 + iv] += mult_uv_i(&Ex[icg], &w2[id2 + iv], Nc);
485 v2[ici + id3 + iv] -= mult_uv_r(&Ex[icg], &w2[id2 + iv], Nc);
486 v2[icr + id4 + iv] += mult_uv_i(&Ex[icg], &w2[id1 + iv], Nc);
487 v2[ici + id4 + iv] -= mult_uv_r(&Ex[icg], &w2[id1 + iv], Nc);
490 v2[icr + id1 + iv] -= mult_uv_r(&Ey[icg], &w2[id4 + iv], Nc);
491 v2[ici + id1 + iv] -= mult_uv_i(&Ey[icg], &w2[id4 + iv], Nc);
492 v2[icr + id2 + iv] += mult_uv_r(&Ey[icg], &w2[id3 + iv], Nc);
493 v2[ici + id2 + iv] += mult_uv_i(&Ey[icg], &w2[id3 + iv], Nc);
495 v2[icr + id3 + iv] -= mult_uv_r(&Ey[icg], &w2[id2 + iv], Nc);
496 v2[ici + id3 + iv] -= mult_uv_i(&Ey[icg], &w2[id2 + iv], Nc);
497 v2[icr + id4 + iv] += mult_uv_r(&Ey[icg], &w2[id1 + iv], Nc);
498 v2[ici + id4 + iv] += mult_uv_i(&Ey[icg], &w2[id1 + iv], Nc);
501 v2[icr + id1 + iv] += mult_uv_i(&Ez[icg], &w2[id3 + iv], Nc);
502 v2[ici + id1 + iv] -= mult_uv_r(&Ez[icg], &w2[id3 + iv], Nc);
503 v2[icr + id2 + iv] -= mult_uv_i(&Ez[icg], &w2[id4 + iv], Nc);
504 v2[ici + id2 + iv] += mult_uv_r(&Ez[icg], &w2[id4 + iv], Nc);
506 v2[icr + id3 + iv] += mult_uv_i(&Ez[icg], &w2[id1 + iv], Nc);
507 v2[ici + id3 + iv] -= mult_uv_r(&Ez[icg], &w2[id1 + iv], Nc);
508 v2[icr + id4 + iv] -= mult_uv_i(&Ez[icg], &w2[id2 + iv], Nc);
509 v2[ici + id4 + iv] += mult_uv_r(&Ez[icg], &w2[id2 + iv], Nc);
512 v2[icr + id1 + iv] *= kappa_cSW;
513 v2[ici + id1 + iv] *= kappa_cSW;
514 v2[icr + id2 + iv] *= kappa_cSW;
515 v2[ici + id2 + iv] *= kappa_cSW;
517 v2[icr + id3 + iv] *= kappa_cSW;
518 v2[ici + id3 + iv] *= kappa_cSW;
519 v2[icr + id4 + iv] *= kappa_cSW;
520 v2[ici + id4 + iv] *= kappa_cSW;
541 const int mu,
const int nu)
581 =
static_cast<double>(
m_Nc * m_Nd * (2 + 48 *
m_Nc));
582 double flop = flop_site *
static_cast<double>(Lvol);
void(Fopr_CloverTerm::* m_gm5)(Field &, const Field &)
void scal(Field &x, const double a)
scal(x, a): x = a * x
static const std::string class_name
void ah_Field_G(Field_G &W, const int ex)
static int get_num_threads()
returns available number of threads.
const double * ptr(const int jin, const int site, const int jex) const
void mult_csw_chiral(Field &, const Field &)
Field_G m_v2
for calculation of field strength.
void general(const char *format,...)
GammaMatrix get_GM(GMspecies spec)
void multadd_Field_Gnd(Field_G &W, const int ex, const Field_G &U1, const int ex1, const Field_G &U2, const int ex2, const double ff)
Container of Field-type object.
int sg_index(int mu, int nu)
int fetch_double(const string &key, double &value) const
void mult_csw(Field &, const Field &)
void mult_Field_Gdn(Field_G &W, const int ex, const Field_G &U1, const int ex1, const Field_G &U2, const int ex2)
std::vector< GammaMatrix > m_SG
void lower(Field_G &, const Field_G &, const int mu, const int nu)
constructs lower staple in mu-nu plane.
const Field_G * m_U
pointer to gauge configuration.
void set_config(Field *U)
setting pointer to the gauge configuration.
static int get_thread_id()
returns thread id.
Wilson-type fermion field.
void init(std::string repr)
void set_fieldstrength(Field_G &, const int, const int)
void mult_csw_dirac(Field &, const Field &)
void mult_iGM(Field_F &y, const GammaMatrix &gm, const Field_F &x)
gamma matrix multiplication (i is multiplied)
std::vector< int > m_boundary
Bridge::VerboseLevel m_vl
void set_parameters(const Parameters &params)
double flop_count()
this returns the number of floating point operations.
Set of Gamma Matrices: basis class.
void upper(Field_G &, const Field_G &, const int mu, const int nu)
constructs upper staple in mu-nu plane.
void axpy(Field &y, const double a, const Field &x)
axpy(y, a, x): y := a * x + y
void(Fopr_CloverTerm::* m_csw)(Field &, const Field &)
void crucial(const char *format,...)
void gm5_dirac(Field &, const Field &)
void mult_gm5(Field &v, const Field &w)
gamma_5 multiplication. [31 Mar 2017 H.Matsufuru]
void mult_isigma(Field_F &, const Field_F &, const int mu, const int nu)
void gm5_chiral(Field &, const Field &)
string get_string(const string &key) const
int fetch_int_vector(const string &key, vector< int > &value) const
Field_G m_Ez
field strength.
static VerboseLevel set_verbose_level(const std::string &str)
void mult_sigmaF(Field &, const Field &)
void multadd_Field_Gdn(Field_G &W, const int ex, const Field_G &U1, const int ex1, const Field_G &U2, const int ex2, const double ff)
void forward(Field &, const Field &, const int mu)
void mult_Field_Gnd(Field_G &W, const int ex, const Field_G &U1, const int ex1, const Field_G &U2, const int ex2)