#if defined USE_GROUP_SU3
#include "fopr_Wilson_impl_SU3.inc"
#elif defined USE_GROUP_SU2
#include "fopr_Wilson_impl_SU2.inc"
#elif defined USE_GROUP_SU_N
#include "fopr_Wilson_impl_SU_N.inc"
#endif

#ifdef USE_PARAMETERS_FACTORY
const std::string str_vlevel = params.get_string("verbose_level");
 
double kappa_s, kappa_t, cSW_s, cSW_t;
 
err += params.fetch_double("hopping_parameter_spatial", kappa_s);
err += params.fetch_double("hopping_parameter_temporal", kappa_t);
err += params.fetch_double("clover_coefficient_spatial", cSW_s);
err += params.fetch_double("clover_coefficient_temporal", cSW_t);
 
double cSW_s, double cSW_t,
 
for (int mu = 0; mu < m_Ndim; ++mu) {
 
assert(bc.size() == m_Ndim);
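
The boundary-condition vector must supply one entry per direction. A hypothetical usage sketch (the object name and values are illustrative only), assuming the overload whose trailing parameters appear in the fragment above:

  std::vector<int> bc(4);      // one entry per direction, as the assert requires
  bc[0] = bc[1] = bc[2] = 1;   // periodic in x, y, z
  bc[3] = -1;                  // antiperiodic in t
  fopr_csw->set_parameters(kappa_s, kappa_t, cSW_s, cSW_t, bc);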
 
for (int mu = 0; mu < m_Ndim; ++mu) {
 
} else if (m_repr == "Chiral") {
 
(this->*m_gm5)(v, f);
 
const double *v1 = f.ptr(0);
double       *v2 = w.ptr(0);
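
ptr(0) exposes the field's raw double array. From the index arithmetic below, the layout appears to be: interleaved real/imaginary color components (Nvc = 2 Nc doubles) running fastest, then the Nd spin components (offsets id1..id4 in steps of Nvc), then the site index:

  idx(icc, d, site) = icc + Nvc * d + Nvc * Nd * site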
 
int is = m_Nvol * i_thread / Nthread;
int ns = m_Nvol * (i_thread + 1) / Nthread - is;
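
This integer split hands each of the Nthread threads a contiguous block of sites with no gaps and no overlap. A standalone check of that invariant (the sizes are hypothetical):

  #include <cassert>

  int main()
  {
    const int Nvol = 1000, Nthread = 7;
    int covered = 0;
    for (int i_thread = 0; i_thread < Nthread; ++i_thread) {
      int is = Nvol * i_thread / Nthread;
      int ns = Nvol * (i_thread + 1) / Nthread - is;
      assert(is == covered);  // each block starts where the previous one ended
      covered += ns;
    }
    assert(covered == Nvol);  // every site is assigned exactly once
    return 0;
  }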
 
for (int site = is; site < is + ns; ++site) {
  for (int icc = 0; icc < Nvc; icc++) {
    int in = Nvc * Nd * site;

    v2[icc + id1 + in] = v1[icc + id3 + in];
    v2[icc + id2 + in] = v1[icc + id4 + in];
    v2[icc + id3 + in] = v1[icc + id1 + in];
    v2[icc + id4 + in] = v1[icc + id2 + in];
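
The swap of spin pairs (1,2) and (3,4) is multiplication by gamma_5 in the Dirac representation, which in the convention this kernel implies is block-antidiagonal:

  \gamma_5^{\mathrm{Dirac}} = \begin{pmatrix} 0 & I_2 \\ I_2 & 0 \end{pmatrix}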
 
const double *v1 = f.ptr(0);
double       *v2 = w.ptr(0);
 
int is = m_Nvol * i_thread / Nthread;
int ns = m_Nvol * (i_thread + 1) / Nthread - is;
 
for (int site = is; site < is + ns; ++site) {
  for (int icc = 0; icc < Nvc; icc++) {
    int in = Nvc * Nd * site;

    v2[icc + id1 + in] = v1[icc + id1 + in];
    v2[icc + id2 + in] = v1[icc + id2 + in];
    v2[icc + id3 + in] = -v1[icc + id3 + in];
    v2[icc + id4 + in] = -v1[icc + id4 + in];
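
Here gamma_5 is diagonal: the upper spin pair passes through unchanged and the lower pair flips sign, matching the chiral representation

  \gamma_5^{\mathrm{chiral}} = \begin{pmatrix} I_2 & 0 \\ 0 & -I_2 \end{pmatrix}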
 
const int mu, const int nu)
 
(this->*m_csw)(v, w);
 
assert(w.nex() == 1);

const int Nvc  = 2 * Nc;
const int Ndf  = 2 * Nc * Nc;
const int Nvol = w.nvol();

const int id3 = Nvc * 2;
const int id4 = Nvc * 3;
 
const double *w2 = w.ptr(0);
double       *v2 = v.ptr(0);
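
The accumulation below leans on mult_uv_r / mult_uv_i, which return the real and imaginary parts of one row of a color matrix applied to a color vector. A sketch of what such helpers plausibly look like for the interleaved re/im storage used here (an assumption, not necessarily the library's exact code):

  // Re part of matrix row u (Nvc doubles) times color vector v (Nvc doubles)
  static inline double mult_uv_r(const double *u, const double *v, const int Nc)
  {
    double a = 0.0;
    for (int i = 0; i < Nc; ++i) {
      a += u[2 * i] * v[2 * i] - u[2 * i + 1] * v[2 * i + 1];
    }
    return a;
  }

  // Im part of the same product
  static inline double mult_uv_i(const double *u, const double *v, const int Nc)
  {
    double a = 0.0;
    for (int i = 0; i < Nc; ++i) {
      a += u[2 * i] * v[2 * i + 1] + u[2 * i + 1] * v[2 * i];
    }
    return a;
  }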
 
int is = m_Nvol * i_thread / Nthread;
int ns = m_Nvol * (i_thread + 1) / Nthread - is;

for (int site = is; site < is + ns; ++site) {
  int iv = Nvc * Nd * site;

  for (int ic = 0; ic < Nc; ++ic) {
    int icg = ic * Nvc + ig;
 
    // clear the accumulators for this color component
    v2[icr + id1 + iv] = 0.0;
    v2[ici + id1 + iv] = 0.0;
    v2[icr + id2 + iv] = 0.0;
    v2[ici + id2 + iv] = 0.0;
    v2[icr + id3 + iv] = 0.0;
    v2[ici + id3 + iv] = 0.0;
    v2[icr + id4 + iv] = 0.0;
    v2[ici + id4 + iv] = 0.0;
 
    // Bx: couples spin pairs (1,2) and (3,4)
    v2[icr + id1 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id2 + iv], Nc);
    v2[ici + id1 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id2 + iv], Nc);
    v2[icr + id2 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id1 + iv], Nc);
    v2[ici + id2 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id1 + iv], Nc);

    v2[icr + id3 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id4 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id4 + iv], Nc);
    v2[icr + id4 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id3 + iv], Nc);
    v2[ici + id4 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id3 + iv], Nc);

    // By
    v2[icr + id1 + iv] += kappa_cSW_s * mult_uv_r(&By[icg], &w2[id2 + iv], Nc);
    v2[ici + id1 + iv] += kappa_cSW_s * mult_uv_i(&By[icg], &w2[id2 + iv], Nc);
    v2[icr + id2 + iv] -= kappa_cSW_s * mult_uv_r(&By[icg], &w2[id1 + iv], Nc);
    v2[ici + id2 + iv] -= kappa_cSW_s * mult_uv_i(&By[icg], &w2[id1 + iv], Nc);

    v2[icr + id3 + iv] += kappa_cSW_s * mult_uv_r(&By[icg], &w2[id4 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_s * mult_uv_i(&By[icg], &w2[id4 + iv], Nc);
    v2[icr + id4 + iv] -= kappa_cSW_s * mult_uv_r(&By[icg], &w2[id3 + iv], Nc);
    v2[ici + id4 + iv] -= kappa_cSW_s * mult_uv_i(&By[icg], &w2[id3 + iv], Nc);

    // Bz: diagonal in the spin pairs
    v2[icr + id1 + iv] -= kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id1 + iv], Nc);
    v2[ici + id1 + iv] += kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id1 + iv], Nc);
    v2[icr + id2 + iv] += kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id2 + iv], Nc);
    v2[ici + id2 + iv] -= kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id2 + iv], Nc);

    v2[icr + id3 + iv] -= kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id3 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id3 + iv], Nc);
    v2[icr + id4 + iv] += kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id4 + iv], Nc);
    v2[ici + id4 + iv] -= kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id4 + iv], Nc);

    // Ex
    v2[icr + id1 + iv] += kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id2 + iv], Nc);
    v2[ici + id1 + iv] -= kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id2 + iv], Nc);
    v2[icr + id2 + iv] += kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id1 + iv], Nc);
    v2[ici + id2 + iv] -= kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id1 + iv], Nc);

    v2[icr + id3 + iv] -= kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id4 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id4 + iv], Nc);
    v2[icr + id4 + iv] -= kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id3 + iv], Nc);
    v2[ici + id4 + iv] += kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id3 + iv], Nc);

    // Ey
    v2[icr + id1 + iv] -= kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id2 + iv], Nc);
    v2[ici + id1 + iv] -= kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id2 + iv], Nc);
    v2[icr + id2 + iv] += kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id1 + iv], Nc);
    v2[ici + id2 + iv] += kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id1 + iv], Nc);

    v2[icr + id3 + iv] += kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id4 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id4 + iv], Nc);
    v2[icr + id4 + iv] -= kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id3 + iv], Nc);
    v2[ici + id4 + iv] -= kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id3 + iv], Nc);

    // Ez: diagonal in the spin pairs
    v2[icr + id1 + iv] += kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id1 + iv], Nc);
    v2[ici + id1 + iv] -= kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id1 + iv], Nc);
    v2[icr + id2 + iv] -= kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id2 + iv], Nc);
    v2[ici + id2 + iv] += kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id2 + iv], Nc);

    v2[icr + id3 + iv] -= kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id3 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id3 + iv], Nc);
    v2[icr + id4 + iv] += kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id4 + iv], Nc);
    v2[ici + id4 + iv] -= kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id4 + iv], Nc);
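
Collecting the spatial (B) and temporal (E) pieces, the loop realizes an anisotropic clover multiplication of roughly the form below (a reconstruction from the coefficient pattern, with kappa_cSW_s = kappa_s * cSW_s and kappa_cSW_t = kappa_t * cSW_t; signs and normalization follow the code, not a quoted formula):

  v = \Big( \kappa_s\, c_{\mathrm{SW}}^{s} \sum_{i<j\le 3} \sigma_{ij} F_{ij}
          + \kappa_t\, c_{\mathrm{SW}}^{t} \sum_{k\le 3} \sigma_{k4} F_{k4} \Big)\, w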
 
assert(w.nex() == 1);

const int Nvc  = 2 * Nc;
const int Ndf  = 2 * Nc * Nc;
const int Nvol = w.nvol();

const int id3 = Nvc * 2;
const int id4 = Nvc * 3;

const double *w2 = w.ptr(0);
double       *v2 = v.ptr(0);
 
int is = m_Nvol * i_thread / Nthread;
int ns = m_Nvol * (i_thread + 1) / Nthread - is;

for (int site = is; site < is + ns; ++site) {
  int iv = Nvc * Nd * site;

  for (int ic = 0; ic < Nc; ++ic) {
    int icg = ic * Nvc + ig;
 
    // clear the accumulators for this color component
    v2[icr + id1 + iv] = 0.0;
    v2[ici + id1 + iv] = 0.0;
    v2[icr + id2 + iv] = 0.0;
    v2[ici + id2 + iv] = 0.0;
    v2[icr + id3 + iv] = 0.0;
    v2[ici + id3 + iv] = 0.0;
    v2[icr + id4 + iv] = 0.0;
    v2[ici + id4 + iv] = 0.0;
 
    // Bx: same as in the Dirac-representation kernel
    v2[icr + id1 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id2 + iv], Nc);
    v2[ici + id1 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id2 + iv], Nc);
    v2[icr + id2 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id1 + iv], Nc);
    v2[ici + id2 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id1 + iv], Nc);

    v2[icr + id3 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id4 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id4 + iv], Nc);
    v2[icr + id4 + iv] -= kappa_cSW_s * mult_uv_i(&Bx[icg], &w2[id3 + iv], Nc);
    v2[ici + id4 + iv] += kappa_cSW_s * mult_uv_r(&Bx[icg], &w2[id3 + iv], Nc);

    // By
    v2[icr + id1 + iv] += kappa_cSW_s * mult_uv_r(&By[icg], &w2[id2 + iv], Nc);
    v2[ici + id1 + iv] += kappa_cSW_s * mult_uv_i(&By[icg], &w2[id2 + iv], Nc);
    v2[icr + id2 + iv] -= kappa_cSW_s * mult_uv_r(&By[icg], &w2[id1 + iv], Nc);
    v2[ici + id2 + iv] -= kappa_cSW_s * mult_uv_i(&By[icg], &w2[id1 + iv], Nc);

    v2[icr + id3 + iv] += kappa_cSW_s * mult_uv_r(&By[icg], &w2[id4 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_s * mult_uv_i(&By[icg], &w2[id4 + iv], Nc);
    v2[icr + id4 + iv] -= kappa_cSW_s * mult_uv_r(&By[icg], &w2[id3 + iv], Nc);
    v2[ici + id4 + iv] -= kappa_cSW_s * mult_uv_i(&By[icg], &w2[id3 + iv], Nc);

    // Bz
    v2[icr + id1 + iv] -= kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id1 + iv], Nc);
    v2[ici + id1 + iv] += kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id1 + iv], Nc);
    v2[icr + id2 + iv] += kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id2 + iv], Nc);
    v2[ici + id2 + iv] -= kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id2 + iv], Nc);

    v2[icr + id3 + iv] -= kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id3 + iv], Nc);
    v2[ici + id3 + iv] += kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id3 + iv], Nc);
    v2[icr + id4 + iv] += kappa_cSW_s * mult_uv_i(&Bz[icg], &w2[id4 + iv], Nc);
    v2[ici + id4 + iv] -= kappa_cSW_s * mult_uv_r(&Bz[icg], &w2[id4 + iv], Nc);

    // Ex: note the spin pairing (id1,id4), (id2,id3) in this representation
    v2[icr + id1 + iv] += kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id4 + iv], Nc);
    v2[ici + id1 + iv] -= kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id4 + iv], Nc);
    v2[icr + id2 + iv] += kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id3 + iv], Nc);
    v2[ici + id2 + iv] -= kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id3 + iv], Nc);

    v2[icr + id3 + iv] += kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id2 + iv], Nc);
    v2[ici + id3 + iv] -= kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id2 + iv], Nc);
    v2[icr + id4 + iv] += kappa_cSW_t * mult_uv_i(&Ex[icg], &w2[id1 + iv], Nc);
    v2[ici + id4 + iv] -= kappa_cSW_t * mult_uv_r(&Ex[icg], &w2[id1 + iv], Nc);

    // Ey
    v2[icr + id1 + iv] -= kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id4 + iv], Nc);
    v2[ici + id1 + iv] -= kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id4 + iv], Nc);
    v2[icr + id2 + iv] += kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id3 + iv], Nc);
    v2[ici + id2 + iv] += kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id3 + iv], Nc);

    v2[icr + id3 + iv] -= kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id2 + iv], Nc);
    v2[ici + id3 + iv] -= kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id2 + iv], Nc);
    v2[icr + id4 + iv] += kappa_cSW_t * mult_uv_r(&Ey[icg], &w2[id1 + iv], Nc);
    v2[ici + id4 + iv] += kappa_cSW_t * mult_uv_i(&Ey[icg], &w2[id1 + iv], Nc);

    // Ez
    v2[icr + id1 + iv] += kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id3 + iv], Nc);
    v2[ici + id1 + iv] -= kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id3 + iv], Nc);
    v2[icr + id2 + iv] -= kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id4 + iv], Nc);
    v2[ici + id2 + iv] += kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id4 + iv], Nc);

    v2[icr + id3 + iv] += kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id1 + iv], Nc);
    v2[ici + id3 + iv] -= kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id1 + iv], Nc);
    v2[icr + id4 + iv] -= kappa_cSW_t * mult_uv_i(&Ez[icg], &w2[id2 + iv], Nc);
    v2[ici + id4 + iv] += kappa_cSW_t * mult_uv_r(&Ez[icg], &w2[id2 + iv], Nc);
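
The spatial (B) lines are identical to the Dirac-representation kernel, but the temporal (E) lines now couple the pairs (id1, id4), (id2, id3) and so on. That is the expected representation dependence: with

  \sigma_{\mu\nu} = \frac{i}{2}\,[\gamma_\mu, \gamma_\nu]

the sigma_{k4} blocks inherit the block structure of gamma_4, which differs between the Dirac and chiral conventions, while the purely spatial sigma_{ij} are built from the spatial gammas alone and come out the same in both.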
 
const int mu, const int nu)
 
assert(Nthread == 1);
 
double flop = flop_site * static_cast<double>(Lvol);
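
A plausible reading of this line (an inference, not stated in the source): flop_site is the per-site cost of the kernel and Lvol the global lattice volume, as opposed to m_Nvol, the per-process local volume that the site loops above run over. Each site in those loops performs 48 * Nc multiply-accumulate updates, each invoking a mult_uv helper of O(Nc) cost, so flop_site scales as O(Nc^2) and

  \mathrm{flop} = \mathrm{flop\_site} \times L_{\mathrm{vol}}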
 
void Register_int_vector(const string &, const std::vector<int> &)
 
void scal(Field &x, const double a)
scal(x, a): x = a * x 
 
double flop_count()
Returns the number of floating-point operations.
 
static int get_num_threads()
Returns the available number of threads.
 
void Register_string(const string &, const string &)
 
const double * ptr(const int jin, const int site, const int jex) const 
 
void mult_Field_Gdn(Field_G &w, const int ex, const Field_G &u1, const int ex1, const Field_G &u2, const int ex2)
 
Parameters_Fopr_CloverTerm_General()
 
void general(const char *format,...)
 
GammaMatrix get_GM(GMspecies spec)
 
Container of a Field-type object.
 
void init(std::string repr)
 
void set_fieldstrength(Field_G &, const int, const int)
 
void gm5_dirac(Field &, const Field &)
 
Field_G m_v2
Working field for calculation of the field strength.
 
void mult_csw_chiral(Field &, const Field &)
 
static int get_thread_id()
Returns the thread ID.
 
Wilson-type fermion field. 
 
void ah_Field_G(Field_G &w, const int ex)
 
std::vector< GammaMatrix > m_SG
 
void mult_csw_dirac(Field &, const Field &)
 
void multadd_Field_Gdn(Field_G &w, const int ex, const Field_G &u1, const int ex1, const Field_G &u2, const int ex2, const double ff)
 
void mult_Field_Gnd(Field_G &w, const int ex, const Field_G &u1, const int ex1, const Field_G &u2, const int ex2)
 
void mult_iGM(Field_F &y, const GammaMatrix &gm, const Field_F &x)
Gamma matrix multiplication (the result is multiplied by i).
 
void mult_gm5(Field &v, const Field &w)
 
Bridge::VerboseLevel m_vl
 
void set_config(Field *U)
Sets the pointer to the gauge configuration.
 
void mult_isigma(Field_F &, const Field_F &, const int mu, const int nu)
 
void mult_csw(Field &, const Field &)
 
void(Fopr_CloverTerm_General::* m_csw)(Field &, const Field &)
 
void set_parameters(const Parameters &params)
 
void multadd_Field_Gnd(Field_G &w, const int ex, const Field_G &u1, const int ex1, const Field_G &u2, const int ex2, const double ff)
 
void axpy(Field &y, const double a, const Field &x)
axpy(y, a, x): y := a * x + y 
 
void gm5_chiral(Field &, const Field &)
 
void crucial(const char *format,...)
 
const Field_G * m_U
Pointer to the gauge configuration.
 
void mult_sigmaF(Field &, const Field &)
 
void lower(Field_G &, const Field_G &, const int mu, const int nu)
Constructs the lower staple in the mu-nu plane.
 
static bool Register(const std::string &realm, const creator_callback &cb)
 
Field_G m_Ez
Field strength component.
 
std::vector< int > m_boundary
 
void(Fopr_CloverTerm_General::* m_gm5)(Field &, const Field &)
 
void Register_double(const string &, const double)
 
static const std::string class_name
 
int fetch_double(const string &key, double &val) const 
 
string get_string(const string &key) const 
 
void upper(Field_G &, const Field_G &, const int mu, const int nu)
Constructs the upper staple in the mu-nu plane.
 
int sg_index(int mu, int nu)
 
static VerboseLevel set_verbose_level(const std::string &str)
 
int fetch_int_vector(const string &key, std::vector<int> &val) const
 
void forward(Field &, const Field &, const int mu)