10#include "std_make_vector.h"
11#include "gslpp_function_adapter.h"
12#include <boost/bind/bind.hpp>
14#include <gsl/gsl_sf_gegenbauer.h>
15#include <gsl/gsl_sf_expint.h>
17using namespace boost::placeholders;
20: mySM(SM_i), ig(ROOT::Math::Integration::kADAPTIVESINGULAR, ROOT::Math::Integration::kGAUSS51), wf()
31 checkcache_int_tau =
false;
32 checkcache_int_mu =
false;
33 checkcache_int_el =
false;
35 double max_double = std::numeric_limits<double>::max();
37 hA1w1_cache = max_double;
38 rho2_cache = max_double;
39 R1w1_cache = max_double;
40 R2w1_cache = max_double;
41 N_A_cache = max_double;
42 N_1_cache =max_double;
43 N_2_cache = max_double;
44 j_A_cache= max_double;
45 j_0_cache = max_double;
46 j_1_cache = max_double;
47 j_2_cache = max_double;
48 k_A_cache = max_double;
49 k_0_cache = max_double;
50 k_1_cache = max_double;
51 k_2_cache = max_double;
52 l_A_cache = max_double;
54 af0_cache = max_double;
55 af1_cache = max_double;
56 af2_cache = max_double;
57 ag0_cache = max_double;
58 ag1_cache = max_double;
59 ag2_cache = max_double;
60 aF11_cache = max_double;
61 aF12_cache = max_double;
62 aF13_cache = max_double;
63 aF21_cache = max_double;
64 aF22_cache = max_double;
65 aF23_cache = max_double;
67 af_1_cache = max_double;
68 ag_1_cache = max_double;
69 aF1_1_cache = max_double;
70 aF2_1_cache = max_double;
71 af_2_cache = max_double;
72 ag_2_cache = max_double;
73 aF1_2_cache = max_double;
74 aF2_2_cache = max_double;
75 af_3_cache = max_double;
76 ag_3_cache = max_double;
77 aF1_3_cache = max_double;
78 aF2_3_cache = max_double;
79 af_4_cache = max_double;
80 ag_4_cache = max_double;
81 aF1_4_cache = max_double;
82 aF2_4_cache = max_double;
83 af_5_cache = max_double;
84 ag_5_cache = max_double;
85 aF1_5_cache = max_double;
86 aF2_5_cache = max_double;
87 af_6_cache = max_double;
88 ag_6_cache = max_double;
89 aF1_6_cache = max_double;
90 aF2_6_cache = max_double;
91 af_7_cache = max_double;
92 ag_7_cache = max_double;
93 aF1_7_cache = max_double;
94 aF2_7_cache = max_double;
95 af_8_cache = max_double;
96 ag_8_cache = max_double;
97 aF1_8_cache = max_double;
98 aF2_8_cache = max_double;
99 af_9_cache = max_double;
100 ag_9_cache = max_double;
101 aF1_9_cache = max_double;
102 aF2_9_cache = max_double;
103 af_10_cache = max_double;
104 ag_10_cache = max_double;
105 aF1_10_cache = max_double;
106 aF2_10_cache = max_double;
107 bf_1_cache = max_double;
108 bg_1_cache = max_double;
109 bF1_1_cache = max_double;
110 bF2_1_cache = max_double;
111 bf_2_cache = max_double;
112 bg_2_cache = max_double;
113 bF1_2_cache = max_double;
114 bF2_2_cache = max_double;
115 bf_3_cache = max_double;
116 bg_3_cache = max_double;
117 bF1_3_cache = max_double;
118 bF2_3_cache = max_double;
119 bf_4_cache = max_double;
120 bg_4_cache = max_double;
121 bF1_4_cache = max_double;
122 bF2_4_cache = max_double;
123 bf_5_cache = max_double;
124 bg_5_cache = max_double;
125 bF1_5_cache = max_double;
126 bF2_5_cache = max_double;
127 bf_6_cache = max_double;
128 bg_6_cache = max_double;
129 bF1_6_cache = max_double;
130 bF2_6_cache = max_double;
131 bf_7_cache = max_double;
132 bg_7_cache = max_double;
133 bF1_7_cache = max_double;
134 bF2_7_cache = max_double;
135 bf_8_cache = max_double;
136 bg_8_cache = max_double;
137 bF1_8_cache = max_double;
138 bF2_8_cache = max_double;
139 bf_9_cache = max_double;
140 bg_9_cache = max_double;
141 bF1_9_cache = max_double;
142 bF2_9_cache = max_double;
143 bf_10_cache = max_double;
144 bg_10_cache = max_double;
145 bF1_10_cache = max_double;
146 bF2_10_cache = max_double;
148 CS_cache = max_double;
149 CSp_cache = max_double;
150 CP_cache = max_double;
151 CPp_cache = max_double;
152 CV_cache = max_double;
153 CVp_cache = max_double;
154 CA_cache = max_double;
155 CAp_cache = max_double;
156 CT_cache = max_double;
157 CTp_cache = max_double;
159 ig.SetRelTolerance(1.E-6);
160 ig.SetAbsTolerance(1.E-6);
175 if (
CLNflag +
BGLflag +
DMflag !=
true)
throw std::runtime_error(
"MVlnu: Set only one among CLNflag, BGLflag, DMflag to true");
180 <<
"hA1w1" <<
"rho2" <<
"R1w1" <<
"R2w1"
181 <<
"N_A" <<
"N_1" <<
"N_2" <<
"j_A" <<
"j_0" <<
"j_1" <<
"j_2"
182 <<
"k_A" <<
"k_0" <<
"k_1" <<
"k_2" <<
"l_A";
187 <<
"af0" <<
"af1" <<
"af2" <<
"ag0" <<
"ag1" <<
"ag2"
188 <<
"aF11" <<
"aF12" <<
"aF13" <<
"aF21" <<
"aF22" <<
"aF23"
189 <<
"mBcstV1" <<
"mBcstV2" <<
"mBcstV3" <<
"mBcstV4"
190 <<
"mBcstA1" <<
"mBcstA2" <<
"mBcstA3" <<
"mBcstA4"
191 <<
"mBcstP1" <<
"mBcstP2" <<
"mBcstP3"
192 <<
"chiTV" <<
"chiTA" <<
"chiTP" <<
"nI";
197 <<
"af_1" <<
"ag_1" <<
"aF1_1" <<
"aF2_1"
198 <<
"af_2" <<
"ag_2" <<
"aF1_2" <<
"aF2_2"
199 <<
"af_3" <<
"ag_3" <<
"aF1_3" <<
"aF2_3"
200 <<
"af_4" <<
"ag_4" <<
"aF1_4" <<
"aF2_4"
201 <<
"af_5" <<
"ag_5" <<
"aF1_5" <<
"aF2_5"
202 <<
"af_6" <<
"ag_6" <<
"aF1_6" <<
"aF2_6"
203 <<
"af_7" <<
"ag_7" <<
"aF1_7" <<
"aF2_7"
204 <<
"af_8" <<
"ag_8" <<
"aF1_8" <<
"aF2_8"
205 <<
"af_9" <<
"ag_9" <<
"aF1_9" <<
"aF2_9"
206 <<
"af_10" <<
"ag_10" <<
"aF1_10" <<
"aF2_10"
207 <<
"bf_1" <<
"bg_1" <<
"bF1_1" <<
"bF2_1"
208 <<
"bf_2" <<
"bg_2" <<
"bF1_2" <<
"bF2_2"
209 <<
"bf_3" <<
"bg_3" <<
"bF1_3" <<
"bF2_3"
210 <<
"bf_4" <<
"bg_4" <<
"bF1_4" <<
"bF2_4"
211 <<
"bf_5" <<
"bg_5" <<
"bF1_5" <<
"bF2_5"
212 <<
"bf_6" <<
"bg_6" <<
"bF1_6" <<
"bF2_6"
213 <<
"bf_7" <<
"bg_7" <<
"bF1_7" <<
"bF2_7"
214 <<
"bf_8" <<
"bg_8" <<
"bF1_8" <<
"bF2_8"
215 <<
"bf_9" <<
"bg_9" <<
"bF1_9" <<
"bF2_9"
216 <<
"bf_10" <<
"bg_10" <<
"bF1_10" <<
"bF2_10";
219 std::stringstream out;
221 throw std::runtime_error(
"MVlnu: vector " + out.str() +
" not implemented");
229void MVlnu::updateParameters()
245 amplsq_factor = 1./(64.*M_PI*M_PI*M_PI*
MM*
MM*
MM);
253 gslpp::complex norm = 4./sqrt(2.);
255 CV = (*(allcoeff_bclnu[
LO]))(0)/norm*(1.+ale_mub/M_PI*log(
mySM.
getMz()/
mu_b))/2.;
257 CVp = (*(allcoeff_bclnu[
LO]))(1)/norm/2.;
259 CS = (*(allcoeff_bclnu[
LO]))(2)/norm/2.;
260 CSp = (*(allcoeff_bclnu[
LO]))(3)/norm/2.;
265 CT = (*(allcoeff_bclnu[
LO]))(4)/norm/2.;
454 std::stringstream out;
456 throw std::runtime_error(
"MVlnu: vector " + out.str() +
" not implemented");
459 if ((hA1w1 != hA1w1_cache) || (rho2 != rho2_cache) || (R1w1 != R1w1_cache) || (R2w1 != R2w1_cache)
460 || (N_A != N_A_cache) || (N_1 != N_1_cache) || (N_2 != N_2_cache) || (l_A != l_A_cache)
461 || (j_A != j_A_cache) || (j_0 != j_0_cache) || (j_1 != j_1_cache) || (j_2 != j_2_cache)
462 || (k_A != k_A_cache) || (k_0 != k_0_cache) || (k_1 != k_1_cache) || (k_2 != k_2_cache)
463 || (af0 != af0_cache) || (af1 != af1_cache) || (af2 != af2_cache)
464 || (ag0 != ag0_cache) || (ag1 != af1_cache) || (ag2 != af2_cache)
465 || (aF11 != aF11_cache) || (aF12 != aF12_cache) || (aF13 != aF13_cache)
466 || (aF21 != aF21_cache) || (aF22 != aF22_cache) || (aF23 != aF23_cache)
467 || (af_1 != af_1_cache) || (ag_1 != ag_1_cache) || (aF1_1 != aF1_1_cache) || (aF2_1 != aF2_1_cache)
468 || (af_2 != af_2_cache) || (ag_2 != ag_2_cache) || (aF1_2 != aF1_2_cache) || (aF2_2 != aF2_2_cache)
469 || (af_3 != af_3_cache) || (ag_3 != ag_3_cache) || (aF1_3 != aF1_3_cache) || (aF2_3 != aF2_3_cache)
470 || (af_4 != af_4_cache) || (ag_4 != ag_4_cache) || (aF1_4 != aF1_4_cache) || (aF2_4 != aF2_4_cache)
471 || (af_5 != af_5_cache) || (ag_5 != ag_5_cache) || (aF1_5 != aF1_5_cache) || (aF2_5 != aF2_5_cache)
472 || (af_6 != af_6_cache) || (ag_6 != ag_6_cache) || (aF1_6 != aF1_6_cache) || (aF2_6 != aF2_6_cache)
473 || (af_7 != af_7_cache) || (ag_7 != ag_7_cache) || (aF1_7 != aF1_7_cache) || (aF2_7 != aF2_7_cache)
474 || (af_8 != af_8_cache) || (ag_8 != ag_8_cache) || (aF1_8 != aF1_8_cache) || (aF2_8 != aF2_8_cache)
475 || (af_9 != af_9_cache) || (ag_9 != ag_9_cache) || (aF1_9 != aF1_9_cache) || (aF2_9 != aF2_9_cache)
476 || (af_10 != af_10_cache) || (ag_10 != ag_10_cache) || (aF1_10 != aF1_10_cache) || (aF2_10 != aF2_10_cache)
477 || (bf_1 != bf_1_cache) || (bg_1 != bg_1_cache) || (bF1_1 != bF1_1_cache) || (bF2_1 != bF2_1_cache)
478 || (bf_2 != bf_2_cache) || (bg_2 != bg_2_cache) || (bF1_2 != bF1_2_cache) || (bF2_2 != bF2_2_cache)
479 || (bf_3 != bf_3_cache) || (bg_3 != bg_3_cache) || (bF1_3 != bF1_3_cache) || (bF2_3 != bF2_3_cache)
480 || (bf_4 != bf_4_cache) || (bg_4 != bg_4_cache) || (bF1_4 != bF1_4_cache) || (bF2_4 != bF2_4_cache)
481 || (bf_5 != bf_5_cache) || (bg_5 != bg_5_cache) || (bF1_5 != bF1_5_cache) || (bF2_5 != bF2_5_cache)
482 || (bf_6 != bf_6_cache) || (bg_6 != bg_6_cache) || (bF1_6 != bF1_6_cache) || (bF2_6 != bF2_6_cache)
483 || (bf_7 != bf_7_cache) || (bg_7 != bg_7_cache) || (bF1_7 != bF1_7_cache) || (bF2_7 != bF2_7_cache)
484 || (bf_8 != bf_8_cache) || (bg_8 != bg_8_cache) || (bF1_8 != bF1_8_cache) || (bF2_8 != bF2_8_cache)
485 || (bf_9 != bf_9_cache) || (bg_9 != bg_9_cache) || (bF1_9 != bF1_9_cache) || (bF2_9 != bF2_9_cache)
486 || (bf_10 != bf_10_cache) || (bg_10 != bg_10_cache) || (bF1_10 != bF1_10_cache) || (bF2_10 != bF2_10_cache)
487 || (CS != CS_cache) || (CSp != CSp_cache)
488 || (CP != CP_cache) || (CPp != CPp_cache)
489 || (CV != CV_cache) || (CVp != CVp_cache)
490 || (CA != CA_cache) || (CAp != CAp_cache)
491 || (CT != CT_cache) || (CTp != CTp_cache)) {
492 checkcache_int_tau =
false;
493 checkcache_int_mu =
false;
494 checkcache_int_el =
false;
497 if (!checkcache_int_tau || !checkcache_int_mu || !checkcache_int_el) {
499 cached_intJ1s_tau = integrateJ(1, q2min, q2max);
500 cached_intJ1c_tau = integrateJ(2, q2min, q2max);
501 cached_intJ2s_tau = integrateJ(3, q2min, q2max);
502 cached_intJ2c_tau = integrateJ(4, q2min, q2max);
503 cached_intJ3_tau = integrateJ(5, q2min, q2max);
504 cached_intJ6s_tau = integrateJ(8, q2min, q2max);
505 cached_intJ6c_tau = integrateJ(9, q2min, q2max);
506 cached_intJ9_tau = integrateJ(12, q2min, q2max);
507 cached_intJ4_tau = 0.;
508 cached_intJ5_tau = 0.;
509 cached_intJ7_tau = 0.;
510 cached_intJ8_tau = 0.;
518 checkcache_int_tau =
true;
521 cached_intJ1s_mu = integrateJ(1, q2min, q2max);
522 cached_intJ1c_mu = integrateJ(2, q2min, q2max);
523 cached_intJ2s_mu = integrateJ(3, q2min, q2max);
524 cached_intJ2c_mu = integrateJ(4, q2min, q2max);
525 cached_intJ3_mu = integrateJ(5, q2min, q2max);
526 cached_intJ6s_mu = integrateJ(8, q2min, q2max);
527 cached_intJ6c_mu = integrateJ(9, q2min, q2max);
528 cached_intJ9_mu = integrateJ(12, q2min, q2max);
529 cached_intJ4_mu = 0.;
530 cached_intJ5_mu = 0.;
531 cached_intJ7_mu = 0.;
532 cached_intJ8_mu = 0.;
540 checkcache_int_mu =
true;
543 cached_intJ1s_el = integrateJ(1, q2min, q2max);
544 cached_intJ1c_el = integrateJ(2, q2min, q2max);
545 cached_intJ2s_el = integrateJ(3, q2min, q2max);
546 cached_intJ2c_el = integrateJ(4, q2min, q2max);
547 cached_intJ3_el = integrateJ(5, q2min, q2max);
548 cached_intJ6s_el = integrateJ(8, q2min, q2max);
549 cached_intJ6c_el = integrateJ(9, q2min, q2max);
550 cached_intJ9_el = integrateJ(12, q2min, q2max);
551 cached_intJ4_el = 0.;
552 cached_intJ5_el = 0.;
553 cached_intJ7_el = 0.;
554 cached_intJ8_el = 0.;
562 checkcache_int_el =
true;
636 aF1_10_cache = aF1_10;
637 aF2_10_cache = aF2_10;
676 bF1_10_cache = bF1_10;
677 bF2_10_cache = bF2_10;
702double MVlnu::lambda_half(
double a,
double b,
double c)
704 return sqrt(a*a+b*b+c*c-2.*(a*b+a*c+b*c));
// Outer function phi_f(z) of the BGL parametrization for the form factor f;
// weight fixed by unitarity with the axial susceptibility chiTA.
// NOTE(review): the line defining `den` (original line 714) is missing from
// this extract; `den4` below relies on it — confirm against the full source.
710double MVlnu::phi_f(
double z)
712 double prefac = 4. * (
MV_o_MM) /
MM /
MM * sqrt(nI / (3. * M_PI * chiTA));
713 double num = (1. + z) * sqrt((1. - z)*(1. - z)*(1. - z));
715 double den4 = den * den * den*den;
716 return prefac * num / den4;
719double MVlnu::f_BGL(
double q2)
721 double w =
w0 - q2 / (2. *
MM *
MV);
722 double z = (sqrt(w + 1.) - M_SQRT2) / (sqrt(w + 1.) + M_SQRT2);
723 double Pfacf = (z - zA1) / (1. - z * zA1)*(z - zA2) / (1. - z * zA2)*(z - zA3) / (1. - z * zA3)*(z - zA4) / (1. - z * zA4);
724 double phif = phi_f(z);
725 return (af0 + af1 * z + af2 * z * z) / phif / Pfacf;
// Outer function phi_g(z) of the BGL parametrization for the form factor g;
// weight fixed by unitarity with the vector susceptibility chiTV.
// NOTE(review): the line defining `den` (original line 732) is missing from
// this extract; `den4` below relies on it — confirm against the full source.
728double MVlnu::phi_g(
double z)
730 double prefac = sqrt(nI / (3. * M_PI * chiTV));
731 double num = 16. * (
MV_o_MM)*(
MV_o_MM)*(1. + z)*(1. + z) / sqrt(1. - z);
733 double den4 = den * den * den*den;
734 return prefac * num / den4;
737double MVlnu::g_BGL(
double q2)
739 double w =
w0 - q2 / (2. *
MM *
MV);
740 double z = (sqrt(w + 1.) - M_SQRT2) / (sqrt(w + 1.) + M_SQRT2);
741 double Pfacg = (z - zV1) / (1. - z * zV1)*(z - zV2) / (1. - z * zV2)*(z - zV3) / (1. - z * zV3)*(z - zV4) / (1. - z * zV4);
742 double phig = phi_g(z);
743 return (ag0 + ag1 * z + ag2 * z * z) / phig / Pfacg;
// Outer function phi_F1(z) of the BGL parametrization for the form factor F1.
// NOTE(review): the line defining `den` (original line 750) is missing from
// this extract; `den5` below relies on it — confirm against the full source.
746double MVlnu::phi_F1(
double z)
748 double prefac = 4. * (
MV_o_MM) /
MM /
MM /
MM * sqrt(nI / (6. * M_PI * chiTA));
749 double num = (1. + z) * sqrt((1. - z)*(1. - z)*(1. - z)*(1. - z)*(1. - z));
751 double den5 = den * den * den * den*den;
752 return prefac * num / den5;
755double MVlnu::F1_BGL(
double q2)
757 double w =
w0 - q2 / (2. *
MM *
MV);
758 double z = (sqrt(w + 1.) - M_SQRT2) / (sqrt(w + 1.) + M_SQRT2);
759 double PfacF1 = (z - zA1) / (1. - z * zA1)*(z - zA2) / (1. - z * zA2)*(z - zA3) / (1. - z * zA3)*(z - zA4) / (1. - z * zA4);
760 double phiF1 = phi_F1(z);
761 double aF10 = (
MM -
MV)*(phi_F1(0.) / phi_f(0.)) * af0;
762 return (aF10 + aF11 * z + aF12 * z * z + aF13 * z * z * z) / phiF1 / PfacF1;
// Outer function phi_F2(z) of the BGL parametrization for the form factor F2;
// weight fixed by unitarity with the pseudoscalar susceptibility chiTP.
// NOTE(review): the line defining `den` (original line 769) is missing from
// this extract; `den4` below relies on it — confirm against the full source.
765double MVlnu::phi_F2(
double z)
767 double prefac = 8. * sqrt(2.)*(
MV_o_MM)*(
MV_o_MM) * sqrt(nI / (M_PI * chiTP));
768 double num = (1. + z)*(1. + z) / sqrt(1. - z);
770 double den4 = den * den * den*den;
771 return prefac * num / den4;
774double MVlnu::F2_BGL(
double q2)
776 double w =
w0 - q2 / (2. *
MM *
MV);
777 double z = (sqrt(w + 1.) - M_SQRT2) / (sqrt(w + 1.) + M_SQRT2);
778 double z0 = (sqrt(
w0 + 1.) - M_SQRT2) / (sqrt(
w0 + 1.) + M_SQRT2);
779 double PfacF2 = (z - zP1) / (1. - z * zP1)*(z - zP2) / (1. - z * zP2)*(z - zP3) / (1. - z * zP3);
780 double PfacF2z0 = (z0 - zP1) / (1. - z0 * zP1)*(z0 - zP2) / (1. - z0 * zP2)*(z0 - zP3) / (1. - z0 * zP3);
781 double phiF2 = phi_F2(z);
782 double phiF2z0 = phi_F2(z0);
783 double aF20 = PfacF2z0 * phiF2z0 * 2. * F1_BGL(0.) / (
MM *
MM -
MV *
MV) - aF21 * z0 - aF22 * z0*z0;
784 return (aF20 + aF21 * z + aF22 * z * z + aF23 * z * z * z) / phiF2 / PfacF2;
// Axial form factor hA1(w): CLN expansion in z when CLNflag is set, BGL via
// f_BGL when BGLflag is set, otherwise the piecewise-linear Dispersion-Matrix
// (DM) interpolation in ten recoil bins w in [1, ~1.5].
// NOTE(review): the DM-branch opening and the declaration of `f_fac`
// (original lines 793/795) are missing from this extract — confirm against
// the full source.
787double MVlnu::hA1(
double q2)
789 double w =
w0 - q2 / (2. *
MM *
MV);
790 double z = (sqrt(w + 1.) - M_SQRT2) / (sqrt(w + 1.) + M_SQRT2);
791 if (
CLNflag)
return hA1w1 * N_A * (1. - j_A * 8. * rho2 * z + k_A * (53. * rho2 - 15.) * z * z - l_A * (231. * rho2 - 91.) * z * z * z);
792 else if (
BGLflag)
return(f_BGL(q2) / sqrt(
MM *
MV) / (1. + w));
794 double w =
w0 - q2 / (2. *
MM *
MV);
796 if (w<1.05) {f_fac = af_1 + bf_1 * w;}
797 else if (w<1.10) {f_fac = af_2 + bf_2 * w;}
798 else if (w<1.15) {f_fac = af_3 + bf_3 * w;}
799 else if (w<1.20) {f_fac = af_4 + bf_4 * w;}
800 else if (w<1.25) {f_fac = af_5 + bf_5 * w;}
801 else if (w<1.30) {f_fac = af_6 + bf_6 * w;}
802 else if (w<1.35) {f_fac = af_7 + bf_7 * w;}
803 else if (w<1.40) {f_fac = af_8 + bf_8 * w;}
804 else if (w<1.45) {f_fac = af_9 + bf_9 * w;}
805 else {f_fac = af_10 + bf_10 * w;}
806 return f_fac / sqrt(
MM *
MV) / (1. + w);
811double MVlnu::R1(
double q2)
813 double w =
w0 - q2 / (2. *
MM *
MV);
814 return N_1 * R1w1 - j_1 * 0.12 * (w - 1.) + k_1 * 0.05 * (w - 1.)*(w - 1.);
817double MVlnu::R2(
double q2)
819 double w =
w0 - q2 / (2. *
MM *
MV);
820 return N_2 * R2w1 + j_2 * 0.11 * (w - 1.) - k_2 * 0.06 * (w - 1.)*(w - 1.);
// CLN form-factor ratio R0(w): its value at w = 1 is reconstructed from the
// exact kinematic relation between R0 and R2 at q2 = 0, then expanded in
// (w - 1) with the j_0/k_0 deviation parameters.
// NOTE(review): original lines 824/826/829/831 are missing from this extract
// (presumably braces/blank lines) — confirm against the full source.
823double MVlnu::R0(
double q2)
825 double w =
w0 - q2 / (2. *
MM *
MV);
827 double R2q2at0 = R2(0.);
828 double R0q2at0 = (
MM +
MV - (
MM -
MV) * R2q2at0) / (2. *
MV);
830 double R0w1 = R0q2at0 + j_0 * 0.11 * (
w0 - 1.) - k_0 * 0.01 * (
w0 - 1.)*(
w0 - 1.);
832 return R0w1 - j_0 * 0.11 * (w - 1.) + k_0 * 0.01 * (w - 1.)*(w - 1.);
// Vector form factor V(q2): via R1*hA1 (CLN), via g_BGL (BGL), or the
// piecewise-linear DM interpolation in ten recoil bins otherwise.
// NOTE(review): the DM-branch opening and the declaration of `g_fac`
// (original lines 839/841) are missing from this extract — confirm against
// the full source.
835double MVlnu::V(
double q2)
837 if (
CLNflag)
return R1(q2) /
RV * hA1(q2);
838 else if (
BGLflag)
return (
MM +
MV) * g_BGL(q2) / 2.;
840 double w =
w0 - q2 / (2. *
MM *
MV);
842 if (w<1.05) {g_fac = ag_1 + bg_1 * w;}
843 else if (w<1.10) {g_fac = ag_2 + bg_2 * w;}
844 else if (w<1.15) {g_fac = ag_3 + bg_3 * w;}
845 else if (w<1.20) {g_fac = ag_4 + bg_4 * w;}
846 else if (w<1.25) {g_fac = ag_5 + bg_5 * w;}
847 else if (w<1.30) {g_fac = ag_6 + bg_6 * w;}
848 else if (w<1.35) {g_fac = ag_7 + bg_7 * w;}
849 else if (w<1.40) {g_fac = ag_8 + bg_8 * w;}
850 else if (w<1.45) {g_fac = ag_9 + bg_9 * w;}
851 else {g_fac = ag_10 + bg_10 * w;}
852 return (
MM +
MV) * g_fac / 2.;
// Pseudoscalar form factor A0(q2): via R0*hA1 (CLN), via F2_BGL (BGL), or
// the piecewise-linear DM interpolation of F2 otherwise.
// NOTE(review): the DM-branch opening, the `F2_fac` declaration, and the
// final return statement (original lines ~864/866/877+) are missing from
// this extract — confirm against the full source.
857double MVlnu::A0(
double q2)
859 double w =
w0 - q2 / (2. *
MM *
MV);
861 if (
CLNflag)
return R0(q2) * (w + 1.) *
RV / 2. * hA1(q2);
863 else if (
BGLflag)
return F2_BGL(q2) / 2.;
865 double w =
w0 - q2 / (2. *
MM *
MV);
867 if (w<1.05) {F2_fac = aF2_1 + bF2_1 * w;}
868 else if (w<1.10) {F2_fac = aF2_2 + bF2_2 * w;}
869 else if (w<1.15) {F2_fac = aF2_3 + bF2_3 * w;}
870 else if (w<1.20) {F2_fac = aF2_4 + bF2_4 * w;}
871 else if (w<1.25) {F2_fac = aF2_5 + bF2_5 * w;}
872 else if (w<1.30) {F2_fac = aF2_6 + bF2_6 * w;}
873 else if (w<1.35) {F2_fac = aF2_7 + bF2_7 * w;}
874 else if (w<1.40) {F2_fac = aF2_8 + bF2_8 * w;}
875 else if (w<1.45) {F2_fac = aF2_9 + bF2_9 * w;}
876 else {F2_fac = aF2_10 + bF2_10 * w;}
// Axial form factor A1(q2), proportional to (w + 1) * hA1(q2).
// NOTE(review): original lines 883-884 (between signature and body) are
// missing from this extract — confirm nothing besides the opening brace was
// dropped.
882double MVlnu::A1(
double q2)
885 double w =
w0 - q2 / (2. *
MM *
MV);
886 return (w + 1.) *
RV / 2. * hA1(q2);
// Axial form factor A2(q2): via R2*hA1 (CLN) or reconstructed from the DM
// piecewise-linear f and F1 interpolations otherwise.
// NOTE(review): the BGL branch and the declarations of `f_fac`/`F1_fac`
// (original lines 893-894 and 896-897) are missing from this extract —
// confirm against the full source.
889double MVlnu::A2(
double q2)
891 double w =
w0 - q2 / (2. *
MM *
MV);
892 if (
CLNflag)
return R2(q2) /
RV * hA1(q2);
895 double w =
w0 - q2 / (2. *
MM *
MV);
898 if (w<1.05) {f_fac = af_1 + bf_1 * w; F1_fac = aF1_1 + bF1_1 * w;}
899 else if (w<1.10) {f_fac = af_2 + bf_2 * w; F1_fac = aF1_2 + bF1_2 * w;}
900 else if (w<1.15) {f_fac = af_3 + bf_3 * w; F1_fac = aF1_3 + bF1_3 * w;}
901 else if (w<1.20) {f_fac = af_4 + bf_4 * w; F1_fac = aF1_4 + bF1_4 * w;}
902 else if (w<1.25) {f_fac = af_5 + bf_5 * w; F1_fac = aF1_5 + bF1_5 * w;}
903 else if (w<1.30) {f_fac = af_6 + bf_6 * w; F1_fac = aF1_6 + bF1_6 * w;}
904 else if (w<1.35) {f_fac = af_7 + bf_7 * w; F1_fac = aF1_7 + bF1_7 * w;}
905 else if (w<1.40) {f_fac = af_8 + bf_8 * w; F1_fac = aF1_8 + bF1_8 * w;}
906 else if (w<1.45) {f_fac = af_9 + bf_9 * w; F1_fac = aF1_9 + bF1_9 * w;}
907 else {f_fac = af_10 + bf_10 * w; F1_fac = aF1_10 + bF1_10 * w;}
908 return (
MM +
MV) / 2. / (w * w - 1.) /
MM /
MV * ((w -
MV_o_MM) * f_fac - F1_fac /
MM);
// Form-factor combination A12(q2).
// NOTE(review): the entire body of this function (original lines 914+) is
// missing from this extract — consult the full source.
913double MVlnu::A12(
double q2)
920double MVlnu::T1(
double q2)
922 double delta_T1 = 0.;
923 return (
Mb +
Mc) / (
MM +
MV) * V(q2)*(1. + delta_T1);
926double MVlnu::T2(
double q2)
928 double delta_T2 = 0.;
929 return (
Mb -
Mc) / (
MM -
MV) * A1(q2)*(1. + delta_T2);
// Tensor form-factor combination T23(q2), built from A12(q2).
// NOTE(review): the beginning of the return expression (original line 935)
// is missing from this extract — the statement below is truncated; consult
// the full source.
932double MVlnu::T23(
double q2)
934 double delta_T23 = 0.;
936 8 *
MM *
MV * (
MV *
MV -
MM *
MM) * A12(q2)) / (4. *
MM * (
MV -
MM) *
MV * q2)*(1. + delta_T23);
942gslpp::complex MVlnu::HV0(
double q2)
944 return 4. * gslpp::complex::i() *
MM *
MV / (sqrt(q2)*(
MM +
MV))*((CV - CVp)*(
MM +
MV) * A12(q2) +
Mb * (C7 - C7p) * T23(q2));
947gslpp::complex MVlnu::HVp(
double q2)
949 return gslpp::complex::i()*((((CV + CVp) * lambda_half(
MM*
MM,
MV*
MV, q2) * V(q2)-(
MM +
MV)*(
MM +
MV)*(CV - CVp) * A1(q2))) / (2. * (
MM +
MV))
950 + (
Mb / q2)*((C7 + C7p) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(C7 - C7p)*(
MM *
MM -
MV *
MV) * T2(q2)));
953gslpp::complex MVlnu::HVm(
double q2)
955 return gslpp::complex::i()*(((-(CV + CVp) * lambda_half(
MM*
MM,
MV*
MV, q2) * V(q2)-(
MM +
MV)*(
MM +
MV)*(CV - CVp) * A1(q2))) / (2. * (
MM +
MV))
956 + (
Mb / q2)*(-(C7 + C7p) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(C7 - C7p)*(
MM *
MM -
MV *
MV) * T2(q2)));
959gslpp::complex MVlnu::HAp(
double q2)
961 return gslpp::complex::i()*((CA + CAp) * lambda_half(
MM*
MM,
MV*
MV, q2) * V(q2)-(
MM +
MV)*(
MM +
MV)*(CA - CAp) * A1(q2)) / (2. * (
MM +
MV));
964gslpp::complex MVlnu::HAm(
double q2)
966 return gslpp::complex::i()*(-(CA + CAp) * lambda_half(
MM*
MM,
MV*
MV, q2) * V(q2)-(
MM +
MV)*(
MM +
MV)*(CA - CAp) * A1(q2)) / (2. * (
MM +
MV));
969gslpp::complex MVlnu::HA0(
double q2)
971 return 4. * gslpp::complex::i() *
MV *
MM / (sqrt(q2))*(CA - CAp) * A12(q2);
974gslpp::complex MVlnu::HP(
double q2)
976 return gslpp::complex::i() * lambda_half(
MM*
MM,
MV*
MV, q2) / 2. * ((CP - CPp) / (
Mb +
Mc)+(
Mlep +
Mnu) / q2 * (CA - CAp)) * A0(q2);
979gslpp::complex MVlnu::HS(
double q2)
981 return gslpp::complex::i() * lambda_half(
MM*
MM,
MV*
MV, q2) / 2. * ((CS - CSp) / (
Mb +
Mc)+(
Mlep -
Mnu) / q2 * (CV - CVp)) * A0(q2);
984gslpp::complex MVlnu::HT0(
double q2)
986 return 2. * M_SQRT2 * (
MM *
MV) / (
MM +
MV)*(CT + CTp) * T23(q2);
989gslpp::complex MVlnu::HT0t(
double q2)
991 return 2. * (
MM *
MV) / (
MM +
MV)*(CT - CTp) * T23(q2);
994gslpp::complex MVlnu::HTp(
double q2)
996 return ((CT - CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT + CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (sqrt(2. * q2));
999gslpp::complex MVlnu::HTpt(
double q2)
1001 return ((CT + CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT - CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (2. * sqrt(q2));
1004gslpp::complex MVlnu::HTm(
double q2)
1006 return (-(CT - CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT + CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (sqrt(2. * q2));
1009gslpp::complex MVlnu::HTmt(
double q2)
1011 return (-(CT + CTp) * lambda_half(
MM*
MM,
MV*
MV, q2) * T1(q2)-(CT - CTp)*(
MM *
MM -
MV *
MV) * T2(q2)) / (2. * sqrt(q2));
// Angular moment G^{000}(q2), built from bilinears of the helicity
// amplitudes and the lepton/neutrino energies in the dilepton rest frame.
// NOTE(review): the line defining `lambda_lep` (original line 1020) is
// missing from this extract; the statements below rely on it — confirm
// against the full source.
1017gslpp::complex MVlnu::G000(
double q2)
1019 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1021 double lambda_lep2 = lambda_lep*lambda_lep;
1022 double Elep = sqrt(
Mlep *
Mlep + lambda_lep2 / (4. * q2));
1023 double Enu = sqrt(
Mnu *
Mnu + lambda_lep2 / (4. * q2));
1024 double Gprefactor = lambda_MM * lambda_lep / q2;
1026 return Gprefactor * (4. / 9. * (3. * Elep * Enu + lambda_lep2 / (4. * q2))*(HVp(q2).abs2() + HVm(q2).abs2() + HV0(q2).abs2() + HAp(q2).abs2() + HAm(q2).abs2() + HA0(q2).abs2()) +
1027 4. *
Mlep *
Mnu / 3. * (HVp(q2).abs2() + HVm(q2).abs2() + HV0(q2).abs2() - HAp(q2).abs2() - HAm(q2).abs2() - HA0(q2).abs2()) +
1028 4. / 3. * ((Elep * Enu -
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HS(q2).abs2()+(Elep * Enu +
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HP(q2).abs2()) +
1029 16. / 9. * (3. * (Elep * Enu +
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*(HTpt(q2).abs2() + HTmt(q2).abs2() + HT0t(q2).abs2()) +
1030 8. / 9. * (3. * (Elep * Enu -
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*(HTp(q2).abs2() + HTm(q2).abs2() + HT0(q2).abs2()) +
1031 16. / 3. * (
Mlep * Enu +
Mnu * Elep)*(HVp(q2) * HTpt(q2).conjugate() + HVm(q2) * HTmt(q2).conjugate() + HV0(q2) * HT0t(q2).conjugate()).imag() +
1032 8. * M_SQRT2 / 3. * (
Mlep * Enu -
Mnu * Elep)*(HAp(q2) * HTp(q2).conjugate() + HAm(q2) * HTm(q2).conjugate() + HA0(q2) * HT0(q2).conjugate()).imag());
// Angular moment G^{010}(q2): vector-axial and (pseudo)scalar-tensor
// interference terms.
// NOTE(review): the line defining `lambda_lep` (original line 1038) is
// missing from this extract — confirm against the full source.
1035gslpp::complex MVlnu::G010(
double q2)
1037 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1039 double Gprefactor = lambda_MM * lambda_lep / q2;
1041 return Gprefactor * 4. / 3. * lambda_lep * ((HVp(q2) * HAp(q2).conjugate() - HVm(q2) * HAm(q2).conjugate()).real()+
1042 (2. * M_SQRT2) / q2 * (
Mlep *
Mlep -
Mnu *
Mnu)*(HTp(q2) * HTpt(q2).conjugate() - HTm(q2) * HTmt(q2).conjugate()).real() +
1043 2. * (
Mlep +
Mnu) / sqrt(q2)*(HAp(q2) * HTpt(q2).conjugate() - HAm(q2) * HTmt(q2).conjugate()).imag() +
1044 M_SQRT2 * (
Mlep -
Mnu) / sqrt(q2)*(HVp(q2) * HTp(q2).conjugate() - HVm(q2) * HTm(q2).conjugate()).imag()-
1045 (
Mlep -
Mnu) / sqrt(q2)*(HA0(q2) * HP(q2).conjugate()).real()-(
Mlep +
Mnu) / sqrt(q2)*(HV0(q2) * HS(q2).conjugate()).real()+
1046 (M_SQRT2 * HT0(q2) * HP(q2).conjugate() + 2. * HT0t(q2) * HS(q2).conjugate()).imag());
// Angular moment G^{020}(q2).
// NOTE(review): the line defining `lambda_lep` (original line 1053) is
// missing from this extract — confirm against the full source.
1050gslpp::complex MVlnu::G020(
double q2)
1052 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1054 double lambda_lep2 = lambda_lep*lambda_lep;
1055 double Gprefactor = lambda_MM * lambda_lep / q2;
1057 return -Gprefactor * 2. / 9. * lambda_lep2 / q2 * ((-HVp(q2).abs2() - HVm(q2).abs2() + 2. * HV0(q2).abs2() - HAp(q2).abs2() - HAm(q2).abs2() + 2. * HA0(q2).abs2()) -
1058 2. * (2. * HT0(q2).abs2() - HTp(q2).abs2() - HTm(q2).abs2()) - 4. * (2. * HT0t(q2).abs2() - HTpt(q2).abs2() - HTmt(q2).abs2()));
// Angular moment G^{200}(q2).
// NOTE(review): the line defining `lambda_lep` (original line 1064) is
// missing from this extract — confirm against the full source.
1061gslpp::complex MVlnu::G200(
double q2)
1063 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1065 double lambda_lep2 = lambda_lep*lambda_lep;
1066 double Elep = sqrt(
Mlep *
Mlep + lambda_lep2 / (4. * q2));
1067 double Enu = sqrt(
Mnu *
Mnu + lambda_lep2 / (4. * q2));
1068 double Gprefactor = lambda_MM * lambda_lep / q2;
1070 return Gprefactor * (-4. / 9. * (3. * Elep * Enu + lambda_lep2 / (4. * q2))*(HVp(q2).abs2() + HVm(q2).abs2() - 2. * HV0(q2).abs2() + HAp(q2).abs2() + HAm(q2).abs2() - 2. * HA0(q2).abs2()) -
1071 4. / 3. *
Mlep *
Mnu * (HVp(q2).abs2() + HVm(q2).abs2() - 2. * HV0(q2).abs2() - HAp(q2).abs2() - HAm(q2).abs2() + 2. * HA0(q2).abs2()) +
1072 8. / 3. * (Elep * Enu -
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HS(q2).abs2() + 8. / 3. * (Elep * Enu +
Mlep *
Mnu + lambda_lep2 / (4. * q2)) * HP(q2).abs2() -
1073 16. / 9. * (3. * (Elep * Enu +
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*(HTpt(q2).abs2() + HTmt(q2).abs2() - 2. * HT0t(q2).abs2()) - 8. / 9. * (3. * (Elep * Enu -
Mlep *
Mnu) - lambda_lep2 / (4. * q2))*
1074 (HTp(q2).abs2() + HTm(q2).abs2() - 2 * HT0(q2).abs2()) - 16. / 3. * (
Mlep * Enu +
Mnu * Elep)*(HVp(q2) * HTpt(q2).conjugate() + HVm(q2) * HTmt(q2).conjugate() - 2. * HV0(q2) * HT0t(q2).conjugate()).imag() -
1075 8. * M_SQRT2 / 3. * (
Mlep * Enu -
Mnu * Elep)*(HAp(q2) * HTp(q2).conjugate() + HAm(q2) * HTm(q2).conjugate() - 2. * HA0(q2) * HT0(q2).conjugate()).imag());
// Angular moment G^{210}(q2).
// NOTE(review): the line defining `lambda_lep` (original line 1081) is
// missing from this extract — confirm against the full source.
1078gslpp::complex MVlnu::G210(
double q2)
1080 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1082 double Gprefactor = lambda_MM * lambda_lep / q2;
1084 return -Gprefactor * 4. * lambda_lep / 3. * ((HVp(q2) * HAp(q2).conjugate() - HVm(q2) * HAm(q2).conjugate()).real() +
1085 2. * M_SQRT2 * (
Mlep *
Mlep -
Mnu *
Mnu) / q2 * (HTp(q2) * HTpt(q2).conjugate() - HTm(q2) * HTmt(q2).conjugate()).real() +
1086 2. * (
Mlep +
Mnu) / sqrt(q2)*(HAp(q2) * HTpt(q2).conjugate() - HAm(q2) * HTmt(q2).conjugate()).imag() +
1087 M_SQRT2 * (
Mlep -
Mnu) / sqrt(q2)*(HVp(q2) * HTp(q2).conjugate() - HVm(q2) * HTm(q2).conjugate()).imag() +
1088 2. * (
Mlep -
Mnu) / sqrt(q2)*(HA0(q2) * HP(q2).conjugate()).real() + 2. * (
Mlep +
Mnu) / sqrt(q2)*(HV0(q2) * HS(q2).conjugate()).real() -
1089 2. * (M_SQRT2 * HT0(q2) * HP(q2).conjugate() + 2. * HT0t(q2) * HS(q2).conjugate()).imag());
// Angular moment G^{220}(q2).
// NOTE(review): the line defining `lambda_lep` (original line 1095) is
// missing from this extract — confirm against the full source.
1092gslpp::complex MVlnu::G220(
double q2)
1094 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1096 double lambda_lep2 = lambda_lep*lambda_lep;
1097 double Gprefactor = lambda_MM * lambda_lep / q2;
1099 return -Gprefactor * 2. / 9. * lambda_lep2 / q2 * (HVp(q2).abs2() + HVm(q2).abs2() + 4. * HV0(q2).abs2() + HAp(q2).abs2() +
1100 HAm(q2).abs2() + 4. * HA0(q2).abs2() - 2. * (HTp(q2).abs2() + HTm(q2).abs2() + 4. * HT0(q2).abs2()) -
1101 4. * (HTpt(q2).abs2() + HTmt(q2).abs2() + 4. * HT0t(q2).abs2()));
// Angular moment G^{211}(q2): interference of transverse and longitudinal
// helicity amplitudes.
// NOTE(review): the line defining `lambda_lep` (original line 1107) is
// missing from this extract — confirm against the full source.
1104gslpp::complex MVlnu::G211(
double q2)
1106 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1108 double Gprefactor = lambda_MM * lambda_lep / q2;
1110 return Gprefactor * 4. / sqrt(3.) * lambda_lep * (HVp(q2) * HA0(q2).conjugate() + HAp(q2) * HV0(q2).conjugate() -
1111 HV0(q2) * HAm(q2).conjugate() - HA0(q2) * HVm(q2).conjugate()+(
Mlep +
Mnu) / sqrt(q2)*(HVp(q2) * HS(q2).conjugate() + HS(q2) * HVm(q2).conjugate()) -
1112 gslpp::complex::i() * M_SQRT2 * (HP(q2) * HTm(q2).conjugate() - HTp(q2) * HP(q2).conjugate() + M_SQRT2 * (HS(q2) * HTmt(q2).conjugate() - HTpt(q2) * HS(q2).conjugate()))+
1113 (
Mlep -
Mnu) / sqrt(q2)*(HAp(q2) * HP(q2).conjugate() + HP(q2) * HAm(q2).conjugate()) -
1114 gslpp::complex::i()*2. * (
Mlep +
Mnu) / sqrt(q2)*(HAp(q2) * HT0t(q2).conjugate() + HT0t(q2) * HAm(q2).conjugate() - HTpt(q2) * HA0(q2).conjugate() - HA0(q2) * HTmt(q2).conjugate()) -
1115 gslpp::complex::i() * M_SQRT2 * (
Mlep -
Mnu) / sqrt(q2)*(HVp(q2) * HT0(q2).conjugate() + HT0(q2) * HVm(q2).conjugate() - HTp(q2) * HV0(q2).conjugate() - HV0(q2) * HTm(q2).conjugate()) +
1116 2. * M_SQRT2 * (
Mlep *
Mlep -
Mnu *
Mnu) / q2 * (HTp(q2) * HT0t(q2).conjugate() + HTpt(q2) * HT0(q2).conjugate() - HT0(q2) * HTmt(q2).conjugate() - HT0t(q2) * HTm(q2).conjugate()));
// Angular moment G^{221}(q2).
// NOTE(review): the line defining `lambda_lep` (original line 1122) is
// missing from this extract — confirm against the full source.
1119gslpp::complex MVlnu::G221(
double q2)
1121 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1123 double lambda_lep2 = lambda_lep*lambda_lep;
1124 double Gprefactor = lambda_MM * lambda_lep / q2;
1126 return Gprefactor * 4. / 3. * lambda_lep2 / q2 * (HVp(q2) * HV0(q2).conjugate() + HV0(q2) * HVm(q2).conjugate() +
1127 HAp(q2) * HA0(q2).conjugate() + HA0(q2) * HAm(q2).conjugate() - 2. * (HTp(q2) * HT0(q2).conjugate() +
1128 HT0(q2) * HTm(q2).conjugate() + 2. * (HTpt(q2) * HT0t(q2).conjugate() + HT0t(q2) * HTmt(q2).conjugate())));
// Angular moment G^{222}(q2).
// NOTE(review): the line defining `lambda_lep` (original line 1134) is
// missing from this extract — confirm against the full source.
1131gslpp::complex MVlnu::G222(
double q2)
1133 double lambda_MM = lambda_half(
MM*
MM,
MV*
MV, q2);
1135 double lambda_lep2 = lambda_lep*lambda_lep;
1136 double Gprefactor = lambda_MM * lambda_lep / q2;
1138 return -Gprefactor * 8. / 3. * lambda_lep2 / q2 * (HVp(q2) * HVm(q2).conjugate() + HAp(q2) * HAm(q2).conjugate() -
1139 2. * (HTp(q2) * HTm(q2).conjugate() + 2. * HTpt(q2) * HTmt(q2).conjugate()));
// Angular coefficient J1s(q2) = amplsq_factor * Re(8 G000 + 2 G020 - 4 G200 - G220)/3.
// NOTE(review): original lines 1146-1147 are missing from this extract and
// may contain a statement besides the opening brace — confirm against the
// full source.
1145double MVlnu::J1s(
double q2)
1148 return amplsq_factor * (8. * G000(q2) + 2. * G020(q2) - 4. * G200(q2) - G220(q2)).real() / 3.;
// Angular coefficient J1c(q2) = amplsq_factor * Re(8 G000 + 2 G020 + 8 G200 + 2 G220)/3.
// NOTE(review): original lines 1152-1153 are missing from this extract —
// confirm against the full source.
1151double MVlnu::J1c(
double q2)
1154 return amplsq_factor * (8. * G000(q2) + 2. * G020(q2) + 8. * G200(q2) + 2. * G220(q2)).real() / 3.;
// Angular coefficient J2s(q2) = amplsq_factor * Re(2 G020 - G220).
// NOTE(review): original lines 1158-1159 are missing from this extract —
// confirm against the full source.
1157double MVlnu::J2s(
double q2)
1160 return amplsq_factor * (2. * G020(q2) - G220(q2)).real();
// Angular coefficient J2c(q2) = amplsq_factor * Re(2 (G020 + G220)).
// NOTE(review): original lines 1164-1165 are missing from this extract —
// confirm against the full source.
1163double MVlnu::J2c(
double q2)
1166 return amplsq_factor * (2. * (G020(q2) + G220(q2))).real();
// Angular coefficient J3(q2) = amplsq_factor * Re(G222).
// NOTE(review): original lines 1170-1171 are missing from this extract —
// confirm against the full source.
1169double MVlnu::J3(
double q2)
1172 return amplsq_factor * (G222(q2).real());
// Angular coefficient J4(q2) = -amplsq_factor * Re(G221).
// NOTE(review): original lines 1176-1177 are missing from this extract —
// confirm against the full source.
1175double MVlnu::J4(
double q2)
1178 return -amplsq_factor * (G221(q2).real());
// Angular coefficient J5(q2) = amplsq_factor * 2 Re(G211)/sqrt(3).
// NOTE(review): original lines 1182-1183 are missing from this extract —
// confirm against the full source.
1181double MVlnu::J5(
double q2)
1184 return amplsq_factor * (2. * G211(q2).real() / sqrt(3.));
// Angular coefficient J6s(q2) = -amplsq_factor * 4 Re(2 G010 - G210)/3.
// NOTE(review): original lines 1188-1189 are missing from this extract —
// confirm against the full source.
1187double MVlnu::J6s(
double q2)
1190 return -amplsq_factor * (4. * (2. * G010(q2) - G210(q2)).real() / 3.);
// Angular coefficient J6c(q2) = -amplsq_factor * 8 Re(G010 + G210)/3; forced
// to zero beyond the kinematic endpoint q2 = (MM - MV)^2.
// NOTE(review): original lines 1194-1195 are missing from this extract —
// confirm against the full source.
1193double MVlnu::J6c(
double q2)
1196 if (q2 > (
MM -
MV)*(
MM -
MV))
return 0.;
1197 return -amplsq_factor * (8. * (G010(q2) + G210(q2)).real() / 3.);
// Angular coefficient J7(q2) = -amplsq_factor * 2 sqrt(3) Im(G211)/3.
// NOTE(review): original lines 1201-1202 are missing from this extract —
// confirm against the full source.
1200double MVlnu::J7(
double q2)
1203 return -amplsq_factor * (2. * sqrt(3.)*(G211(q2).imag()) / 3.);
// Angular coefficient J8(q2) = amplsq_factor * Im(G221).
// NOTE(review): original lines 1207-1208 are missing from this extract —
// confirm against the full source.
1206double MVlnu::J8(
double q2)
1209 return amplsq_factor * (G221(q2).imag());
// Angular coefficient J9(q2) = -amplsq_factor * Im(G222).
// NOTE(review): original lines 1213-1214 are missing from this extract —
// confirm against the full source.
1212double MVlnu::J9(
double q2)
1215 return -amplsq_factor * (G222(q2).imag());
// Integrates the angular coefficient J_i (selected by index i = 1..12) over q2
// in [q2_min, q2_max] using the adaptive GSL integrator wrapped by ROOT.
// When the requested range equals the default [q2min, q2max], a per-lepton-flavour
// cached value is returned (guards checkcache_int_tau / _mu / _el) instead of
// re-integrating. Throws std::runtime_error for an unimplemented index.
// FIX: the J6c TAU cache branch returned cached_intJ6c_mu (copy-paste bug);
// it now returns cached_intJ6c_tau, matching the pattern of every other case.
1221double MVlnu::integrateJ(
 int i,
 double q2_min,
 double q2_max)
1225 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ1s_tau;
1226 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ1s_mu;
1227 if (
 lep ==
 StandardModel::ELECTRON)
 if (checkcache_int_el && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ1s_el;
1228 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J1s);
1230 J_res = ig.Integral(q2_min, q2_max);
1234 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ1c_tau;
1235 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ1c_mu;
1236 if (
 lep ==
 StandardModel::ELECTRON)
 if (checkcache_int_el && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ1c_el;
1237 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J1c);
1239 J_res = ig.Integral(q2_min, q2_max);
1243 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ2s_tau;
1244 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ2s_mu;
1245 if (
 lep ==
 StandardModel::ELECTRON)
 if (checkcache_int_el && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ2s_el;
1246 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J2s);
1248 J_res = ig.Integral(q2_min, q2_max);
1252 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ2c_tau;
1253 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ2c_mu;
1254 if (
 lep ==
 StandardModel::ELECTRON)
 if (checkcache_int_el && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ2c_el;
1255 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J2c);
1257 J_res = ig.Integral(q2_min, q2_max);
1261 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ3_tau;
1262 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ3_mu;
1264 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J3);
1266 J_res = ig.Integral(q2_min, q2_max);
1270 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ4_tau;
1271 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ4_mu;
1273 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J4);
1275 J_res = ig.Integral(q2_min, q2_max);
1279 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ5_tau;
1280 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ5_mu;
1282 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J5);
1284 J_res = ig.Integral(q2_min, q2_max);
1288 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ6s_tau;
1289 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ6s_mu;
1290 if (
 lep ==
 StandardModel::ELECTRON)
 if (checkcache_int_el && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ6s_el;
1291 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J6s);
1293 J_res = ig.Integral(q2_min, q2_max);
1297 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 // FIX(copy-paste): this TAU branch previously returned cached_intJ6c_mu.
 return cached_intJ6c_tau;
1298 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ6c_mu;
1299 if (
 lep ==
 StandardModel::ELECTRON)
 if (checkcache_int_el && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ6c_el;
1300 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J6c);
1302 J_res = ig.Integral(q2_min, q2_max);
1306 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ7_tau;
1307 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ7_mu;
1309 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J7);
1311 J_res = ig.Integral(q2_min, q2_max);
1315 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ8_tau;
1316 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ8_mu;
1318 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J8);
1320 J_res = ig.Integral(q2_min, q2_max);
1324 if (
 lep ==
 StandardModel::TAU)
 if (checkcache_int_tau && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ9_tau;
1325 if (
 lep ==
 StandardModel::MU)
 if (checkcache_int_mu && (q2_min == q2min) && (q2_max == q2max))
 return cached_intJ9_mu;
1327 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::J9);
1329 J_res = ig.Integral(q2_min, q2_max);
1333 std::stringstream out;
1335 throw std::runtime_error(
"MVlnu::integrateJ: index " + out.str() +
" not implemented");
1339double MVlnu::dGammadw(
double q2)
1343 return 3. / 4. * (2. * J1s(q2) + J1c(q2)) - 1. / 4. * (2. * J2s(q2) + J2c(q2));
1350 double q2_min = (2. *
MM *
MV)*(
w0 - w_max);
1351 double q2_max = (2. *
MM *
MV)*(
w0 - w_min);
1353 double intJ1s = integrateJ(1, q2_min, q2_max);
1354 double intJ1c = integrateJ(2, q2_min, q2_max);
1355 double intJ2s = integrateJ(3, q2_min, q2_max);
1356 double intJ2c = integrateJ(4, q2_min, q2_max);
1358 return 3. / 4. * (2. * intJ1s + intJ1c) - 1. / 4. * (2. * intJ2s + intJ2c);
1361double MVlnu::dGammadcldq2(
double q2,
double cl)
1365 return 3. / 8. * ((J1s(q2) + 2. * J1c(q2)) + cl * (J6s(q2) + 2. * J6c(q2))+(2. * cl * cl - 1.)*(J2s(q2) + 2. * J2c(q2)));
1368double MVlnu::dGammadcl(
double cl)
1372 double intJ1s = integrateJ(1, q2min, q2max);
1373 double intJ1c = integrateJ(2, q2min, q2max);
1374 double intJ2s = integrateJ(3, q2min, q2max);
1375 double intJ2c = integrateJ(4, q2min, q2max);
1376 double intJ6s = integrateJ(8, q2min, q2max);
1377 double intJ6c = integrateJ(9, q2min, q2max);
1379 return 3. / 8. * ((intJ1s + 2. * intJ1c) + cl * (intJ6s + 2. * intJ6c)+(2. * cl * cl - 1.)*(intJ2s + 2. * intJ2c));
1386 double intJ1s = integrateJ(1, q2min, q2max);
1387 double intJ1c = integrateJ(2, q2min, q2max);
1388 double intJ2s = integrateJ(3, q2min, q2max);
1389 double intJ2c = integrateJ(4, q2min, q2max);
1390 double intJ6s = integrateJ(8, q2min, q2max);
1391 double intJ6c = integrateJ(9, q2min, q2max);
1393 return 3. / 8. * ((cl_max - cl_min)*(intJ1c + 2. * intJ1s)+
1394 (cl_max * cl_max - cl_min * cl_min) / 2. * (intJ6c + 2. * intJ6s)+
1395 (2. / 3. * (cl_max * cl_max * cl_max - cl_min * cl_min * cl_min)-(cl_max - cl_min))*(intJ2c + 2. * intJ2s));
// NOTE(review): this body is byte-identical to dGammadcldq2 (a cos(theta_l)
// distribution), and the parameter is even named `cl` although the function
// name refers to cos(theta_V). dGammadcV uses the different structure
// 3/8*(cV^2*(3*J1c - J2c) + (1 - cV^2)*(3*J1s - J2s)) — this looks like a
// copy-paste; confirm the intended cV formula before relying on this function.
1398double MVlnu::dGammadcVdq2(
 double q2,
 double cl)
1402 return 3. / 8. * ((J1s(q2) + 2. * J1c(q2)) + cl * (J6s(q2) + 2. * J6c(q2))+(2. * cl * cl - 1.)*(J2s(q2) + 2. * J2c(q2)));
1405double MVlnu::dGammadcV(
double cV)
1409 double intJ1s = integrateJ(1, q2min, q2max);
1410 double intJ1c = integrateJ(2, q2min, q2max);
1411 double intJ2s = integrateJ(3, q2min, q2max);
1412 double intJ2c = integrateJ(4, q2min, q2max);
1414 return 3. / 8. * (cV * cV * (3. * intJ1c - intJ2c)+(1. - cV * cV)*(3. * intJ1s - intJ2s));
1421 double intJ1s = integrateJ(1, q2min, q2max);
1422 double intJ1c = integrateJ(2, q2min, q2max);
1423 double intJ2s = integrateJ(3, q2min, q2max);
1424 double intJ2c = integrateJ(4, q2min, q2max);
1426 return 3. / 8. * ((cV_max * cV_max * cV_max - cV_min * cV_min * cV_min) / 3. * (3. * intJ1c - intJ2c)+
1427 ((cV_max - cV_min)-(cV_max * cV_max * cV_max - cV_min * cV_min * cV_min) / 3.)*(3. * intJ1s - intJ2s));
1430double MVlnu::dGammadchidq2(
double q2,
double chi)
1434 return (3. * J1c(q2) + 6. * J1s(q2) - J2c(q2) - 2. * J2s(q2)) / 8. / M_PI +
1435 cos(2. * chi) / 2. / M_PI * J3(q2) + sin(2. * chi) / 2. / M_PI * J9(q2);
1438double MVlnu::dGammadchi(
double chi)
1442 double intJ1s = integrateJ(1, q2min, q2max);
1443 double intJ1c = integrateJ(2, q2min, q2max);
1444 double intJ2s = integrateJ(3, q2min, q2max);
1445 double intJ2c = integrateJ(4, q2min, q2max);
1446 double intJ3 = integrateJ(5, q2min, q2max);
1447 double intJ9 = integrateJ(12, q2min, q2max);
1449 return ((3. * intJ1c + 6. * intJ1s - intJ2c - 2. * intJ2s) / 4. +
1450 cos(2. * chi) * intJ3 + sin(2. * chi) * intJ9) / 2. / M_PI;
1457 double intJ1s = integrateJ(1, q2min, q2max);
1458 double intJ1c = integrateJ(2, q2min, q2max);
1459 double intJ2s = integrateJ(3, q2min, q2max);
1460 double intJ2c = integrateJ(4, q2min, q2max);
1461 double intJ3 = integrateJ(5, q2min, q2max);
1462 double intJ9 = integrateJ(12, q2min, q2max);
1464 return ((chi_max - chi_min)*(3. * intJ1c + 6. * intJ1s - intJ2c - 2. * intJ2s) / 4. +
1465 (sin(2. * chi_max) - sin(2. * chi_min)) / 2. * intJ3 -
1466 (cos(2. * chi_max) - cos(2. * chi_min)) / 2. * intJ9) / (2. * M_PI);
1473 double intJ1s = integrateJ(1, q2min, q2max);
1474 double intJ1c = integrateJ(2, q2min, q2max);
1475 double intJ2s = integrateJ(3, q2min, q2max);
1476 double intJ2c = integrateJ(4, q2min, q2max);
1478 double DeltaJL = (3. * intJ1c - intJ2c) / 4.;
1479 double DeltaJ = 3. / 4. * (2. * intJ1s + intJ1c) - 1. / 4. * (2. * intJ2s + intJ2c);
1480 return DeltaJL/DeltaJ;
// NOTE(review): visible remnant of get_unitarity_V_BGL() — the vector-channel
// BGL unitarity sum over the g-series coefficients; the enclosing function
// signature was lost in extraction.
1488 return ag0 * ag0 + ag1 * ag1 + ag2*ag2;
// NOTE(review): remnant of get_unitarity_A_BGL() — axial-channel BGL unitarity
// sum. aF10 is not a free parameter: it is fixed from af0 through the
// (MM - MV) * phi_F1(0)/phi_f(0) relation between the f and F1 series.
1496 double aF10 = (
MM -
MV)*(phi_F1(0.) / phi_f(0.)) * af0;
1497 return af0 * af0 + af1 * af1 + af2 * af2 + aF10 * aF10 + aF11 * aF11 + aF12*aF12 + aF13*aF13;
// NOTE(review): remnant of get_unitarity_P_BGL() — pseudoscalar-channel BGL
// unitarity sum. z0 is the conformal variable at w = w0; aF20 is fixed from
// F1_BGL(0) via the F2/F1 relation, with the Blaschke pole factors (zP1..zP3)
// and the outer function phi_F2 evaluated at z0, then the higher-order terms
// of the z-expansion subtracted.
1504 double z0 = (sqrt(
w0 + 1.) - M_SQRT2) / (sqrt(
w0 + 1.) + M_SQRT2);
1505 double PfacF2z0 = (z0 - zP1) / (1. - z0 * zP1)*(z0 - zP2) / (1. - z0 * zP2)*(z0 - zP3) / (1. - z0 * zP3);
1506 double phiF2z0 = phi_F2(z0);
1507 double aF20 = PfacF2z0 * phiF2z0 * 2. * F1_BGL(0.) / (
MM *
MM -
MV *
MV) - aF21 * z0 - aF22 * z0*z0 - aF23 * z0*z0*z0;
1508 return aF20 * aF20 + aF21 * aF21 + aF22*aF22 + aF23*aF23;
// NOTE(review): the four identical statements below are the visible remnants of
// four different form-factor getters (their signatures and return statements
// were lost in extraction). Each converts the recoil variable w to q2 via
// q2 = 2*MM*MV*(w0 - w) before evaluating the corresponding form factor.
1521 double q2 = (2. *
MM *
MV)*(
w0 - w);
1529 double q2 = (2. *
MM *
MV)*(
w0 - w);
1538 double q2 = (2. *
MM *
MV)*(
w0 - w);
1546 double q2 = (2. *
MM *
MV)*(
w0 - w);
// NOTE(review): remnant of a form-factor-ratio getter (presumably get_R1):
// converts w to q2, then in the BGL branch returns V * RV / hA1. The other
// parametrization branch(es) were lost in extraction.
1554 double q2 = (2. *
MM *
MV)*(
w0 - w);
1557 else if (
BGLflag)
 return V(q2) *
RV / hA1(q2);
// NOTE(review): remnant of a form-factor-ratio getter (presumably get_R2):
// converts w to q2, then in the BGL branch returns A2 * RV / hA1. The other
// parametrization branch(es) were lost in extraction.
1564 double q2 = (2. *
MM *
MV)*(
w0 - w);
1567 else if (
BGLflag)
 return A2(q2) *
RV / hA1(q2);
// NOTE(review): remnant of a form-factor-ratio getter (presumably get_R0):
// converts w to q2, then in the BGL branch returns the ratio A0/A1. The other
// parametrization branch(es) were lost in extraction.
1574 double q2 = (2. *
MM *
MV)*(
w0 - w);
1577 else if (
BGLflag)
 return A0(q2) / A1(q2);
1586double MVlnu::Hplus(
double q2){
1587 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1588 return (
MM+
MV)*A1(q2)-2.*
MM/(
MM+
MV)*abs_p*V(q2);
1591double MVlnu::Hminus(
double q2){
1592 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1593 return (
MM+
MV)*A1(q2)+2.*
MM/(
MM+
MV)*abs_p*V(q2);
// NOTE(review): H0(q2) — longitudinal helicity amplitude. Its return statement
// was lost in extraction; only the kinematic factor
// abs_p = lambda^{1/2}(MM^2, MV^2, q2) remains visible here.
1596double MVlnu::H0(
 double q2){
1597 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1601double MVlnu::H0t(
double q2){
1602 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1603 return 2.*
MM*abs_p*A0(q2)/sqrt(q2);
// NOTE(review): dGpdq2 — differential rate for one lepton helicity (includes
// the 3*H0t^2 timelike term, unlike dGmdq2). Lines defining lep_factor (and
// possibly other locals) were lost in extraction; confirm against the
// original source before editing.
1606double MVlnu::dGpdq2(
 double q2){
1610 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1612 return 2./3.*amplsq_factor*abs_p*q2*lep_factor*(Hplus(q2)*Hplus(q2)+Hminus(q2)*Hminus(q2)+H0(q2)*H0(q2)
1613 +3.*H0t(q2)*H0t(q2));
// NOTE(review): dGmdq2 — differential rate for the other lepton helicity
// (no H0t timelike term, cf. dGpdq2). The line defining lep_factor was lost
// in extraction; confirm against the original source before editing.
1616double MVlnu::dGmdq2(
 double q2){
1620 double abs_p = lambda_half(
MM*
MM,
MV*
MV,q2);
1622 return 2./3.*amplsq_factor*abs_p*q2*lep_factor*(Hplus(q2)*Hplus(q2)+Hminus(q2)*Hminus(q2)+H0(q2)*H0(q2));
// Integrates dGpdq2 (i = 1) or dGmdq2 (i = 2) over [q2_min, q2_max] with the
// adaptive GSL integrator wrapped by ROOT; throws for an unimplemented index.
// NOTE(review): the switch/case scaffolding and the `return J_res;` lines were
// lost in extraction — only the per-case bodies and the throw remain visible.
1625double MVlnu::integrateGpm(
 int i,
 double q2_min,
 double q2_max)
1629 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::dGpdq2);
1631 J_res = ig.Integral(q2_min, q2_max);
1635 wf=ROOT::Math::Functor1D(&(*
this),&MVlnu::dGmdq2);
1637 J_res = ig.Integral(q2_min, q2_max);
1641 std::stringstream out;
1643 throw std::runtime_error(
"MVlnu::integrateGpm: index " + out.str() +
" not implemented");
1651 double DeltaGammaPlus = integrateGpm(1,q2min,q2max);
1652 double DeltaGammaMinus = integrateGpm(2,q2min,q2max);
1654 return (DeltaGammaPlus-DeltaGammaMinus)/(DeltaGammaPlus+DeltaGammaMinus);
void setUpdateFlag(QCD::meson meson_i, QCD::meson meson_j, QCD::lepton lep_i, bool updated_i) const
Sets the update flag of the object that depends on the given initial-state meson, final-state meson, and lepton flavour.
bool getUpdateFlag(QCD::meson meson_i, QCD::meson meson_j, QCD::lepton lep_i) const
Gets the update flag of the object that depends on the given initial-state meson, final-state meson, and lepton flavour.
gslpp::vector< gslpp::complex > ** ComputeCoeffdiujlknu(int i, int j, int k, double mu) const
Computes the Wilson coefficient for the Hamiltonian transitions in the JMS basis ordered as CnueduVL...
double get_hA1w1()
return the hA1 form factor at zero recoil, w = 1
double get_R0(double w)
return R0 at the given value of w
double get_unitarity_V_BGL()
Vector unitarity constraint for BGL parameters.
double getDeltaGammaDeltacl(double cl_min, double cl_max)
The integral of dGamma/dcos(theta_l) from cl_min to cl_max.
double get_unitarity_P_BGL()
Pseudoscalar unitarity constraint for BGL parameters.
double getPlep()
Binned lepton helicity asymmetry P_lep.
double get_hA1(double w)
return at
std::vector< std::string > initializeMVlnuParameters()
double get_hV(double w)
return at
double get_hA2(double w)
return at
const StandardModel & mySM
double get_R2(double w)
return at
double getDeltaGammaDeltaw(double w_min, double w_max)
The integral of dGamma/dw from w_min to w_max.
double get_R1(double w)
return at
double getFL()
Binned D* longitudinal polarization fraction F_L.
virtual ~MVlnu()
Destructor.
double get_hA3(double w)
return at
MVlnu(const StandardModel &SM_i, QCD::meson meson_i, QCD::meson vector_i, QCD::lepton lep_i)
Constructor.
double get_unitarity_A_BGL()
Axial unitarity constraint for BGL parameters.
double getDeltaGammaDeltachi(double chi_min, double chi_max)
The integral of dGamma/dchi from chi_min to chi_max.
double getDeltaGammaDeltacV(double cV_min, double cV_max)
The integral of dGamma/dcos(theta_V) from cV_min to cV_max.
std::vector< std::string > mvlnuParameters
double computeWidth() const
A method to compute the width of the meson from its lifetime.
std::string getModelName() const
A method to fetch the name of the model.
const double & getMass() const
A get method to access the particle mass.
meson
An enum type for mesons.
const double getOptionalParameter(std::string name) const
A method to get parameters that are specific to only one set of observables.
const Meson & getMesons(const QCD::meson m) const
A get method to access a meson as an object of the type Meson.
lepton
An enum type for leptons.
const Particle & getQuarks(const QCD::quark q) const
A get method to access a quark as an object of the type Particle.
void initializeMeson(QCD::meson meson_i) const
A method to initialize a meson.
A model class for the Standard Model.
virtual const double getCCC1() const
A virtual implementation for the RealWeakEFTCC class.
const Particle & getLeptons(const QCD::lepton p) const
A get method to retrieve the member object of a lepton.
virtual const double getCCC3() const
A virtual implementation for the RealWeakEFTCC class.
const double getMz() const
A get method to access the mass of the boson .
const double Ale(double mu, orders order, bool Nf_thr=true) const
The running electromagnetic coupling in the scheme.
virtual const double getCCC4() const
A virtual implementation for the RealWeakEFTCC class.
const Flavour & getFlavour() const
virtual const double getCCC2() const
A virtual implementation for the RealWeakEFTCC class.
virtual const double getCCC5() const
A virtual implementation for the RealWeakEFTCC class.