Rivet analyses reference: OPAL_1997_I447146 — K∗0 meson production measured by OPAL at LEP 1. Experiment: OPAL (LEP 1). Inspire ID: 447146. Status: VALIDATED. Authors:
Beam energies: (45.6, 45.6) GeV. Run details:
The K∗0 fragmentation function has been measured in hadronic Z0 decays. In addition, the helicity density matrix elements for inclusive K∗(892)0 mesons from hadronic Z0 decays have been measured over the full range of K∗0 momentum, using data taken with the OPAL experiment at LEP. Both the fragmentation function and the polarization measurements are implemented.
Source code: OPAL_1997_I447146.cc
1// -*- C++ -*-
2#include "Rivet/Analysis.hh"
3#include "Rivet/Projections/Beam.hh"
4#include "Rivet/Projections/FinalState.hh"
5#include "Rivet/Projections/ChargedFinalState.hh"
6#include "Rivet/Projections/UnstableParticles.hh"
7
8namespace Rivet {
9
10
11 /// @brief OPAL K*0 fragmentation function paper
12 ///
13 /// @author Peter Richardson
14 class OPAL_1997_I447146 : public Analysis {
15 public:
16
17 RIVET_DEFAULT_ANALYSIS_CTOR(OPAL_1997_I447146);
18
19
20 /// @name Analysis methods
21 /// @{
22
23 void init() {
24 declare(Beam(), "Beams");
25 declare(ChargedFinalState(), "FS");
26 declare(UnstableParticles(), "UFS");
27 book(_histXeK0, 1, 1, 1);
28 const vector<double> edges{0., 0.01, 0.03, 0.1, 0.125, 0.14, 0.16, 0.2, 0.3, 0.4, 0.5, 0.7, 1.0};
29 book(_h_ctheta, edges);
30 book(_h_alpha, edges);
31 for (size_t ix=0; ix < _h_ctheta->numBins(); ++ix) {
32 string suff = std::to_string(ix);
33 if (suff.length() == 1) suff = "0"+suff;
34 book(_h_ctheta->bin(ix+1), "ctheta_"+suff, 20, -1.0, 1.0);
35 book(_h_alpha->bin(ix+1), "alpha_"+suff, 20, 0.0, 0.5*M_PI);
36 }
37 book(_h_ctheta_large,"ctheta_large", 20,-1.0, 1.0);
38
39
40 book(_h_alpha_low, {0.3, 0.4, 0.5, 0.7, 1.0});
41 book(_h_alpha_high, {0.3, 0.4, 0.5, 0.7, 1.0});
42 for (size_t ix=0; ix < _h_alpha_low->numBins(); ++ix) {
43 string suff = std::to_string(ix);
44 book(_h_alpha_low->bin(ix+1), "alpha_low_"+suff, 20, 0.0, 0.5*M_PI);
45 book(_h_alpha_high->bin(ix+1), "alpha_high_"+suff, 20, 0.0, 0.5*M_PI);
46 }
47 book(_h_alpha_large ,"_h_alpha_large" ,20,0.,0.5*M_PI);
48 book(_h_alpha_large_low ,"_h_alpha_large_low" ,20,0.,0.5*M_PI);
49 book(_h_alpha_large_high,"_h_alpha_large_high",20,0.,0.5*M_PI);
50 }
51
52 pair<double,double> calcRho(Histo1DPtr hist,unsigned int imode) {
53 if(hist->numEntries()==0.) return make_pair(0.,0.);
54 double sum1(0.),sum2(0.);
55 for (const auto& bin : hist->bins() ) {
56 double Oi = bin.sumW();
57 if(Oi==0.) continue;
58 double ai,bi;
59 if(imode==0) {
60 ai = 0.25*(bin.xMax()*(3.-sqr(bin.xMax())) - bin.xMin()*(3.-sqr(bin.xMin())));
61 bi = 0.75*(bin.xMin()*(1.-sqr(bin.xMin())) - bin.xMax()*(1.-sqr(bin.xMax())));
62 }
63 else {
64 ai = 2.*(bin.xMax()-bin.xMin())/M_PI;
65 bi = 2.*(sin(2.*bin.xMax())-sin(2.*bin.xMin()))/M_PI;
66 }
67 double Ei = bin.errW();
68 sum1 += sqr(bi/Ei);
69 sum2 += bi/sqr(Ei)*(Oi-ai);
70 }
71 return make_pair(sum2/sum1,sqrt(1./sum1));
72 }
73
74
75 void analyze(const Event& e) {
76 // First, veto on leptonic events by requiring at least 4 charged FS particles
77 const FinalState& fs = apply<FinalState>(e, "FS");
78 const size_t numParticles = fs.particles().size();
79
80 // Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
81 if (numParticles < 2) {
82 MSG_DEBUG("Failed leptonic event cut");
83 vetoEvent;
84 }
85 MSG_DEBUG("Passed leptonic event cut");
86
87 // Get beams and average beam momentum
88 const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
89 const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0;
90 MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
91 Vector3 axis;
92 if(beams.first.pid()>0) {
93 axis = beams.first .momentum().p3().unit();
94 }
95 else {
96 axis = beams.second.momentum().p3().unit();
97 }
98
99 // Final state of unstable particles to get particle spectra
100 const UnstableParticles& ufs = apply<UnstableParticles>(e, "UFS");
101
102 for (const Particle& p : ufs.particles(Cuts::abspid==313)) {
103 double xp = p.p3().mod()/meanBeamMom;
104 _histXeK0->fill(xp);
105 int sign = p.pid()/313;
106 if(p.children().size()!=2) continue;
107 Particle kaon;
108 if(p.children()[0].pid()==sign*321 && p.children()[1].pid()==-sign*211) {
109 kaon = p.children()[0];
110 }
111 else if(p.children()[1].pid()==sign*321 && p.children()[0].pid()==-sign*211) {
112 kaon = p.children()[1];
113 }
114 else
115 continue;
116 // spin axes
117 Vector3 e1z = p.momentum().p3().unit();
118 Vector3 e1y = e1z.cross(axis).unit();
119 Vector3 e1x = e1y.cross(e1z).unit();
120 LorentzTransform boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
121 Vector3 axis1 = boost.transform(kaon.momentum()).p3().unit();
122 double ctheta = e1z.dot(axis1);
123 double phi = atan2(e1y.dot(axis1),e1x.dot(axis1));
124 double alpha = abs(abs(phi)-0.5*M_PI);
125 _h_ctheta->fill(xp,ctheta);
126 _h_alpha->fill(xp,alpha );
127 double cBeam = axis.dot(e1z);
128 (cBeam < 0.5 ? _h_alpha_low : _h_alpha_high)->fill(xp,alpha);
129
130 if (xp > 0.3) {
131 _h_ctheta_large->fill(ctheta);
132 _h_alpha_large->fill(alpha);
133 (cBeam < 0.5 ? _h_alpha_large_low : _h_alpha_large_high)->fill(alpha);
134 }
135 }
136 }
137
138
139 /// Finalize
140 void finalize() {
141 scale(_histXeK0, 1./sumOfWeights());
142 const vector<double> x = {0., 0.01 ,0.03 ,0.10 ,0.125,0.14 ,0.16 ,0.20 ,0.30 ,0.40 ,0.50 ,0.70 ,1.00};
143 Estimate1DPtr h_rho00;
144 book(h_rho00,2,1,1);
145 Estimate1DPtr h_rho_off;
146 book(h_rho_off,2,1,2);
147 Estimate1DPtr h_ratio;
148 book(h_ratio,3,1,1);
149 Estimate1DPtr h_off_low;
150 book(h_off_low,4,1,1);
151 Estimate1DPtr h_off_high;
152 book(h_off_high,4,1,2);
153 for (size_t ix=0; ix<_h_ctheta->numBins(); ++ix) {
154 // extract the rho00 component
155 normalize(_h_ctheta->bin(ix+1));
156 pair<double,double> rho00 = calcRho(_h_ctheta->bin(ix+1), 0);
157 h_rho00->bin(ix+1).set(rho00.first, rho00.second);
158
159 // extract the rho 1 -1 component
160 normalize(_h_alpha->bin(ix+1));
161 pair<double,double> rho_off = calcRho(_h_alpha->bin(ix+1), 1);
162 h_rho_off->bin(ix+1).set(rho_off.first, rho_off.second);
163
164 if (ix<8) continue;
165
166 // at large xp also the ratio
167 size_t iy = ix-8;
168 double ratio = rho_off.first/(1.-rho00.first);
169 double dr = ((rho_off.second - rho00.first*rho_off.second + rho_off.first*rho00.second))/sqr(1.-rho00.first);
170 h_ratio->bin(iy+1).set(ratio, dr);
171 // rho 1 -1 for cos theta <0.5
172 normalize(_h_alpha_low->bin(iy+1));
173 rho_off = calcRho(_h_alpha_low->bin(iy+1), 1);
174 h_off_low->bin(iy+1).set(rho_off.first, rho_off.second);
175 // rho 1 -1 for cos theta >0.5
176 normalize(_h_alpha_high->bin(iy+1));
177 rho_off = calcRho(_h_alpha_high->bin(iy+1), 1);
178 h_off_high->bin(iy+1).set(rho_off.first, rho_off.second);
179
180 }
181 // ratio for xp 0.3
182 normalize(_h_ctheta_large);
183 pair<double,double> rho00 = calcRho(_h_ctheta_large,0);
184 normalize(_h_alpha_large);
185 pair<double,double> rho_off = calcRho(_h_alpha_large,1);
186 double ratio = rho_off.first/(1.-rho00.first);
187 double dr = ((rho_off.second - rho00.first*rho_off.second + rho_off.first*rho00.second))/sqr(1.-rho00.first);
188 Estimate1DPtr h_ratio_large;
189 book(h_ratio_large,3,2,1);
190 h_ratio_large->bin(1).set(ratio, dr);
191 // rho 1 -1 for xp >0.3 and cos theta < 0.5
192 normalize(_h_alpha_large_low );
193 rho_off = calcRho(_h_alpha_large_low,1);
194 book(h_off_low,4,2,1);
195 h_off_low->bin(1).set(rho_off.first, rho_off.second);
196 // rho 1 -1 for xp >0.3 and cos theta > 0.5
197 normalize(_h_alpha_large_high);
198 rho_off = calcRho(_h_alpha_large_high,1);
199 book(h_off_high,4,2,2);
200 h_off_high->bin(1).set(rho_off.first, rho_off.second);
201 }
202
203 /// @}
204
205
206 private:
207
208 /// @{
209 Histo1DPtr _histXeK0;
210 Histo1DGroupPtr _h_ctheta, _h_alpha, _h_alpha_low, _h_alpha_high;
211 Histo1DPtr _h_ctheta_large, _h_alpha_large, _h_alpha_large_low, _h_alpha_large_high;
212 /// @}
213
214 };
215
216
217
218 RIVET_DECLARE_ALIASED_PLUGIN(OPAL_1997_I447146, OPAL_1997_S3608263);
219
220}
|