Rivet analyses reference

CMS_2018_I1663958

ttbar lepton+jets 13 TeV

Experiment: CMS (LHC)
Inspire ID: 1663958
Status: VALIDATED
Authors:
Beam energies: (6500.0, 6500.0) GeV
Run details:
Abstract: Differential and double-differential cross sections for the production of top quark pairs in proton-proton collisions at $\sqrt{s} = 13$\,TeV are measured as a function of kinematic variables of the top quarks and the top quark-antiquark ($\text{t}\bar{\text{t}}$) system. In addition, kinematic variables and multiplicities of jets associated with the $\text{t}\bar{\text{t}}$ production are measured. This analysis is based on data collected by the CMS experiment at the LHC in 2016 corresponding to an integrated luminosity of 35.8\,fb$^{-1}$. The measurements are performed in the lepton+jets decay channels with a single muon or electron and jets in the final state. The differential cross sections are presented at the particle level, within a phase space close to the experimental acceptance, and at the parton level in the full phase space. The results are compared to several standard model predictions that use different methods and approximations. The kinematic variables of the top quarks and the $\text{t}\bar{\text{t}}$ system are reasonably described in general, though none predict all the measured distributions. In particular, the transverse momentum distribution of the top quarks is more steeply falling than predicted. The kinematic distributions and multiplicities of jets are adequately modeled by certain combinations of next-to-leading-order calculations and parton shower models.

Rivet: This analysis is to be run on $\text{t}\bar{\text{t}}$ Monte Carlo. The particle-level phase space is defined using the following definitions:
\begin{description}
\item[lepton] an electron or muon with $p_\text{T}>30\,\text{GeV}$ and $|\eta|<2.4$, dressed within a cone of radius 0.1;
\item[jet] a jet reconstructed with the anti-$k_\text{T}$ algorithm with a radius of 0.4, after removing the neutrinos and dressed leptons, with $p_\text{T}>25\,\text{GeV}$ and $|\eta|<2.4$;
\item[b-jet] a jet that contains a B hadron.
\end{description}
One W boson is reconstructed from the lepton and the vector sum of the neutrino momenta, while the other W boson is reconstructed from a light-jet pair. The two top quarks are reconstructed by combining b jets with these W bosons; a check based on the W boson and top quark masses is used to choose the proper combination.
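For reference, this check (as implemented in the source below) picks the jet combination that minimises a simple quadratic distance to the nominal W and top masses; the symbols $m_\text{W}^\text{had}$, $m_\text{t}^\text{had}$ and $m_\text{t}^\text{lep}$ used here for the reconstructed hadronic W, hadronic top and leptonic top masses are only illustrative, while the numerical values are those in the code:
$$K = \left(m_\text{W}^\text{had} - 80.4\,\text{GeV}\right)^2 + \left(m_\text{t}^\text{had} - 172.5\,\text{GeV}\right)^2 + \left(m_\text{t}^\text{lep} - 172.5\,\text{GeV}\right)^2 .$$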
2#include "Rivet/Projections/FinalState.hh"
3#include "Rivet/Projections/VisibleFinalState.hh"
4#include "Rivet/Projections/VetoedFinalState.hh"
5#include "Rivet/Projections/FastJets.hh"
6#include "Rivet/Projections/DressedLeptons.hh"
7#include "Rivet/Tools/BinnedHistogram.hh"
8
namespace Rivet {


  /// @brief ttbar lepton+jets 13 TeV
  class CMS_2018_I1663958 : public Analysis {
  public:

    RIVET_DEFAULT_ANALYSIS_CTOR(CMS_2018_I1663958);

    void init() {

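      // Full and visible final states; vetoing the visible part below isolates
      // the invisible particles (neutrinos), whose summed momentum serves as
      // the neutrino estimate in analyze().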
      const FinalState fs(Cuts::abseta < 6.);
      const VisibleFinalState vfs(Cuts::abseta < 6.);

      VetoedFinalState invisibles(fs);
      invisibles.addVetoOnThisFinalState(vfs);
      declare(invisibles, "Invisibles");

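      // Photons and bare electrons/muons from the visible final state; leptons
      // are dressed with photons within dR = 0.1, and the photons used for
      // dressing are removed from the stand-alone photon collection.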
      FinalState all_photons(vfs, Cuts::abspid == PID::PHOTON);
      FinalState leptons(vfs, Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON);

      DressedLeptons dressed_leptons(all_photons, leptons, 0.1, Cuts::abseta < 2.4 && Cuts::pT > 15*GeV, true);
      declare(dressed_leptons, "MyLeptons");

      VetoedFinalState photons(all_photons);
      photons.addVetoOnThisFinalState(dressed_leptons);
      declare(photons, "MyPhotons");

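      // Particles entering the lepton and photon isolation sums:
      // everything visible except the dressed leptons themselves.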
      VetoedFinalState isolationparticles(vfs);
      isolationparticles.addVetoOnThisFinalState(dressed_leptons);
      declare(isolationparticles, "IsoParticles");

      declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "Jets");

      book(_h["thadpt"], 1, 1, 1);
      book(_h["thady"], 3, 1, 1);
      book(_h["tleppt"], 5, 1, 1);
      book(_h["tlepy"], 7, 1, 1);
      book(_h["ttm"], 9, 1, 1);
      book(_h["ttpt"], 11, 1, 1);
      book(_h["tty"], 13, 1, 1);
      book(_h["njet"], 15, 1, 1);
      /// @todo Memory leak
      vector<double> njetbins = {-0.5, 0.5, 1.5, 2.5, 3.5};
      for (size_t i = 0; i < njetbins.size() - 1; ++i) {
        { Histo1DPtr tmp; _b["njet_ttm"].add(njetbins[i], njetbins[i+1], book(tmp, 17 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["njet_ttm_norm"].add(njetbins[i], njetbins[i+1], book(tmp, 99 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["njet_thadpt"].add(njetbins[i], njetbins[i+1], book(tmp, 22 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["njet_thadpt_norm"].add(njetbins[i], njetbins[i+1], book(tmp, 104 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["njet_ttpt"].add(njetbins[i], njetbins[i+1], book(tmp, 27 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["njet_ttpt_norm"].add(njetbins[i], njetbins[i+1], book(tmp, 109 + i, 1, 1)); }
      }
      vector<double> thadybins = {0.0, 0.5, 1.0, 1.5, 2.5};
      for (size_t i = 0; i < thadybins.size() - 1; ++i) {
        { Histo1DPtr tmp; _b["thady_thadpt"].add(thadybins[i], thadybins[i+1], book(tmp, 32 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["thady_thadpt_norm"].add(thadybins[i], thadybins[i+1], book(tmp, 114 + i, 1, 1)); }
      }
      vector<double> ttmbins = {300., 450., 625., 850., 2000.};
      for (size_t i = 0; i < ttmbins.size() - 1; ++i) {
        { Histo1DPtr tmp; _b["ttm_tty"].add(ttmbins[i], ttmbins[i+1], book(tmp, 37 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["ttm_tty_norm"].add(ttmbins[i], ttmbins[i+1], book(tmp, 119 + i, 1, 1)); }
      }
      vector<double> thadptbins = {0., 90., 180., 270., 800.};
      for (size_t i = 0; i < thadptbins.size() - 1; ++i) {
        { Histo1DPtr tmp; _b["thadpt_ttm"].add(thadptbins[i], thadptbins[i+1], book(tmp, 42 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["thadpt_ttm_norm"].add(thadptbins[i], thadptbins[i+1], book(tmp, 124 + i, 1, 1)); }
      }
      vector<double> jetbins = {-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5};
      for (size_t i = 0; i < jetbins.size() - 1; ++i) {
        { Histo1DPtr tmp; _b["jetspt"].add(jetbins[i], jetbins[i+1], book(tmp, 47 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["jetspt_norm"].add(jetbins[i], jetbins[i+1], book(tmp, 129 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["jetseta"].add(jetbins[i], jetbins[i+1], book(tmp, 56 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["jetseta_norm"].add(jetbins[i], jetbins[i+1], book(tmp, 138 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["jetsdr"].add(jetbins[i], jetbins[i+1], book(tmp, 65 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["jetsdr_norm"].add(jetbins[i], jetbins[i+1], book(tmp, 147 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["jetsdrtops"].add(jetbins[i], jetbins[i+1], book(tmp, 74 + i, 1, 1)); }
        { Histo1DPtr tmp; _b["jetsdrtops_norm"].add(jetbins[i], jetbins[i+1], book(tmp, 156 + i, 1, 1)); }
      }
      vector<double> njetsptbins = {0., 40., 60., 80., 120.};
      for (size_t i = 0; i < njetsptbins.size() - 1; ++i) {
        { Histo1DPtr tmp; _b["njetspt"].add(njetsptbins[i], njetsptbins[i+1], book(tmp, 169 + i, 1, 1)); }
      }

      book(_h["thadpt_norm"], 83, 1, 1);
      book(_h["thady_norm"], 85, 1, 1);
      book(_h["tleppt_norm"], 87, 1, 1);
      book(_h["tlepy_norm"], 89, 1, 1);
      book(_h["ttm_norm"], 91, 1, 1);
      book(_h["ttpt_norm"], 93, 1, 1);
      book(_h["tty_norm"], 95, 1, 1);
      book(_h["njet_norm"], 97, 1, 1);
      /// @todo Memory leak?
      book(m_hist_gap1, "d165-x01-y01");
      book(m_hist_gap2, "d167-x01-y01");
    }


    void analyze(const Event& event) {

      Jets bjets, ljets;
      Particles leptons, vetoleptons, additionalobjects, additionaljets;

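      // Dressed-lepton selection: the relative isolation (sum pT of other
      // particles within dR = 0.4, divided by the lepton pT) must not exceed
      // 0.35. Leptons with pT > 30 GeV are signal leptons; 15-30 GeV leptons
      // are kept as veto leptons.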
      const Particles& isopars = apply<VetoedFinalState>(event, "IsoParticles").particles();
      const Particles& dressedleptons = apply<DressedLeptons>(event, "MyLeptons").particles();
      for (const Particle& lep : dressedleptons) {
        double isolation = sum(filter_select(isopars, deltaRLess(lep, 0.4)), Kin::pT, 0.);
        isolation = isolation/lep.pt();
        if (isolation > 0.35) continue;
        if (lep.pt() > 30*GeV && lep.abseta() < 2.4) leptons += lep;
        else if (lep.pt() > 15*GeV && lep.abseta() < 2.4) vetoleptons += lep;
      }

      const Particles& photons = apply<VetoedFinalState>(event, "MyPhotons").particles(Cuts::abseta < 2.4 && Cuts::pT > 15*GeV);
      for (const Particle& ph : photons) {
        double isolation = sum(filter_select(isopars, deltaRLess(ph, 0.4)), Kin::pT, 0.);
        isolation = isolation/ph.pt() - 1.;
        if (isolation > 0.25) continue;
        additionalobjects += ph;
      }

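      // Vector-sum the momenta of all invisible final-state particles:
      // this is the neutrino-momentum proxy used for the leptonic W.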
      FourMomentum invsum(0.,0.,0.,0.);
      const Particles& invfspars = apply<FinalState>(event, "Invisibles").particles();
      for (const Particle& par : invfspars) invsum += par.mom();

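      // Jet selection: pT > 25 GeV, |eta| < 2.4, not overlapping (dR < 0.4)
      // with selected leptons, veto leptons or isolated photons.
      // Jets containing a B hadron are b jets; the rest are light jets.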
      Jets allJets = apply<FastJets>(event, "Jets").jetsByPt(Cuts::abseta < 2.4 && Cuts::pT > 25*GeV);
      idiscardIfAnyDeltaRLess(allJets, leptons, 0.4);
      idiscardIfAnyDeltaRLess(allJets, vetoleptons, 0.4);
      idiscardIfAnyDeltaRLess(allJets, additionalobjects, 0.4);
      for (const Jet& jet : allJets) {
        if (jet.bTagged()) bjets += jet;
        else ljets += jet;
      }

      // Semi-leptonic reconstruction
      if (leptons.size() != 1 || vetoleptons.size() || bjets.size() < 2 || ljets.size() < 2) vetoEvent;

      FourMomentum wl = invsum + leptons[0].mom();

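      // Loop over all light-jet pairs and all (hadronic, leptonic) b-jet
      // assignments, keeping the combination that minimises the quadratic
      // distance K to the nominal W and top masses (80.4 and 172.5 GeV).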
      Particle thad, tlep;
      Particles tlep_decay(3), thad_decay(3);
      double Kmin = numeric_limits<double>::max();
      for (size_t a = 0; a < ljets.size(); ++a) {
        const Jet& lja = ljets[a];
        for (size_t b = 0; b < a; ++b) {
          const Jet& ljb = ljets[b];
          FourMomentum wh(lja.momentum() + ljb.momentum());
          for (const Jet& bjh : bjets) {
            FourMomentum th(wh + bjh.momentum());
            for (const Jet& bjl : bjets) {
              if (&bjh == &bjl) continue;
              FourMomentum tl(wl + bjl.momentum());

              double K = pow(wh.mass() - 80.4, 2) + pow(th.mass() - 172.5, 2) + pow(tl.mass() - 172.5, 2);
              if (K < Kmin) {
                Kmin = K;
                thad = Particle(6, th);
                thad_decay[0] = Particle(5, bjh);
                thad_decay[1] = lja.pt() > ljb.pt() ? Particle(1, lja) : Particle(1, ljb);
                thad_decay[2] = lja.pt() <= ljb.pt() ? Particle(1, lja) : Particle(1, ljb);
                tlep = Particle(-6, tl);
                tlep_decay[0] = Particle(5, bjl);
                tlep_decay[1] = leptons[0];
                tlep_decay[2] = Particle(-1*(leptons[0].pid()+1), invsum);
              }
            }
          }
        }
      }

      Particles tt_jets({ tlep_decay[0], thad_decay[0], thad_decay[1], thad_decay[2] });

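      // Any remaining jet with pT > 30 GeV and |eta| < 2.4 that is not one of
      // the four ttbar-decay jets is counted as an additional jet, labelled by
      // flavour (b, c or light).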
      const double eps = 1E-5;
      for (const Jet& jet : bjets) {
        if (jet.pt() < 30*GeV || jet.abseta() > 2.4) continue;
        if (find_if(tt_jets.begin(), tt_jets.end(), [&](const Particle& par){ return deltaR(jet, par) < eps; }) != tt_jets.end()) continue;
        additionaljets += Particle(5, jet.mom());
      }
      for (const Jet& jet : ljets) {
        if (jet.pt() < 30*GeV || jet.abseta() > 2.4) continue;
        if (find_if(tt_jets.begin(), tt_jets.end(), [&](const Particle& par){ return deltaR(jet, par) < eps; }) != tt_jets.end()) continue;
        if (jet.cTagged()) additionaljets += Particle(4, jet.mom());
        else additionaljets += Particle(1, jet.mom());
      }

      sort(additionaljets.begin(), additionaljets.end(), cmpMomByPt);

      FourMomentum tt(thad.mom() + tlep.mom());

      dualfill("thadpt", thad.pt()/GeV);
      dualfill("thady", thad.absrap());
      dualfill("tleppt", tlep.pt()/GeV);
      dualfill("tlepy", tlep.absrap());
      dualfill("ttm", tt.mass()/GeV);
      dualfill("ttpt", tt.pt()/GeV);
      dualfill("tty", tt.absrap());
      dualfill("njet", min(additionaljets.size(), (size_t)5));
      double njet = double(min((size_t)3, additionaljets.size()));
      dualfill("njet_ttm", njet, tt.mass()/GeV);
      dualfill("njet_thadpt", njet, thad.pt()/GeV);
      dualfill("njet_ttpt", njet, tt.pt()/GeV);
      dualfill("thady_thadpt", thad.absrap(), thad.pt()/GeV);
      dualfill("ttm_tty", tt.mass()/GeV, tt.absrap());
      dualfill("thadpt_ttm", thad.pt()/GeV, tt.mass()/GeV);
      int jpos = -1;
      for (const Particles& jets : {tt_jets, additionaljets}) {
        for (const Particle& jet : jets) {
          jpos++;
          dualfill("jetspt", jpos, jet.pt()/GeV);
          dualfill("jetseta", jpos, jet.abseta());
          double drmin = 1E10;
          for (const Particle& tjet : tt_jets) {
            double dr = deltaR(jet, tjet);
            if (dr > eps && dr < drmin) drmin = dr;
          }
          dualfill("jetsdr", jpos, drmin);
          dualfill("jetsdrtops", jpos, min(deltaR(jet, thad), deltaR(jet, tlep)));
        }
      }
      for (double ptcut : {30, 50, 75, 100}) {
        _b["njetspt"].fill(ptcut, count_if(additionaljets.begin(), additionaljets.end(),
                           [&ptcut](const Particle& j) { return j.pt() > ptcut; }));
      }
    }

    void dualfill(const string& tag, const double value) {
      _h[tag]->fill(value); _h[tag + "_norm"]->fill(value);
    }

    void dualfill(const string& tag, const double val1, const double val2) {
      _b[tag].fill(val1, val2); _b[tag + "_norm"].fill(val1, val2);
    }

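    // Gap fraction vs jet pT: the fraction of events in which the njet-th
    // additional jet lies below a given pT threshold, built up from the
    // cumulative integral of the corresponding jet-pT histogram.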
    void gapfractionfromjetpt(const string& tag, Scatter2DPtr hgap, size_t njet) {
      size_t hn = njet+3;
      const double total = _b[tag].histo(0)->integral();
      const double totalj = _b[tag].histo(hn)->integral();
      double acc = total-totalj;
      double gf = acc/total;
      if (!std::isnan(gf)) {
        for (size_t nb = 0; nb < _b[tag].histo(hn)->numBins(); ++nb) {
          const double bl = _b[tag].histo(njet+3)->bin(nb).xMin();
          const double bh = _b[tag].histo(njet+3)->bin(nb).xMax();
          const double bc = 0.5*(bh+bl);
          hgap->addPoint(bc, gf, bc-bl, bh-bc, 0., 0.);
          acc += _b[tag].histo(njet+3)->bin(nb).area();
          gf = acc/total;
        }
      } else MSG_WARNING("Gap fraction is NaN. Histogram not produced.");
    }


    void finalize() {

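      // Absolute distributions are scaled to the measured cross section;
      // the *_norm variants are normalised to unit area instead.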
      const double sf = crossSection()/sumOfWeights();

      gapfractionfromjetpt("jetspt", m_hist_gap1, 1);
      gapfractionfromjetpt("jetspt", m_hist_gap2, 2);

      for (auto& item : _h) {
        if (item.first.find("_norm") != string::npos) {
          normalize(item.second, 1.0, false);
        }
        else scale(item.second, sf);
      }

      for (auto& item : _b) {
        if (item.first.find("_norm") != string::npos) {
          double area = 0;
          for (auto& hist : item.second.histos()) { area += hist->sumW(false); }
          //normalize(item.second.histos(), 1.0, false);
          if (area) item.second.scale(1/area, this);
        }
        else if (item.first.find("njetspt") != string::npos) {
          // skip division by bin width for secondary axis
          for (auto& hist : item.second.histos()) { scale(hist, sf); }
        }
        else item.second.scale(sf, this);
      }

    }

    map<string,Histo1DPtr> _h;
    map<string,BinnedHistogram> _b;

    Scatter2DPtr m_hist_gap1;
    Scatter2DPtr m_hist_gap2;

  };


  RIVET_DECLARE_PLUGIN(CMS_2018_I1663958);
}