
Rivet analyses reference

ATLAS_2012_I1093734

Forward-backward and $\phi$ correlations at 900 GeV and 7 TeV
Experiment: ATLAS (LHC)
Inspire ID: 1093734
Status: VALIDATED
Authors:
  • Camille Belanger-Champagne
  • Andy Buckley
  • Craig Buttar
  • Roman Lysak
References:
Beams: p+ p+
Beam energies: (450.0, 450.0); (3500.0, 3500.0) GeV
Run details:
  • Inelastic QCD (minimum-bias) events at 7 TeV and 900 GeV; diffractive events should be included.

Using inelastic proton-proton interactions at $\sqrt{s}$ = 900 GeV and 7 TeV, recorded by the ATLAS detector at the LHC, measurements have been made of the correlations between forward and backward charged-particle multiplicities and, for the first time, between forward and backward charged-particle summed transverse momentum. In addition, jet-like structure in the events is studied by means of azimuthal distributions of charged particles relative to the charged particle with highest transverse momentum in a selected kinematic region of the event.
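
For orientation, the forward-backward correlation quoted here, and computed by the a1_regression2 helper in the source below, is the event-weighted Pearson correlation coefficient between the multiplicities in symmetric forward and backward pseudorapidity intervals (the notation $n_F$, $n_B$ and $\sigma$ is ours, not the paper's):

$\rho_{FB} = \langle (n_F - \langle n_F \rangle)(n_B - \langle n_B \rangle) \rangle \,/\, (\sigma_{n_F}\,\sigma_{n_B})$

The same construction, applied to the summed transverse momenta in the two intervals, gives the $\sum p_T$ correlation.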

Source code: ATLAS_2012_I1093734.cc
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"

namespace Rivet {


  namespace {
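    // Helper functions for event-weighted statistics: weighted sums and means,
    // a weighted standard deviation, straight-line regression coefficients, a
    // weighted Pearson correlation coefficient, and the statistical error on a
    // fitted slope. finalize() uses these to turn the per-event forward/backward
    // vectors filled in analyze() into the published correlation strengths.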

    inline double sumAB(vector<double> vecX, vector<double> vecY, vector<double> vecW) {
      assert(vecX.size() == vecY.size() && vecX.size() == vecW.size());
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) sum += vecW[i] * vecX[i] * vecY[i];
      return sum;
    }

    inline double sumA(vector<double> vecX, vector<double> vecW) {
      assert(vecX.size() == vecW.size());
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) sum += vecX[i]*vecW[i];
      return sum;
    }

    inline double sumW(vector<double> vecW) {
      double sum(0);
      for (size_t i = 0; i < vecW.size(); i++) sum += vecW[i];
      return sum;
    }

    inline double mean(vector<double>  vecX, vector<double> vecW) {
      return sumA(vecX, vecW) / sumW(vecW);
    }

    inline double standard_deviation(vector<double> vecX, vector<double> vecW) {
      const double x_bar = mean(vecX, vecW);
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) {
        sum += vecW[i] * sqr(vecX[i] - x_bar);
      }
      return sqrt( sum / sumW(vecW) );
    }

    inline double a0_regression(vector<double>  vecX, vector<double> vecY, vector<double> vecW) {
      const double numerator   = sumA(vecY, vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumAB(vecX, vecY, vecW);
      const double denominator = sumW(vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumA(vecX, vecW);
      return numerator / denominator;
    }

    inline double a1_regression(vector<double>  vecX, vector<double> vecY, vector<double> vecW) {
      const double numerator   = sumW(vecW) * sumAB(vecX,vecY,vecW) - sumA(vecX, vecW) * sumA(vecY, vecW);
      const double denominator = sumW(vecW) * sumAB(vecX,vecX,vecW) - sumA(vecX, vecW) * sumA(vecX, vecW);
      return numerator/ denominator;
    }

    // Weighted Pearson correlation coefficient between X and Y (despite the
    // "regression" name): weighted covariance divided by the product of the
    // weighted standard deviations. This is the FB correlation strength
    // plotted in finalize().
    inline double a1_regression2(vector<double>  vecX, vector<double> vecY, vector<double> vecW) {
      const double x_bar = mean(vecX, vecW);
      const double y_bar = mean(vecY, vecW);
      double sumXY(0);
      for (size_t i = 0; i < vecX.size(); i++) {
        sumXY += vecW[i] * (vecY[i]-y_bar) * (vecX[i]-x_bar);
      }
      return sumXY / ( standard_deviation(vecX, vecW) * standard_deviation(vecY, vecW) * sumW(vecW) );
    }

    inline double quadra_sum_residual(vector<double> vecX, vector<double> vecY, vector<double> vecW) {
      const double a0 = a0_regression(vecX, vecY, vecW);
      const double a1 = a1_regression(vecX, vecY, vecW);
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) {
        const double y_est = a0 + a1*vecX[i];
        sum += vecW[i] * sqr(vecY[i] - y_est);
      }
      return sum;
    }

    // Statistical error on the slope of a weighted least-squares straight-line
    // fit: sqrt( weighted residual sum of squares / (sum(w) - 2) ) divided by
    // sqrt( sum_i w_i * (x_i - xbar)^2 ).
    inline double error_on_slope(vector<double> vecX, vector<double> vecY, vector<double> vecW) {
      const double quadra_sum_res = quadra_sum_residual(vecX, vecY, vecW);
      const double sqrt_quadra_sum_x   = standard_deviation(vecX, vecW) * sqrt(sumW(vecW));
      return sqrt(quadra_sum_res/(sumW(vecW)-2)) / sqrt_quadra_sum_x;
    }

  }



  /// Forward-backward and azimuthal correlations in minimum bias events
  class ATLAS_2012_I1093734 : public Analysis {
  public:

    /// Constructor
    ATLAS_2012_I1093734()
      : Analysis("ATLAS_2012_I1093734")
    {
      // Stat convergence happens around 20k events, so it doesn't make sense to run this
      // analysis with much less than that. Given that, lets avoid some unnecessary vector
      // resizing by allocating sensible amounts in the first place.
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        for (int k = 0; k < NETABINS; ++k) {
          _vecsNchF [ipt][k].reserve(10000);
          _vecsNchB [ipt][k].reserve(10000);
          _vecWeight[ipt][k].reserve(10000);
          if (ipt == 0) {
            _vecsSumptF[k].reserve(10000);
            _vecsSumptB[k].reserve(10000);
          }
        }
      }
    }


  public:

    /// @name Analysis methods
    //@{

    /// Book histograms and initialise projections before the run
    void init() {

      // FB correlations part

      // Projections
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        const double ptmin = PTMINVALUES[ipt]*MeV;
        for (int ieta = 0; ieta < NETABINS; ++ieta) {
          declare(ChargedFinalState(-ETAVALUES[ieta],    -ETAVALUES[ieta]+0.5, ptmin), "Tracks"+ETABINNAMES[ieta]+"B"+PTBINNAMES[ipt]);
          declare(ChargedFinalState( ETAVALUES[ieta]-0.5, ETAVALUES[ieta],     ptmin), "Tracks"+ETABINNAMES[ieta]+"F"+PTBINNAMES[ipt]);
        }
        declare(ChargedFinalState(-2.5, 2.5, ptmin), "CFS" + PTBINNAMES[ipt]);
      }
      // Histos
      if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) {
        for (int ipt  = 0; ipt  < NPTBINS ; ++ipt ) _s_NchCorr_vsEta[ipt]  = bookScatter2D(1+ipt,  2, 1, true);
        for (int ieta = 0; ieta < NETABINS; ++ieta) _s_NchCorr_vsPt [ieta] = bookScatter2D(8+ieta, 2, 1, true);
        _s_PtsumCorr = bookScatter2D(13, 2, 1, true);
      } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) {
        _s_NchCorr_vsEta[0] = bookScatter2D(14, 2, 1, true);
        _s_PtsumCorr        = bookScatter2D(15, 2, 1, true);
      }


      // Azimuthal correlations part
      // Projections
      const double ptmin = 500*MeV;
      declare(ChargedFinalState(-2.5, 2.5, ptmin), "ChargedTracks25");
      declare(ChargedFinalState(-2.0, 2.0, ptmin), "ChargedTracks20");
      declare(ChargedFinalState(-1.0, 1.0, ptmin), "ChargedTracks10");
      // Histos
      /// @todo Declare/book as temporary
      for (size_t ieta = 0; ieta < 3; ++ieta) {
        if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) {
          _s_dphiMin[ieta] = bookScatter2D(2+2*ieta, 1, 1, true);
          _s_diffSO[ieta] = bookScatter2D(8+2*ieta, 1, 1, true);
          _th_dphi[ieta] = YODA::Histo1D(refData(2+2*ieta, 1, 1));
          _th_same[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1));
          _th_oppo[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1));
        } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) {
          _s_dphiMin[ieta] = bookScatter2D(1+2*ieta, 1, 1, true);
          _s_diffSO[ieta] = bookScatter2D(7+2*ieta, 1, 1, true);
          _th_dphi[ieta] = YODA::Histo1D(refData(1+2*ieta, 1, 1));
          _th_same[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1));
          _th_oppo[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1));
        }
      }

    }


    /// Perform the per-event analysis
    void analyze(const Event& event) {
      const double weight = event.weight();

      // FB correlations part
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        const FinalState& charged = apply<FinalState>(event, "CFS" + PTBINNAMES[ipt]);
        if (charged.particles().size() >= 2) {
          for (int ieta = 0; ieta < NETABINS; ++ieta) {
            const string fname = "Tracks" + ETABINNAMES[ieta] + "F" + PTBINNAMES[ipt];
            const string bname = "Tracks" + ETABINNAMES[ieta] + "B" + PTBINNAMES[ipt];
            const ParticleVector particlesF = apply<FinalState>(event, fname).particles();
            const ParticleVector particlesB = apply<FinalState>(event, bname).particles();
            _vecsNchF[ipt][ieta].push_back((double) particlesF.size());
            _vecsNchB[ipt][ieta].push_back((double) particlesB.size());
            _vecWeight[ipt][ieta].push_back(weight);

            // Sum pT only for 100 MeV particles
            if (ipt == 0) {
              double sumptF = 0;
              double sumptB = 0;
              foreach (const Particle& p, particlesF) sumptF += p.pT();
              foreach (const Particle& p, particlesB) sumptB += p.pT();
              _vecsSumptF[ieta].push_back(sumptF);
              _vecsSumptB[ieta].push_back(sumptB);
            }

          }
        }
      }


      // Azimuthal correlations part
      string etabin[3] = { "10", "20", "25" };
      for (int ieta = 0; ieta < 3; ieta++) {
        const string fname = "ChargedTracks" + etabin[ieta];
        const ParticleVector partTrks = apply<FinalState>(event, fname).particlesByPt();
        if (partTrks.empty()) continue; ///< Skip events with no track in this acceptance

        // Find the leading track and fill the temp histograms
        const Particle& plead = partTrks[0];
        foreach (const Particle& p, partTrks) {
          if (&plead == &p) continue; ///< Don't compare the lead particle to itself
          const double dphi = deltaPhi(p.momentum(), plead.momentum());
          _th_dphi[ieta].fill(dphi, weight);
          const bool sameside = (plead.eta() * p.eta() > 0);
          (sameside ? _th_same : _th_oppo)[ieta].fill(dphi, weight);
        }
      }
    }


    /// Finalize
    void finalize() {

      // FB part
      // @todo For 2D plots we will need _vecsNchF[i], _vecsNchB[j]
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        for (int ieta = 0; ieta < NETABINS; ++ieta) {
          _s_NchCorr_vsEta[ipt]->point(ieta).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
          _s_NchCorr_vsEta[ipt]->point(ieta).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
        }
        // There is just one plot at 900 GeV so exit the loop here
        if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3) && ipt == 0) break;
      }

      if (!fuzzyEquals(sqrtS(),  900*GeV, 1e-3)) { ///< No Nch-correlation-vs-pT plots at 900 GeV
        for (int ieta = 0; ieta < NETABINS; ++ieta) {
          for (int ipt = 0; ipt < NPTBINS; ++ipt) {
            _s_NchCorr_vsPt[ieta]->point(ipt).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
            _s_NchCorr_vsPt[ieta]->point(ipt).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
          }
        }
      }

      // Sum pt only for 100 MeV particles
      for (int ieta = 0; ieta < NETABINS; ++ieta) {
        _s_PtsumCorr->point(ieta).setY(a1_regression2(_vecsSumptF[ieta], _vecsSumptB[ieta], _vecWeight[0][ieta]));
        _s_PtsumCorr->point(ieta).setYErr(error_on_slope(_vecsSumptF[ieta], _vecsSumptB[ieta], _vecWeight[0][ieta]));
      }


      // Azimuthal part
      for (int ieta = 0; ieta < 3; ieta++) {
        /// @note We don't just do a subtraction because of the risk of negative values and negative errors
        /// @todo Should the difference always be shown as positive?, i.e. y -> abs(y), etc.
        /// @todo Should the normalization be done _after_ the -ve value treatment?
        YODA::Histo1D hdiffSO = _th_same[ieta] - _th_oppo[ieta];
        hdiffSO.normalize(hdiffSO.bin(0).xWidth());
        for (size_t i = 0; i < hdiffSO.numBins(); ++i) {
          const double y = hdiffSO.bin(i).height() >= 0 ? hdiffSO.bin(i).height() : 0;
          const double yerr = hdiffSO.bin(i).heightErr() >= 0 ? hdiffSO.bin(i).heightErr() : 0;
          _s_diffSO[ieta]->point(i).setY(y, yerr);
        }

        // Extract minimal value
        double histMin = _th_dphi[ieta].bin(0).height();
        for (size_t iphi = 1; iphi < _th_dphi[ieta].numBins(); ++iphi) {
          histMin = std::min(histMin, _th_dphi[ieta].bin(iphi).height());
        }
        // Build scatter of differences
        double sumDiff = 0;
        for (size_t iphi = 0; iphi < _th_dphi[ieta].numBins(); ++iphi) {
          const double diff = _th_dphi[ieta].bin(iphi).height() - histMin;
          _s_dphiMin[ieta]->point(iphi).setY(diff, _th_dphi[ieta].bin(iphi).heightErr());
          sumDiff += diff;
        }
        // Normalize
        _s_dphiMin[ieta]->scale(1, 1/sumDiff);
      }

    }

    //@}


  private:

    static const int NPTBINS  = 7;
    static const int NETABINS = 5;
    static const double PTMINVALUES[NPTBINS];
    static const string PTBINNAMES[NPTBINS];
    static const double ETAVALUES[NETABINS];
    static const string ETABINNAMES[NETABINS];

    vector<double> _vecWeight[NPTBINS][NETABINS];
    vector<double> _vecsNchF[NPTBINS][NETABINS];
    vector<double> _vecsNchB[NPTBINS][NETABINS];
    vector<double> _vecsSumptF[NETABINS];
    vector<double> _vecsSumptB[NETABINS];

    /// @name Histograms
    //@{
    Scatter2DPtr _s_NchCorr_vsEta[NPTBINS], _s_NchCorr_vsPt[NETABINS], _s_PtsumCorr;
    Scatter2DPtr _s_dphiMin[3], _s_diffSO[3];
    YODA::Histo1D _th_dphi[3], _th_same[3], _th_oppo[3];
    //@}

  };


  /// @todo Initialize these inline at declaration with C++11
  const double ATLAS_2012_I1093734::PTMINVALUES[] = {100, 200, 300, 500, 1000, 1500, 2000 };
  const string ATLAS_2012_I1093734::PTBINNAMES[] = { "100", "200", "300", "500", "1000", "1500", "2000" };
  const double ATLAS_2012_I1093734::ETAVALUES[] = {0.5, 1.0, 1.5, 2.0, 2.5};
  const string ATLAS_2012_I1093734::ETABINNAMES[] = { "05", "10", "15", "20", "25" };



  // Hook for the plugin system
  DECLARE_RIVET_PLUGIN(ATLAS_2012_I1093734);

}
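
As a rough usage sketch (file names are placeholders and the exact options depend on your Rivet version), the analysis can be run over a HepMC event sample with the standard rivet command line, and the resulting YODA file browsed with rivet-mkhtml:

  rivet --analysis=ATLAS_2012_I1093734 events.hepmc
  rivet-mkhtml Rivet.yoda

Remember to generate inelastic (including diffractive) minimum-bias events at $\sqrt{s}$ = 900 GeV or 7 TeV, as noted in the run details above.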