Rivet is hosted by Hepforge, IPPP Durham
ATLAS_2012_I1093734.cc
Go to the documentation of this file.
00001 // -*- C++ -*-
00002 #include "Rivet/Analysis.hh"
00003 #include "Rivet/Projections/FinalState.hh"
00004 #include "Rivet/Projections/ChargedFinalState.hh"
00005 #include "Rivet/Projections/UnstableFinalState.hh"
00006 #include "Rivet/Projections/MissingMomentum.hh"
00007 
00008 namespace Rivet {
00009 
00010 
00011   namespace {
00012 
00013     inline double sumAB(vector<double> vecX, vector<double> vecY, vector<double> vecW) {
00014       assert(vecX.size() == vecY.size() && vecX.size() == vecW.size());
00015       double sum(0);
00016       for (size_t i = 0; i < vecX.size(); i++) sum += vecW[i] * vecX[i] * vecY[i];
00017       return sum;
00018     }
00019 
00020     inline double sumA(vector<double> vecX, vector<double> vecW) {
00021       assert(vecX.size() == vecW.size());
00022       double sum(0);
00023       for (size_t i = 0; i < vecX.size(); i++) sum += vecX[i]*vecW[i];
00024       return sum;
00025     }
00026 
00027     inline double sumW(vector<double> vecW) {
00028       double sum(0);
00029       for (size_t i = 0; i < vecW.size(); i++) sum += vecW[i];
00030       return sum;
00031     }
00032 
00033     inline double mean(vector<double>  vecX, vector<double> vecW) {
00034       return sumA(vecX, vecW) / sumW(vecW);
00035     }
00036 
00037     inline double standard_deviation(vector<double> vecX, vector<double> vecW) {
00038       const double x_bar = mean(vecX, vecW);
00039       double sum(0);
00040       for (size_t i = 0; i < vecX.size(); i++) {
00041         sum += vecW[i] * sqr(vecX[i] - x_bar);
00042       }
00043       return sqrt( sum / sumW(vecW) );
00044     }
00045 
00046     inline double a0_regression(vector<double>  vecX, vector<double> vecY, vector<double> vecW) {
00047       const double numerator   = sumA(vecY, vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumAB(vecX, vecY, vecW);
00048       const double denominator = sumW(vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumA(vecX, vecW);
00049       return numerator / denominator;
00050     }
00051 
00052     inline double a1_regression(vector<double>  vecX, vector<double> vecY, vector<double> vecW) {
00053       const double numerator   = sumW(vecW) * sumAB(vecX,vecY,vecW) - sumA(vecX, vecW) * sumA(vecY, vecW);
00054       const double denominator = sumW(vecW) * sumAB(vecX,vecX,vecW) - sumA(vecX, vecW) * sumA(vecX, vecW);
00055       return numerator/ denominator;
00056     }
00057 
00058     inline double a1_regression2(vector<double>  vecX, vector<double> vecY, vector<double> vecW) {
00059       const double x_bar = mean(vecX, vecW);
00060       const double y_bar = mean(vecY, vecW);
00061       double sumXY(0);
00062       for (size_t i = 0; i < vecX.size(); i++) {
00063         sumXY += vecW[i] * (vecY[i]-y_bar) * (vecX[i]-x_bar);
00064       }
00065       return sumXY / ( standard_deviation(vecX, vecW) * standard_deviation(vecY, vecW) * sumW(vecW) );
00066     }
00067 
00068     inline double quadra_sum_residual(vector<double> vecX, vector<double> vecY, vector<double> vecW) {
00069       const double a0 = a0_regression(vecX, vecY, vecW);
00070       const double a1 = a1_regression(vecX, vecY, vecW);
00071       double sum(0);
00072       for (size_t i = 0; i < vecX.size(); i++) {
00073         const double y_est = a0 + a1*vecX[i];
00074         sum += vecW[i] * sqr(vecY[i] - y_est);
00075       }
00076       return sum;
00077     }
00078 
00079     inline double error_on_slope(vector<double> vecX, vector<double> vecY, vector<double> vecW) {
00080       const double quadra_sum_res = quadra_sum_residual(vecX, vecY, vecW);
00081       const double sqrt_quadra_sum_x   = standard_deviation(vecX, vecW) * sqrt(sumW(vecW));
00082       return sqrt(quadra_sum_res/(sumW(vecW)-2)) / sqrt_quadra_sum_x;
00083     }
00084 
00085   }
00086 
00087 
00088 
00089   /// Forward-backward and azimuthal correlations in minimum bias events
00090   class ATLAS_2012_I1093734 : public Analysis {
00091   public:
00092 
00093     /// Constructor
00094     ATLAS_2012_I1093734()
00095       : Analysis("ATLAS_2012_I1093734")
00096     {
00097       // Stat convergence happens around 20k events, so it doesn't make sense to run this
00098       // analysis with much less than that. Given that, lets avoid some unnecessary vector
00099       // resizing by allocating sensible amounts in the first place.
00100       for (int ipt = 0; ipt < NPTBINS; ++ipt) {
00101         for (int k = 0; k < NETABINS; ++k) {
00102           _vecsNchF [ipt][k].reserve(10000);
00103           _vecsNchB [ipt][k].reserve(10000);
00104           _vecWeight[ipt][k].reserve(10000);
00105           if (ipt == 0) {
00106             _vecsSumptF[k].reserve(10000);
00107             _vecsSumptB[k].reserve(10000);
00108           }
00109         }
00110       }
00111     }
00112 
00113 
00114   public:
00115 
00116     /// @name Analysis methods
00117     //@{
00118 
00119     /// Book histograms and initialise projections before the run
00120     void init() {
00121 
00122       // FB correlations part
00123 
00124       // Projections
00125       for (int ipt = 0; ipt < NPTBINS; ++ipt) {
00126         const double ptmin = PTMINVALUES[ipt]*MeV;
00127         for (int ieta = 0; ieta < NETABINS; ++ieta) {
00128           addProjection(ChargedFinalState(-ETAVALUES[ieta],    -ETAVALUES[ieta]+0.5, ptmin), "Tracks"+ETABINNAMES[ieta]+"B"+PTBINNAMES[ipt]);
00129           addProjection(ChargedFinalState( ETAVALUES[ieta]-0.5, ETAVALUES[ieta],     ptmin), "Tracks"+ETABINNAMES[ieta]+"F"+PTBINNAMES[ipt]);
00130         }
00131         addProjection(ChargedFinalState(-2.5, 2.5, ptmin), "CFS" + PTBINNAMES[ipt]);
00132       }
00133       // Histos
00134       if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) {
00135         for (int ipt  = 0; ipt  < NPTBINS ; ++ipt ) _s_NchCorr_vsEta[ipt]  = bookScatter2D(1+ipt,  2, 1, true);
00136         for (int ieta = 0; ieta < NETABINS; ++ieta) _s_NchCorr_vsPt [ieta] = bookScatter2D(8+ieta, 2, 1, true);
00137         _s_PtsumCorr = bookScatter2D(13, 2, 1, true);
00138       } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) {
00139         _s_NchCorr_vsEta[0] = bookScatter2D(14, 2, 1, true);
00140         _s_PtsumCorr        = bookScatter2D(15, 2, 1, true);
00141       }
00142 
00143 
00144       // Azimuthal correlations part
00145       // Projections
00146       const double ptmin = 500*MeV;
00147       addProjection(ChargedFinalState(-2.5, 2.5, ptmin), "ChargedTracks25");
00148       addProjection(ChargedFinalState(-2.0, 2.0, ptmin), "ChargedTracks20");
00149       addProjection(ChargedFinalState(-1.0, 1.0, ptmin), "ChargedTracks10");
00150       // Histos
00151       /// @todo Declare/book as temporary
00152       for (size_t ieta = 0; ieta < 3; ++ieta) {
00153         if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) {
00154           _s_dphiMin[ieta] = bookScatter2D(2+2*ieta, 1, 1, true);
00155           _s_diffSO[ieta] = bookScatter2D(8+2*ieta, 1, 1, true);
00156           _th_dphi[ieta] = YODA::Histo1D(refData(2+2*ieta, 1, 1));
00157           _th_same[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1));
00158           _th_oppo[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1));
00159         } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) {
00160           _s_dphiMin[ieta] = bookScatter2D(1+2*ieta, 1, 1, true);
00161           _s_diffSO[ieta] = bookScatter2D(7+2*ieta, 1, 1, true);
00162           _th_dphi[ieta] = YODA::Histo1D(refData(1+2*ieta, 1, 1));
00163           _th_same[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1));
00164           _th_oppo[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1));
00165         }
00166       }
00167 
00168     }
00169 
00170 
00171     /// Perform the per-event analysis
00172     void analyze(const Event& event) {
00173       const double weight = event.weight();
00174 
00175       for (int ipt = 0; ipt < NPTBINS; ++ipt) {
00176         const FinalState& charged = applyProjection<FinalState>(event, "CFS" + PTBINNAMES[ipt]);
00177         if (charged.particles().size() >= 2) {
00178           for (int ieta = 0; ieta < NETABINS; ++ieta) {
00179             const string fname = "Tracks" + ETABINNAMES[ieta] + "F" + PTBINNAMES[ipt];
00180             const string bname = "Tracks" + ETABINNAMES[ieta] + "B" + PTBINNAMES[ipt];
00181             const ParticleVector particlesF = applyProjection<FinalState>(event, fname).particles();
00182             const ParticleVector particlesB = applyProjection<FinalState>(event, bname).particles();
00183             _vecsNchF[ipt][ieta].push_back((double) particlesF.size());
00184             _vecsNchB[ipt][ieta].push_back((double) particlesB.size());
00185             _vecWeight[ipt][ieta].push_back(weight);
00186 
00187             // Sum pT only for 100 MeV particles
00188             if (ipt == 0) {
00189               double sumptF = 0;
00190               double sumptB = 0;
00191               foreach (const Particle& p, particlesF) sumptF += p.pT();
00192               foreach (const Particle& p, particlesB) sumptB += p.pT();
00193               _vecsSumptF[ieta].push_back(sumptF);
00194               _vecsSumptB[ieta].push_back(sumptB);
00195             }
00196 
00197           }
00198         }
00199       }
00200 
00201 
00202       string etabin[3] = { "10", "20", "25" };
00203       for (int ieta = 0; ieta < 3; ieta++) {
00204         const string fname = "ChargedTracks" + etabin[ieta];
00205         const ParticleVector partTrks = applyProjection<FinalState>(event, fname).particlesByPt();
00206 
00207         // Find the leading track and fill the temp histograms
00208         const Particle& plead = partTrks[0];
00209         foreach (const Particle& p, partTrks) {
00210           if (&plead == &p) continue; ///< Don't compare the lead particle to itself
00211           const double dphi = deltaPhi(p.momentum(), plead.momentum());
00212           _th_dphi[ieta].fill(dphi, weight);
00213           const bool sameside = (plead.eta() * p.eta() > 0);
00214           (sameside ? _th_same : _th_oppo)[ieta].fill(dphi, weight);
00215         }
00216       }
00217     }
00218 
00219 
00220     /// Finalize
00221     void finalize() {
00222 
00223       // FB part
00224       // @todo For 2D plots we will need _vecsNchF[i], _vecsNchB[j]
00225       for (int ipt = 0; ipt < NPTBINS; ++ipt) {
00226         for (int ieta = 0; ieta < NETABINS; ++ieta) {
00227         _s_NchCorr_vsEta[ipt]->point(ieta).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
00228         _s_NchCorr_vsEta[ipt]->point(ieta).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
00229         }
00230         // There is just one plot at 900 GeV so exit the loop here
00231         if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3) && ipt == 0) break;
00232       }
00233 
00234       if (!fuzzyEquals(sqrtS(),  900*GeV, 1e-3)) { ///< No plots at 900 GeV
00235         for (int ieta = 0; ieta < NETABINS; ++ieta) {
00236           for (int ipt = 0; ipt < NPTBINS; ++ipt) {
00237             _s_NchCorr_vsPt[ieta]->point(ipt).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
00238             _s_NchCorr_vsPt[ieta]->point(ipt).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
00239           }
00240         }
00241       }
00242 
00243       // Sum pt only for 100 MeV particles
00244       for (int ieta = 0; ieta < NETABINS; ++ieta) {
00245         _s_PtsumCorr->point(ieta).setY(a1_regression2(_vecsSumptF[ieta], _vecsSumptB[ieta], _vecWeight[0][ieta]));
00246         _s_PtsumCorr->point(ieta).setYErr(error_on_slope(_vecsSumptF[ieta], _vecsSumptB[ieta], _vecWeight[0][ieta]));
00247       }
00248 
00249 
00250       // Azimuthal part
00251       for (int ieta = 0; ieta < 3; ieta++) {
00252         /// @note We don't just do a subtraction because of the risk of negative values and negative errors
00253         /// @todo Should the difference always be shown as positive?, i.e. y -> abs(y), etc.
00254         /// @todo Should the normalization be done _after_ the -ve value treatment?
00255         YODA::Histo1D hdiffSO = _th_same[ieta] - _th_oppo[ieta];
00256         hdiffSO.normalize(hdiffSO.bin(0).xWidth());
00257         for (size_t i = 0; i < hdiffSO.numBins(); ++i) {
00258           const double y = hdiffSO.bin(i).height() >= 0 ? hdiffSO.bin(i).height() : 0;
00259           const double yerr = hdiffSO.bin(i).heightErr() >= 0 ? hdiffSO.bin(i).heightErr() : 0;
00260           _s_diffSO[ieta]->point(i).setY(y, yerr);
00261         }
00262 
00263         // Extract minimal value
00264         double histMin = _th_dphi[ieta].bin(0).height();
00265         for (size_t iphi = 1; iphi < _th_dphi[ieta].numBins(); ++iphi) {
00266           histMin = std::min(histMin, _th_dphi[ieta].bin(iphi).height());
00267         }
00268         // Build scatter of differences
00269         double sumDiff = 0;
00270         for (size_t iphi = 0; iphi < _th_dphi[ieta].numBins(); ++iphi) {
00271           const double diff = _th_dphi[ieta].bin(iphi).height() - histMin;
00272           _s_dphiMin[ieta]->point(iphi).setY(diff, _th_dphi[ieta].bin(iphi).heightErr());
00273           sumDiff += diff;
00274         }
00275         // Normalize
00276         _s_dphiMin[ieta]->scale(1, 1/sumDiff);
00277       }
00278 
00279     }
00280 
00281     //@}
00282 
00283 
00284   private:
00285 
00286     static const int NPTBINS  = 7;
00287     static const int NETABINS = 5;
00288     static const double PTMINVALUES[NPTBINS];
00289     static const string PTBINNAMES[NPTBINS];
00290     static const double ETAVALUES[NETABINS];
00291     static const string ETABINNAMES[NETABINS];
00292 
00293     vector<double> _vecWeight[NPTBINS][NETABINS];
00294     vector<double> _vecsNchF[NPTBINS][NETABINS];
00295     vector<double> _vecsNchB[NPTBINS][NETABINS];
00296     vector<double> _vecsSumptF[NETABINS];
00297     vector<double> _vecsSumptB[NETABINS];
00298 
00299     /// @name Histograms
00300     //@{
00301     Scatter2DPtr _s_NchCorr_vsEta[NPTBINS], _s_NchCorr_vsPt[NETABINS], _s_PtsumCorr;
00302     Scatter2DPtr _s_dphiMin[3], _s_diffSO[3];
00303     YODA::Histo1D _th_dphi[3], _th_same[3], _th_oppo[3];
00304     //@}
00305 
00306   };
00307 
00308 
00309   /// @todo Initialize these inline at declaration with C++11
00310   const double ATLAS_2012_I1093734::PTMINVALUES[] = {100, 200, 300, 500, 1000, 1500, 2000 };
00311   const string ATLAS_2012_I1093734::PTBINNAMES[] = { "100", "200", "300", "500", "1000", "1500", "2000" };
00312   const double ATLAS_2012_I1093734::ETAVALUES[] = {0.5, 1.0, 1.5, 2.0, 2.5};
00313   const string ATLAS_2012_I1093734::ETABINNAMES[] = { "05", "10", "15", "20", "25" };
00314 
00315 
00316 
00317   // Hook for the plugin system
00318   DECLARE_RIVET_PLUGIN(ATLAS_2012_I1093734);
00319 
00320 }