ATLAS_2012_I1093734.cc
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
#include "Rivet/Math/MathUtils.hh"
#include <cassert>

namespace Rivet {


  namespace {

    /// Weighted sum of products: sum_i w_i * x_i * y_i
    inline double sumAB(const vector<double>& vecX, const vector<double>& vecY, const vector<double>& vecW) {
      assert(vecX.size() == vecY.size() && vecX.size() == vecW.size());
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) sum += vecW[i] * vecX[i] * vecY[i];
      return sum;
    }

    /// Weighted sum: sum_i w_i * x_i
    inline double sumA(const vector<double>& vecX, const vector<double>& vecW) {
      assert(vecX.size() == vecW.size());
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) sum += vecX[i]*vecW[i];
      return sum;
    }

    /// Sum of weights: sum_i w_i
    inline double sumW(const vector<double>& vecW) {
      double sum(0);
      for (size_t i = 0; i < vecW.size(); i++) sum += vecW[i];
      return sum;
    }

    /// Weighted mean of x
    inline double mean(const vector<double>& vecX, const vector<double>& vecW) {
      return sumA(vecX, vecW) / sumW(vecW);
    }

    /// Weighted standard deviation of x
    inline double standard_deviation(const vector<double>& vecX, const vector<double>& vecW) {
      const double x_bar = mean(vecX, vecW);
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) {
        sum += vecW[i] * sqr(vecX[i] - x_bar);
      }
      return sqrt( sum / sumW(vecW) );
    }

    /// Intercept a0 of the weighted least-squares fit y = a0 + a1*x
    inline double a0_regression(const vector<double>& vecX, const vector<double>& vecY, const vector<double>& vecW) {
      const double numerator   = sumA(vecY, vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumAB(vecX, vecY, vecW);
      const double denominator = sumW(vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumA(vecX, vecW);
      return numerator / denominator;
    }

    /// Slope a1 of the weighted least-squares fit y = a0 + a1*x
    inline double a1_regression(const vector<double>& vecX, const vector<double>& vecY, const vector<double>& vecW) {
      const double numerator   = sumW(vecW) * sumAB(vecX, vecY, vecW) - sumA(vecX, vecW) * sumA(vecY, vecW);
      const double denominator = sumW(vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumA(vecX, vecW);
      return numerator / denominator;
    }

    /// Weighted Pearson correlation coefficient of x and y (in [-1, 1]),
    /// i.e. the regression slope normalised by the two standard deviations
    inline double a1_regression2(const vector<double>& vecX, const vector<double>& vecY, const vector<double>& vecW) {
      const double x_bar = mean(vecX, vecW);
      const double y_bar = mean(vecY, vecW);
      double sumXY(0);
      for (size_t i = 0; i < vecX.size(); i++) {
        sumXY += vecW[i] * (vecY[i]-y_bar) * (vecX[i]-x_bar);
      }
      return sumXY / ( standard_deviation(vecX, vecW) * standard_deviation(vecY, vecW) * sumW(vecW) );
    }

    /// Weighted sum of squared residuals about the least-squares line
    inline double quadra_sum_residual(const vector<double>& vecX, const vector<double>& vecY, const vector<double>& vecW) {
      const double a0 = a0_regression(vecX, vecY, vecW);
      const double a1 = a1_regression(vecX, vecY, vecW);
      double sum(0);
      for (size_t i = 0; i < vecX.size(); i++) {
        const double y_est = a0 + a1*vecX[i];
        sum += vecW[i] * sqr(vecY[i] - y_est);
      }
      return sum;
    }

    /// Standard error on the fitted slope a1
    inline double error_on_slope(const vector<double>& vecX, const vector<double>& vecY, const vector<double>& vecW) {
      const double quadra_sum_res = quadra_sum_residual(vecX, vecY, vecW);
      const double sqrt_quadra_sum_x = standard_deviation(vecX, vecW) * sqrt(sumW(vecW));
      return sqrt(quadra_sum_res/(sumW(vecW)-2)) / sqrt_quadra_sum_x;
    }

  }
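
  // A minimal illustration of the helpers above, using made-up numbers that
  // are not part of the analysis: for x = {1, 2, 3}, y = {2, 4, 6} and unit
  // weights w = {1, 1, 1},
  //
  //   a1_regression(x, y, w)   // == 2: weighted least-squares slope, i.e. the
  //                            //   a1 minimising sum_i w_i*(y_i - a0 - a1*x_i)^2
  //   a0_regression(x, y, w)   // == 0: the corresponding intercept a0
  //   a1_regression2(x, y, w)  // == 1: the weighted Pearson correlation
  //                            //   coefficient, bounded in [-1, 1]
  //
  // The FB observables below use a1_regression2 for the central values and
  // error_on_slope for the uncertainties.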


  /// Forward-backward and azimuthal correlations in minimum-bias events
  class ATLAS_2012_I1093734 : public Analysis {
  public:

    /// Constructor
    ATLAS_2012_I1093734()
      : Analysis("ATLAS_2012_I1093734")
    {
      // Statistical convergence happens around 20k events, so it doesn't make sense
      // to run this analysis with much less than that. Given that, let's avoid some
      // unnecessary vector resizing by allocating sensible amounts in the first place.
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        for (int k = 0; k < NETABINS; ++k) {
          _vecsNchF [ipt][k].reserve(10000);
          _vecsNchB [ipt][k].reserve(10000);
          _vecWeight[ipt][k].reserve(10000);
          if (ipt == 0) {
            _vecsSumptF[k].reserve(10000);
            _vecsSumptB[k].reserve(10000);
          }
        }
      }
    }


  public:

    /// @name Analysis methods
    //@{

    /// Book histograms and initialise projections before the run
    void init() {

      // FB correlations part

      // Projections: for each pT cut, book a backward window [-eta, -eta+0.5]
      // and a forward window [eta-0.5, eta] for each eta edge, plus the full
      // |eta| < 2.5 charged final state
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        const double ptmin = PTMINVALUES[ipt]*MeV;
        for (int ieta = 0; ieta < NETABINS; ++ieta) {
          addProjection(ChargedFinalState(-ETAVALUES[ieta], -ETAVALUES[ieta]+0.5, ptmin), "Tracks"+ETABINNAMES[ieta]+"B"+PTBINNAMES[ipt]);
          addProjection(ChargedFinalState( ETAVALUES[ieta]-0.5,  ETAVALUES[ieta], ptmin), "Tracks"+ETABINNAMES[ieta]+"F"+PTBINNAMES[ipt]);
        }
        addProjection(ChargedFinalState(-2.5, 2.5, ptmin), "CFS" + PTBINNAMES[ipt]);
      }
      // Histos
      if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) {
        for (int ipt  = 0; ipt  < NPTBINS ; ++ipt ) _s_NchCorr_vsEta[ipt]  = bookScatter2D(1+ipt,  2, 1, true);
        for (int ieta = 0; ieta < NETABINS; ++ieta) _s_NchCorr_vsPt [ieta] = bookScatter2D(8+ieta, 2, 1, true);
        _s_PtsumCorr = bookScatter2D(13, 2, 1, true);
      } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) {
        _s_NchCorr_vsEta[0] = bookScatter2D(14, 2, 1, true);
        _s_PtsumCorr        = bookScatter2D(15, 2, 1, true);
      }


      // Azimuthal correlations part
      // Projections
      const double ptmin = 500*MeV;
      addProjection(ChargedFinalState(-2.5, 2.5, ptmin), "ChargedTracks25");
      addProjection(ChargedFinalState(-2.0, 2.0, ptmin), "ChargedTracks20");
      addProjection(ChargedFinalState(-1.0, 1.0, ptmin), "ChargedTracks10");
      // Histos
      /// @todo Declare/book as temporary
      for (size_t ieta = 0; ieta < 3; ++ieta) {
        if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) {
          _s_dphiMin[ieta] = bookScatter2D(2+2*ieta, 1, 1, true);
          _s_diffSO[ieta]  = bookScatter2D(8+2*ieta, 1, 1, true);
          _th_dphi[ieta] = YODA::Histo1D(refData(2+2*ieta, 1, 1));
          _th_same[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1));
          _th_oppo[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1));
        } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) {
          _s_dphiMin[ieta] = bookScatter2D(1+2*ieta, 1, 1, true);
          _s_diffSO[ieta]  = bookScatter2D(7+2*ieta, 1, 1, true);
          _th_dphi[ieta] = YODA::Histo1D(refData(1+2*ieta, 1, 1));
          _th_same[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1));
          _th_oppo[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1));
        }
      }
    }
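
    // Geometry of the FB windows booked above, shown for the outermost eta
    // edge (a worked example of the code, not an extra selection): for
    // ETAVALUES[4] = 2.5 the forward window is eta in [2.0, 2.5] and the
    // backward window is eta in [-2.5, -2.0], i.e. symmetric 0.5-wide slices.
    // analyze() below pushes one (Nch_F, Nch_B, weight) triple per event into
    // the vectors reserved in the constructor, one set per (pT cut, eta edge).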


    /// Perform the per-event analysis
    void analyze(const Event& event) {
      const double weight = event.weight();

      // FB correlations: record the forward and backward multiplicities
      // (and, for the lowest pT cut, the summed pT) for each eta window pair
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        const FinalState& charged = applyProjection<FinalState>(event, "CFS" + PTBINNAMES[ipt]);
        if (charged.particles().size() >= 2) {
          for (int ieta = 0; ieta < NETABINS; ++ieta) {
            const string fname = "Tracks" + ETABINNAMES[ieta] + "F" + PTBINNAMES[ipt];
            const string bname = "Tracks" + ETABINNAMES[ieta] + "B" + PTBINNAMES[ipt];
            const ParticleVector particlesF = applyProjection<FinalState>(event, fname).particles();
            const ParticleVector particlesB = applyProjection<FinalState>(event, bname).particles();
            _vecsNchF[ipt][ieta].push_back((double) particlesF.size());
            _vecsNchB[ipt][ieta].push_back((double) particlesB.size());
            _vecWeight[ipt][ieta].push_back(weight);

            // Sum pT only for 100 MeV particles
            if (ipt == 0) {
              double sumptF = 0;
              double sumptB = 0;
              foreach (const Particle& p, particlesF) sumptF += p.momentum().pT();
              foreach (const Particle& p, particlesB) sumptB += p.momentum().pT();
              _vecsSumptF[ieta].push_back(sumptF);
              _vecsSumptB[ieta].push_back(sumptB);
            }

          }
        }
      }


      // Azimuthal correlations: delta-phi of each track w.r.t. the leading track
      const string etabin[3] = { "10", "20", "25" };
      for (int ieta = 0; ieta < 3; ieta++) {
        const string fname = "ChargedTracks" + etabin[ieta];
        const ParticleVector partTrks = applyProjection<FinalState>(event, fname).particlesByPt();
        if (partTrks.empty()) continue; //< Need a leading track

        // Find the leading track and fill the temp histograms
        const Particle& plead = partTrks[0];
        foreach (const Particle& p, partTrks) {
          if (&plead == &p) continue; //< Don't compare the lead particle to itself
          const double dphi = deltaPhi(p.momentum(), plead.momentum());
          _th_dphi[ieta].fill(dphi, weight);
          const bool sameside = (plead.momentum().eta() * p.momentum().eta() > 0);
          (sameside ? _th_same : _th_oppo)[ieta].fill(dphi, weight);
        }
      }
    }
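
    // Sketch of the FB extraction performed in finalize() below, with
    // hypothetical per-event vectors (any real run accumulates thousands of
    // entries via analyze() above):
    //
    //   vector<double> nF = {3, 5, 2};            // forward multiplicities
    //   vector<double> nB = {2, 6, 1};            // backward multiplicities
    //   vector<double> w  = {1, 1, 1};            // event weights
    //   double corr = a1_regression2(nF, nB, w);  // correlation in [-1, 1]
    //   double err  = error_on_slope(nF, nB, w);  // least-squares slope error
    //
    // Each scatter point then gets corr as its Y value and err as its Y error.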


    /// Finalize
    void finalize() {

      // FB part
      /// @todo For 2D plots we will need _vecsNchF[i], _vecsNchB[j]
      for (int ipt = 0; ipt < NPTBINS; ++ipt) {
        // There is just one Nch-vs-eta plot at 900 GeV, so exit the loop here
        // before touching unbooked scatters
        if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3) && ipt > 0) break;
        for (int ieta = 0; ieta < NETABINS; ++ieta) {
          _s_NchCorr_vsEta[ipt]->point(ieta).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
          _s_NchCorr_vsEta[ipt]->point(ieta).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
        }
      }

      if (!fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) { //< No Nch-vs-pT plots at 900 GeV
        for (int ieta = 0; ieta < NETABINS; ++ieta) {
          for (int ipt = 0; ipt < NPTBINS; ++ipt) {
            _s_NchCorr_vsPt[ieta]->point(ipt).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
            _s_NchCorr_vsPt[ieta]->point(ipt).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta]));
          }
        }
      }

      // Sum pT only for 100 MeV particles
      for (int ieta = 0; ieta < NETABINS; ++ieta) {
        _s_PtsumCorr->point(ieta).setY(a1_regression2(_vecsSumptF[ieta], _vecsSumptB[ieta], _vecWeight[0][ieta]));
        _s_PtsumCorr->point(ieta).setYErr(error_on_slope(_vecsSumptF[ieta], _vecsSumptB[ieta], _vecWeight[0][ieta]));
      }


      // Azimuthal part
      for (int ieta = 0; ieta < 3; ieta++) {
        /// @note We don't just do a subtraction because of the risk of negative values and negative errors
        /// @todo Should the difference always be shown as positive, i.e. y -> abs(y), etc.?
        /// @todo Should the normalization be done _after_ the -ve value treatment?
        YODA::Histo1D hdiffSO = _th_same[ieta] - _th_oppo[ieta];
        hdiffSO.normalize(hdiffSO.bin(0).width());
        for (size_t i = 0; i < hdiffSO.numBins(); ++i) {
          const double y    = hdiffSO.bin(i).height()    >= 0 ? hdiffSO.bin(i).height()    : 0;
          const double yerr = hdiffSO.bin(i).heightErr() >= 0 ? hdiffSO.bin(i).heightErr() : 0;
          _s_diffSO[ieta]->point(i).setY(y, yerr);
        }

        // Extract the minimal value of the dphi distribution
        double histMin = _th_dphi[ieta].bin(0).height();
        for (size_t iphi = 1; iphi < _th_dphi[ieta].numBins(); ++iphi) {
          histMin = std::min(histMin, _th_dphi[ieta].bin(iphi).height());
        }
        // Build the scatter of differences w.r.t. the minimum
        double sumDiff = 0;
        for (size_t iphi = 1; iphi < _th_dphi[ieta].numBins(); ++iphi) {
          const double diff = _th_dphi[ieta].bin(iphi).height() - histMin;
          _s_dphiMin[ieta]->point(iphi).setY(diff, _th_dphi[ieta].bin(iphi).heightErr());
          sumDiff += diff;
        }
        // Normalize the difference scatter to unit sum
        _s_dphiMin[ieta]->scale(1, 1/sumDiff);
      }

    }

    //@}


  private:

    static const int NPTBINS  = 7;
    static const int NETABINS = 5;
    static const double PTMINVALUES[NPTBINS];
    static const string PTBINNAMES[NPTBINS];
    static const double ETAVALUES[NETABINS];
    static const string ETABINNAMES[NETABINS];

    vector<double> _vecWeight[NPTBINS][NETABINS];
    vector<double> _vecsNchF[NPTBINS][NETABINS];
    vector<double> _vecsNchB[NPTBINS][NETABINS];
    vector<double> _vecsSumptF[NETABINS];
    vector<double> _vecsSumptB[NETABINS];

    /// @name Histograms
    //@{
    Scatter2DPtr _s_NchCorr_vsEta[NPTBINS], _s_NchCorr_vsPt[NETABINS], _s_PtsumCorr;
    Scatter2DPtr _s_dphiMin[3], _s_diffSO[3];
    YODA::Histo1D _th_dphi[3], _th_same[3], _th_oppo[3];
    //@}

  };


  /// @todo Initialize these inline at declaration with C++11
  const double ATLAS_2012_I1093734::PTMINVALUES[] = { 100, 200, 300, 500, 1000, 1500, 2000 };
  const string ATLAS_2012_I1093734::PTBINNAMES[]  = { "100", "200", "300", "500", "1000", "1500", "2000" };
  const double ATLAS_2012_I1093734::ETAVALUES[]   = { 0.5, 1.0, 1.5, 2.0, 2.5 };
  const string ATLAS_2012_I1093734::ETABINNAMES[] = { "05", "10", "15", "20", "25" };



  // Hook for the plugin system
  DECLARE_RIVET_PLUGIN(ATLAS_2012_I1093734);

}
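
// Typical invocation via the Rivet CLI (illustrative; the event file name is a
// placeholder and flag details may differ between Rivet versions):
//
//   rivet --analysis=ATLAS_2012_I1093734 events.hepmc
//
// Per the note in the constructor, expect O(20k) events or more before the FB
// correlation values stabilise.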