STAR_2008_S7869363.cc
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/RivetYODA.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/LossyFinalState.hh"
#include "Rivet/Tools/ParticleIdUtils.hh"

namespace Rivet {


  class STARRandomFilter {
  public:

    STARRandomFilter() { }

    // Return true to throw away a particle
    bool operator()(const Particle& p) {
      /// @todo Use a better RNG?
      size_t idx = int(floor(p.momentum().pT()/MeV/50));
      if (idx > 11) idx = 11;
      return (rand()/static_cast<double>(RAND_MAX) > _trkeff[idx]);
    }

    int compare(const STARRandomFilter& other) const {
      return true;
    }

  private:

    const static double _trkeff[12];

  };


  // Here we have the track reconstruction efficiencies for tracks with pT from 0 to 600 MeV
  // in steps of 50 MeV. The efficiency is assumed to be 0.88 for pT >= 600 MeV
  const double STARRandomFilter::_trkeff[12] = {0,0,0.38,0.72,0.78,0.81,0.82,0.84,0.85,0.86,0.87,0.88};



  class STAR_2008_S7869363 : public Analysis {
  public:

    /// @name Constructors etc.
    //@{

    /// Constructor
    STAR_2008_S7869363()
      : Analysis("STAR_2008_S7869363"),
        nCutsPassed(0),
        nPi(0), nPiPlus(0), nKaon(0), nKaonPlus(0), nProton(0), nAntiProton(0)
    {  }

    //@}


  public:

    /// @name Analysis methods
    //@{

    /// Book histograms and initialise projections before the run
    void init() {
      const ChargedFinalState cfs(-0.5, 0.5, 0.2*GeV);
      const LossyFinalState<STARRandomFilter> lfs(cfs, STARRandomFilter());
      addProjection(lfs, "FS");

      _h_dNch           = bookHisto1D(1, 1, 1);
      _h_dpT_Pi         = bookHisto1D(2, 1, 1);
      _h_dpT_Piplus     = bookHisto1D(2, 1, 2);
      _h_dpT_Kaon       = bookHisto1D(2, 1, 3);
      _h_dpT_Kaonplus   = bookHisto1D(2, 1, 4);
      _h_dpT_AntiProton = bookHisto1D(2, 1, 5);
      _h_dpT_Proton     = bookHisto1D(2, 1, 6);
    }


    /// Perform the per-event analysis
    void analyze(const Event& event) {
      const FinalState& charged = applyProjection<FinalState>(event, "FS");

      // Vertex reconstruction efficiencies as a function of charged multiplicity.
      // For events with more than 23 reconstructed tracks the efficiency is 100%.
      double vtxeffs[24] = { 0.000000,0.512667,0.739365,0.847131,0.906946,0.940922,0.959328,0.96997,
                             0.975838,0.984432,0.988311,0.990327,0.990758,0.995767,0.99412,0.992271,
                             0.996631,0.994802,0.99635,0.997384,0.998986,0.996441,0.994513,1.000000 };

      double vtxeff = 1.0;
      if (charged.particles().size() < 24) {
        vtxeff = vtxeffs[charged.particles().size()];
      }

      const double weight = vtxeff * event.weight();

      foreach (const Particle& p, charged.particles()) {
        double pT = p.momentum().pT()/GeV;
        double y = p.momentum().rapidity();
        if (fabs(y) < 0.1) {
          nCutsPassed += weight;
          const PdgId id = p.pdgId();
          switch (id) {
          case -211:
            _h_dpT_Pi->fill(pT, weight/(TWOPI*pT*0.2));
            nPi += weight;
            break;
          case 211:
            _h_dpT_Piplus->fill(pT, weight/(TWOPI*pT*0.2));
            nPiPlus += weight;
            break;
          case -321:
            _h_dpT_Kaon->fill(pT, weight/(TWOPI*pT*0.2));
            nKaon += weight;
            break;
          case 321:
            _h_dpT_Kaonplus->fill(pT, weight/(TWOPI*pT*0.2));
            nKaonPlus += weight;
            break;
          case -2212:
            _h_dpT_AntiProton->fill(pT, weight/(TWOPI*pT*0.2));
            nAntiProton += weight;
            break;
          case 2212:
            _h_dpT_Proton->fill(pT, weight/(TWOPI*pT*0.2));
            nProton += weight;
            break;
          }
        }
        else {
          continue;
        }
      }
      _h_dNch->fill(charged.particles().size(), weight);
    }


    /// Normalise histograms etc., after the run
    void finalize() {
      //double nTot = nPi + nPiPlus + nKaon + nKaonPlus + nProton + nAntiProton;
      normalize(_h_dNch);

      /// @todo Norm to data!
      normalize(_h_dpT_Pi        , 0.389825 );
      normalize(_h_dpT_Piplus    , 0.396025 );
      normalize(_h_dpT_Kaon      , 0.03897  );
      normalize(_h_dpT_Kaonplus  , 0.04046  );
      normalize(_h_dpT_AntiProton, 0.0187255);
      normalize(_h_dpT_Proton    , 0.016511 );
    }


  private:


    Histo1DPtr _h_dNch;

    Histo1DPtr _h_dpT_Pi, _h_dpT_Piplus;
    Histo1DPtr _h_dpT_Kaon, _h_dpT_Kaonplus;
    Histo1DPtr _h_dpT_AntiProton, _h_dpT_Proton;

    Profile1DPtr _h_pT_vs_Nch;
    double nCutsPassed, nPi, nPiPlus, nKaon, nKaonPlus, nProton, nAntiProton;
  };



  // The hook for the plugin system
  DECLARE_RIVET_PLUGIN(STAR_2008_S7869363);

}
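
Usage sketch: this analysis is registered with Rivet's plugin system via DECLARE_RIVET_PLUGIN, so it is typically selected by name on the Rivet command line and run over a HepMC event stream, e.g. "rivet -a STAR_2008_S7869363 events.hepmc"; the booked histograms are then written out for comparison with the STAR reference data. Exact options and output handling depend on the Rivet version in use.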