UA5_1988_S1867512.cc

// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/RivetAIDA.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Math/MathUtils.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/TriggerUA5.hh"

namespace Rivet {


  /// @brief UA5 charged particle correlations at 200, 546 and 900 GeV
  class UA5_1988_S1867512 : public Analysis {
  public:

    UA5_1988_S1867512() : Analysis("UA5_1988_S1867512")
    {
      _sumWPassed = 0;
    }


    /// @name Analysis methods
    //@{

    void init() {
      // Projections
      addProjection(TriggerUA5(), "Trigger");

      // Symmetric eta interval
      addProjection(ChargedFinalState(-0.5, 0.5), "CFS05");

      // Asymmetric intervals first
      // Forward eta intervals
      addProjection(ChargedFinalState(0.0, 1.0), "CFS10F");
      addProjection(ChargedFinalState(0.5, 1.5), "CFS15F");
      addProjection(ChargedFinalState(1.0, 2.0), "CFS20F");
      addProjection(ChargedFinalState(1.5, 2.5), "CFS25F");
      addProjection(ChargedFinalState(2.0, 3.0), "CFS30F");
      addProjection(ChargedFinalState(2.5, 3.5), "CFS35F");
      addProjection(ChargedFinalState(3.0, 4.0), "CFS40F");

      // Backward eta intervals
      addProjection(ChargedFinalState(-1.0,  0.0), "CFS10B");
      addProjection(ChargedFinalState(-1.5, -0.5), "CFS15B");
      addProjection(ChargedFinalState(-2.0, -1.0), "CFS20B");
      addProjection(ChargedFinalState(-2.5, -1.5), "CFS25B");
      addProjection(ChargedFinalState(-3.0, -2.0), "CFS30B");
      addProjection(ChargedFinalState(-3.5, -2.5), "CFS35B");
      addProjection(ChargedFinalState(-4.0, -3.0), "CFS40B");
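
      // Each interval is one pseudorapidity unit wide, and every forward
      // interval is paired with its mirror image on the backward side, so the
      // F/B pairs are separated by eta gaps of width 0, 1, 2, 3, 4, 5 and 6
      // units -- one per bin of the gap-width correlation plot in finalize().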

      // Histogram booking: we have sqrt(s) = 200, 546 and 900 GeV
      // TODO use DataPointSet to be able to output errors
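      // The three integer arguments of bookDataPointSet(d, x, y) select the
      // HepData reference table dNN-xNN-yNN of this paper; each beam energy
      // corresponds to a different y column of the same table.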
      if (fuzzyEquals(sqrtS()/GeV, 200.0, 1E-4)) {
        _hist_correl = bookDataPointSet(2, 1, 1);
        _hist_correl_asym = bookDataPointSet(3, 1, 1);
      } else if (fuzzyEquals(sqrtS()/GeV, 546.0, 1E-4)) {
        _hist_correl = bookDataPointSet(2, 1, 2);
        _hist_correl_asym = bookDataPointSet(3, 1, 2);
      } else if (fuzzyEquals(sqrtS()/GeV, 900.0, 1E-4)) {
        _hist_correl = bookDataPointSet(2, 1, 3);
        _hist_correl_asym = bookDataPointSet(3, 1, 3);
      }
    }


    void analyze(const Event& event) {
      // Trigger
      const bool trigger = applyProjection<TriggerUA5>(event, "Trigger").nsdDecision();
      if (!trigger) vetoEvent;
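      // Event passed the UA5 NSD trigger: accumulate its weight; the sum is
      // used in finalize() to scale the statistical errors.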
      _sumWPassed += event.weight();

      // Count forward/backward particles in each eta interval for this event
      n_10f.push_back(applyProjection<ChargedFinalState>(event, "CFS10F").size());
      n_15f.push_back(applyProjection<ChargedFinalState>(event, "CFS15F").size());
      n_20f.push_back(applyProjection<ChargedFinalState>(event, "CFS20F").size());
      n_25f.push_back(applyProjection<ChargedFinalState>(event, "CFS25F").size());
      n_30f.push_back(applyProjection<ChargedFinalState>(event, "CFS30F").size());
      n_35f.push_back(applyProjection<ChargedFinalState>(event, "CFS35F").size());
      n_40f.push_back(applyProjection<ChargedFinalState>(event, "CFS40F").size());
      //
      n_10b.push_back(applyProjection<ChargedFinalState>(event, "CFS10B").size());
      n_15b.push_back(applyProjection<ChargedFinalState>(event, "CFS15B").size());
      n_20b.push_back(applyProjection<ChargedFinalState>(event, "CFS20B").size());
      n_25b.push_back(applyProjection<ChargedFinalState>(event, "CFS25B").size());
      n_30b.push_back(applyProjection<ChargedFinalState>(event, "CFS30B").size());
      n_35b.push_back(applyProjection<ChargedFinalState>(event, "CFS35B").size());
      n_40b.push_back(applyProjection<ChargedFinalState>(event, "CFS40B").size());
      //
      n_05.push_back(applyProjection<ChargedFinalState>(event, "CFS05").size());
    }


    void finalize() {
      // The correlation strength is defined in Eqs. 4.1 and 4.2 of the paper.

      // Fill the histos; the gap-width histo comes first.
      //      * Set the errors as Delta b / sqrt(sumWPassed), with Delta b
      //      being the absolute uncertainty of b according to Gaussian
      //      error propagation (linear limit), assuming Poissonian
      //      uncertainties for the numbers of particles in the eta intervals.
      //
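      // In terms of the per-event forward/backward multiplicities nF and nB,
      // the forward-backward correlation strength b is, schematically,
      //   b = cov(nF, nB) / var(nF) = corr(nF, nB) * sqrt(var(nB)/var(nF)),
      // i.e. the slope of the linear dependence of <nB> on nF; for symmetric
      // intervals this reduces to the ordinary correlation coefficient. The
      // exact definitions are delegated to the correlation() and
      // correlation_err() helpers from Rivet's MathUtils.hh.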

      // Define vectors used to fill the DataPointSets
      vector<double> xvals;
      vector<double> xerrs;
      vector<double> yvals;
      vector<double> yerrs;


      // x-axis of the gap-width plot: gap widths 0 to 6, each with half-width 0.5
      for (int i = 0; i < 7; i++) {
        xvals.push_back(static_cast<double>(i));
        xerrs.push_back(0.5);
      }

      // Fill the y-value vector
      yvals.push_back(correlation(n_10f, n_10b));
      yvals.push_back(correlation(n_15f, n_15b));
      yvals.push_back(correlation(n_20f, n_20b));
      yvals.push_back(correlation(n_25f, n_25b));
      yvals.push_back(correlation(n_30f, n_30b));
      yvals.push_back(correlation(n_35f, n_35b));
      yvals.push_back(correlation(n_40f, n_40b));

      // Fill the y-error vector
      yerrs.push_back(correlation_err(n_10f, n_10b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_15f, n_15b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_20f, n_20b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_25f, n_25b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_30f, n_30b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_35f, n_35b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_40f, n_40b)/sqrt(_sumWPassed));

      // Fill the DPS
      _hist_correl->setCoordinate(0, xvals, xerrs);
      _hist_correl->setCoordinate(1, yvals, yerrs);
      // Now do the other histo -- clear the already-used vectors first
      xvals.clear();
      xerrs.clear();
      yvals.clear();
      yerrs.clear();

      // Different binning for this one: five gap-centre points at
      // 0, 0.5, 1.0, 1.5 and 2.0, matching the five y-values filled below
      for (int i = 0; i < 5; i++) {
        xvals.push_back(0.5 * static_cast<double>(i));
        xerrs.push_back(0.25);
      }

      // Fill the gap-centre histo (Fig. 15)
      //
      // The first bin contains the correlation strength from the gap-width
      // histogram point with an eta gap of two units.
      //
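      // All five points use F/B interval pairs with a fixed eta gap of two
      // units, shifted progressively forward: (1,2)&(-2,-1) at gap centre 0,
      // (1.5,2.5)&(-1.5,-0.5) at 0.5, (2,3)&(-1,0) at 1.0,
      // (2.5,3.5)&(-0.5,0.5) at 1.5, and (3,4)&(0,1) at 2.0.
      //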
      // Fill the y-value vector
      yvals.push_back(correlation(n_20f, n_20b));
      yvals.push_back(correlation(n_25f, n_15b));
      yvals.push_back(correlation(n_30f, n_10b));
      yvals.push_back(correlation(n_35f, n_05 ));
      yvals.push_back(correlation(n_40f, n_10f));

      // Fill the y-error vector
      yerrs.push_back(correlation_err(n_20f, n_20b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_25f, n_15b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_30f, n_10b)/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_35f, n_05 )/sqrt(_sumWPassed));
      yerrs.push_back(correlation_err(n_40f, n_10f)/sqrt(_sumWPassed));


      // Fill the DPS with the correlation strengths for the asymmetric
      // intervals, see Tab. 5
      _hist_correl_asym->setCoordinate(0, xvals, xerrs);
      _hist_correl_asym->setCoordinate(1, yvals, yerrs);
    }

    //@}


  private:

    /// @name Counters
    //@{
    double _sumWPassed;
    //@}


    /// @name Vectors for storing the number of particles in the different eta intervals per event.
    /// @todo Is there a better way?
    //@{
    std::vector<int> n_10f;
    std::vector<int> n_15f;
    std::vector<int> n_20f;
    std::vector<int> n_25f;
    std::vector<int> n_30f;
    std::vector<int> n_35f;
    std::vector<int> n_40f;
    //
    std::vector<int> n_10b;
    std::vector<int> n_15b;
    std::vector<int> n_20b;
    std::vector<int> n_25b;
    std::vector<int> n_30b;
    std::vector<int> n_35b;
    std::vector<int> n_40b;
    //
    std::vector<int> n_05;
    //@}


    /// @name Histograms
    //@{
    // Symmetric eta intervals
    AIDA::IDataPointSet *_hist_correl;
    // For asymmetric eta intervals
    AIDA::IDataPointSet *_hist_correl_asym;
    //@}

  };



  // The hook for the plugin system
  DECLARE_RIVET_PLUGIN(UA5_1988_S1867512);

}
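
// Usage sketch (not part of the analysis code): once built into the Rivet
// plugin library, this analysis can typically be run on a HepMC event file
// with something like
//   rivet -a UA5_1988_S1867512 events.hepmc
// (the exact invocation depends on the Rivet version in use).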