CbmRichParallelQa.cxx
/* Copyright (C) 2010-2011 UGiessen/JINR-LIT, Giessen/Dubna
   SPDX-License-Identifier: GPL-3.0-only
   Authors: Semen Lebedev [committer] */

#include "CbmRichParallelQa.h"

#include "CbmRichHit.h"
#include "FairRootManager.h"
#include "tbb/parallel_for.h"
#include "tbb/parallel_invoke.h"
#include "tbb/spin_mutex.h"
#include "tbb/task.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/task_scheduler_observer.h"
#include "tbb/tick_count.h"

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <map>
#include <pthread.h>

using namespace std;
using namespace tbb;

int threads_counter = 0;             // number of scheduler threads registered so far
map<int, long> threadToCpuMap;       // look up cpuId by threadId
map<int, int> threadNumberToCpuMap;  // look up cpuId by thread number (see threads_counter)
spin_mutex mutex;

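// Observer that pins each TBB worker thread to a fixed CPU core via
// pthread_setaffinity_np(), using the maps defined above.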
class TMyObserver : public task_scheduler_observer {
 public:
  void FInit();  // set the cpu <-> thread correspondence
 protected:
  void on_scheduler_entry(bool Is_worker);  // called at the beginning of each thread's execution
  void on_scheduler_exit(bool Is_worker);   // called at the end of each thread's execution
};

// set the cpu <-> thread correspondence
void TMyObserver::FInit()
{
  for (int i = 0; i < 8; i++) {
    //threadNumberToCpuMap[2 * i + 0] = i;
    //threadNumberToCpuMap[2 * i + 1] = i + 8;
    threadNumberToCpuMap[i] = i;
    threadNumberToCpuMap[8 + i] = i + 8;
  }
  observe(true);
}

#define handle_error_en(en, msg) \
  do {                           \
    errno = en;                  \
    perror(msg);                 \
    exit(EXIT_FAILURE);          \
  } while (0)

// run at the beginning of each thread's execution
void TMyObserver::on_scheduler_entry(bool Is_worker)
{
  //cout << "-I- Scheduler entry" << endl;
  pthread_t I = pthread_self();
  spin_mutex::scoped_lock lock;
  lock.acquire(mutex);
  threads_counter++;
  int cpuId = threadNumberToCpuMap[threads_counter % 16];

  //cout << "ThrId=" << I << " was created as thread number " << threads_counter << ".";
  //cout << " It runs on cpu " << cpuId << endl;

  lock.release();
  threadToCpuMap[I] = cpuId;

  int s;
  cpu_set_t cpuset;
  pthread_t thread = I;
  CPU_ZERO(&cpuset);
  CPU_SET(cpuId, &cpuset);
  // cout << "before" << endl;  //FIXME: segmentation fault somewhere.
  s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
  if (s != 0) {
    cout << "-E- pthread_setaffinity_np failed" << endl;
    // handle_error_en(s, "pthread_setaffinity_np");
  }
}

// run at the end of each thread's execution
void TMyObserver::on_scheduler_exit(bool Is_worker)  //FIXME: is never called
{
  pthread_t I = pthread_self();
  cout << "Thread with id " << I << " has finished; " << --threads_counter << " threads remain." << endl;
}

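// TBB task that runs the ENN ring finder over all buffered events;
// the test routine below spawns one such task per worker thread.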
class FinderTaskQa : public task {
  CbmL1RichENNRingFinder* fHT;
  //CbmRichRingFinderHough* fHT;
  std::vector<std::vector<CbmRichHoughHit>> fData;
  // std::vector<CbmRichHoughHit> fData;
 public:
  FinderTaskQa(CbmL1RichENNRingFinder* HTImpl, const std::vector<std::vector<CbmRichHoughHit>>& data)
  {
    fHT = HTImpl;
    fData = data;
    //fData.assign(data[0].begin(), data[0].end());
  }
  task* execute()
  {
    //for (int j = 0; j < 10; j++) {
    for (size_t i = 0; i < fData.size(); i++) {
      //cout << "rec event " << i << endl;
      fHT->DoFind(fData[i]);
    }
    //}
    return NULL;
  }
};

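// Create one ring-finder instance per potential worker thread so that the
// parallel tasks do not share any finder state.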
CbmRichParallelQa::CbmRichParallelQa()
{
  for (int i = 0; i < kMAX_NOF_THREADS; i++) {
    //fHT[i] = new CbmRichRingFinderHough(0, "compact");
    fHT[i] = new CbmL1RichENNRingFinder(0);
  }

  fEventNumber = 0;
  fExecTime = 0.;
}

CbmRichParallelQa::~CbmRichParallelQa() {}

InitStatus CbmRichParallelQa::Init()
{
  cout << "InitStatus CbmRichParallelQa::Init()" << endl;

  FairRootManager* ioman = FairRootManager::Instance();
  if (NULL == ioman) {
    Fatal("CbmRichParallelQa::Init", "RootManager not instantiated!");
  }

  fRichHits = (TClonesArray*) ioman->GetObject("RichHit");
  if (NULL == fRichHits) {
    Fatal("CbmRichParallelQa::Init", "No RichHit array!");
  }

  fRichRings = (TClonesArray*) ioman->GetObject("RichRing");
  if (NULL == fRichRings) {
    Fatal("CbmRichParallelQa::Init", "No RichRing array!");
  }

  for (int i = 0; i < kMAX_NOF_THREADS; i++) {
    fHT[i]->Init();
  }

  // tbb::task_scheduler_init init();
  // TMyObserver obs;
  // obs.FInit();  // set the cpu-thread correspondence

  return kSUCCESS;
}

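// Exec() is called once per event: it converts the event's RICH hits into
// CbmRichHoughHit objects and buffers them in fData; after the last event the
// TBB scheduler is initialised and the parallel test below is run.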
void CbmRichParallelQa::Exec(Option_t* option)
{
  fEventNumber++;
  cout << "-I- Read event " << fEventNumber << endl;
  std::vector<CbmRichHoughHit> data;

  const Int_t nhits = fRichHits->GetEntriesFast();
  if (!nhits) {
    cout << "-E- CbmRichParallelQa::Exec: No hits in this event." << endl;
    return;
  }
  data.reserve(nhits);

  for (Int_t iHit = 0; iHit < nhits; iHit++) {
    CbmRichHit* hit = (CbmRichHit*) fRichHits->At(iHit);
    if (hit) {
      CbmRichHoughHit tempPoint;
      tempPoint.fHit.fX = hit->GetX();
      tempPoint.fHit.fY = hit->GetY();
      tempPoint.fX2plusY2 = hit->GetX() * hit->GetX() + hit->GetY() * hit->GetY();
      tempPoint.fId = iHit;
      tempPoint.fIsUsed = false;
      data.push_back(tempPoint);
    }
  }

  fData.push_back(data);

  if (fEventNumber == fNofEvents) {
    cout << "-I- NofTasks = " << fNofTasks << endl;
    TMyObserver obs;
    obs.FInit();  // set the cpu-thread correspondence
    obs.observe(true);
    tbb::task_scheduler_init init(fNofTasks);
    DoTestWithTask();
  }
}

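// Run the ring finder in fNofTasks parallel TBB tasks, each processing the
// full set of buffered events, and report the timing.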
void CbmRichParallelQa::DoTestWithTask()
{
  tbb::tick_count t0 = tbb::tick_count::now();
  task* root_task = new (task::allocate_root()) empty_task;
  root_task->set_ref_count(fNofTasks + 1);
  task_list list;

  for (int iT = 0; iT < fNofTasks; iT++) {
    list.push_back(*new (root_task->allocate_child()) FinderTaskQa(fHT[iT], fData));
  }

  root_task->spawn_and_wait_for_all(list);
  tbb::tick_count t1 = tbb::tick_count::now();

  root_task->destroy(*root_task);
  fExecTime += (t1 - t0).seconds();
  cout << 1000. * fExecTime << " ms for " << fData.size() << " events" << endl;
  cout << 1000. * fExecTime / (fData.size()) << " ms per event" << endl;
  cout << fNofTasks * fData.size() / fExecTime << " events per sec" << endl;

  std::ofstream fout;
  fout.open("parallel.txt", std::ios_base::app);
  fout << (int) (fNofTasks * fData.size() / fExecTime) << ",";
}