forked from AliceO2Group/AliceO2
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathROFLookupTables.h
More file actions
681 lines (596 loc) · 23.1 KB
/
ROFLookupTables.h
File metadata and controls
681 lines (596 loc) · 23.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
// Copyright 2019-2026 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
#ifndef TRACKINGITSU_INCLUDE_ROFOVERLAPTABLE_H_
#define TRACKINGITSU_INCLUDE_ROFOVERLAPTABLE_H_
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#ifndef GPUCA_GPUCODE
#include <format>
#endif
#include "CommonConstants/LHCConstants.h"
#include "CommonDataFormat/RangeReference.h"
#include "DataFormatsITS/TimeEstBC.h"
#include "DataFormatsITS/Vertex.h"
#include "GPUCommonLogger.h"
#include "GPUCommonMath.h"
#include "GPUCommonDef.h"
namespace o2::its
{
// Layer timing definition
// Per-layer timing definition of the read-out frames (ROFs);
// all quantities are expressed in LHC bunch crossings (BC).
struct LayerTiming {
  using BCType = TimeStampType;
  BCType mNROFsTF{0};       // number of ROFs per timeframe
  BCType mROFLength{0};     // ROF length in BC
  BCType mROFDelay{0};      // delay of ROFs wrt start of first orbit in TF in BC
  BCType mROFBias{0};       // bias wrt to the LHC clock in BC
  BCType mROFAddTimeErr{0}; // additionally imposed uncertainty on ROF time in BC

  // return start of ROF in BC (rofId counted from the start of the TF)
  // this does not account for the opt. error (mROFAddTimeErr)!
  GPUhdi() BCType getROFStartInBC(BCType rofId) const noexcept
  {
    assert(rofId < mNROFsTF && rofId >= 0);
    return (mROFLength * rofId) + mROFDelay + mROFBias;
  }

  // return end of ROF in BCs (equals the start of the following ROF,
  // i.e. an exclusive upper bound)
  // this does not account for the opt. error!
  GPUhdi() BCType getROFEndInBC(BCType rofId) const noexcept
  {
    assert(rofId < mNROFsTF);
    return getROFStartInBC(rofId) + mROFLength;
  }

  // return (clamped) time-interval of rof, encoded as {midpoint, half-width}
  // with withError=true the interval is widened by mROFAddTimeErr on both sides;
  // the lower edge is clamped at BC 0, which is why the half-width is recomputed
  // from start/end instead of reusing mROFLength / 2
  GPUhdi() TimeEstBC getROFTimeBounds(BCType rofId, bool withError = false) const noexcept
  {
    if (withError) {
      int64_t start = getROFStartInBC(rofId);
      int64_t end = getROFEndInBC(rofId);
      start = o2::gpu::CAMath::Max(start - mROFAddTimeErr, int64_t(0));
      end += mROFAddTimeErr;
      const BCType half = (end - start + 1u) / 2u; // half-width, rounded up
      return {BCType(start) + half, static_cast<uint16_t>(half)};
    }
    const BCType start = getROFStartInBC(rofId);
    const BCType half = mROFLength / BCType(2);
    return {start + half, static_cast<TimeStampErrorType>(half)};
  }

  // return which ROF this BC belongs to; BCs before the first ROF are
  // attributed to ROF 0, the result is NOT clamped at mNROFsTF - 1
  // NOTE(review): declared host-inline only (GPUhi) while the other accessors
  // are host+device (GPUhdi) — confirm this is intended
  GPUhi() BCType getROF(BCType bc) const noexcept
  {
    const BCType offset = mROFDelay + mROFBias;
    if (bc <= offset) {
      return 0;
    }
    return (bc - offset) / mROFLength;
  }

#ifndef GPUCA_GPUCODE
  // human-readable one-line summary of all timing parameters
  GPUh() std::string asString() const
  {
    return std::format("NROFsPerTF {:4} ROFLength {:4} ({:4} per Orbit) ROFDelay {:4} ROFBias {:4} ROFAddTimeErr {:4}", mNROFsTF, mROFLength, (o2::constants::lhc::LHCMaxBunches / mROFLength), mROFDelay, mROFBias, mROFAddTimeErr);
  }
  GPUh() void print() const
  {
    LOG(info) << asString();
  }
#endif
};
// Base class for lookup to define layers
template <int32_t NLayers>
class LayerTimingBase
{
protected:
LayerTiming mLayers[NLayers];
public:
using T = LayerTiming::BCType;
LayerTimingBase() = default;
GPUh() void defineLayer(int32_t layer, T nROFsTF, T rofLength, T rofDelay, T rofBias, T rofTE)
{
assert(layer >= 0 && layer < NLayers);
mLayers[layer] = {nROFsTF, rofLength, rofDelay, rofBias, rofTE};
}
GPUh() void defineLayer(int32_t layer, const LayerTiming& timing)
{
assert(layer >= 0 && layer < NLayers);
mLayers[layer] = timing;
}
GPUhdi() const LayerTiming& getLayer(int32_t layer) const
{
assert(layer >= 0 && layer < NLayers);
return mLayers[layer];
}
GPUhdi() constexpr int32_t getEntries() noexcept { return NLayers; }
#ifndef GPUCA_GPUCODE
GPUh() void print() const
{
LOGP(info, "Imposed time structure:");
for (int32_t iL{0}; iL < NLayers; ++iL) {
LOGP(info, "\tLayer:{} {}", iL, mLayers[iL].asString());
}
}
#endif
};
// GPU friendly view of the table below
// GPU friendly, non-owning view of ROFOverlapTable (see below).
template <int32_t NLayers, typename TableEntry, typename TableIndex>
struct ROFOverlapTableView {
  const TableEntry* mFlatTable{nullptr}; // flattened per-(from,to) overlap ranges
  const TableIndex* mIndices{nullptr};   // NLayers*NLayers ranges into mFlatTable
  const LayerTiming* mLayers{nullptr};   // per-layer timing definitions

  /// Read-only access to the timing definition of one layer.
  GPUhdi() const LayerTiming& getLayer(int32_t layer) const noexcept
  {
    assert(layer >= 0 && layer < NLayers);
    return mLayers[layer];
  }

  /// Return the index of the layer with the shortest ROF length.
  GPUh() int getClock() const noexcept
  {
    // we take the fastest layer as clock
    int fastest = 0;
    uint32_t shortestROF{std::numeric_limits<uint32_t>::max()};
    for (int iL{0}; iL < NLayers; ++iL) {
      const auto& layer = getLayer(iL);
      if (layer.mROFLength < shortestROF) {
        shortestROF = layer.mROFLength; // BUGFIX: track the running minimum; without
                                        // this every layer won and the LAST layer was
                                        // always returned instead of the fastest one
        fastest = iL;
      }
    }
    return fastest;
  }

  /// Return the timing definition of the clock (fastest) layer.
  GPUh() const LayerTiming& getClockLayer() const noexcept
  {
    return mLayers[getClock()];
  }

  /// Return the [first ROF, count] range in layer 'to' overlapping ROF 'rofIdx' of layer 'from'.
  GPUhdi() const TableEntry& getOverlap(int32_t from, int32_t to, size_t rofIdx) const noexcept
  {
    assert(from < NLayers && to < NLayers);
    const size_t linearIdx = (from * NLayers) + to;
    const auto& idx = mIndices[linearIdx];
    assert(rofIdx < idx.getEntries());
    return mFlatTable[idx.getFirstEntry() + rofIdx];
  }

  /// Check whether ROF 'rof1' of layer1 lies in the precomputed overlap range of
  /// ROF 'rof0' of layer0; a layer is only compatible with itself for identical ROFs.
  GPUhdi() bool doROFsOverlap(int32_t layer0, size_t rof0, int32_t layer1, size_t rof1) const noexcept
  {
    if (layer0 == layer1) { // layer is compatible with itself
      return rof0 == rof1;
    }
    assert(layer0 < NLayers && layer1 < NLayers);
    const size_t linearIdx = (layer0 * NLayers) + layer1;
    const auto& idx = mIndices[linearIdx];
    if (rof0 >= idx.getEntries()) {
      return false;
    }
    const auto& overlap = mFlatTable[idx.getFirstEntry() + rof0];
    if (overlap.getEntries() == 0) {
      return false;
    }
    const size_t firstCompatible = overlap.getFirstEntry();
    const size_t lastCompatible = firstCompatible + overlap.getEntries() - 1;
    return rof1 >= firstCompatible && rof1 <= lastCompatible;
  }

  /// Combined timestamp of two overlapping ROFs.
  GPUhdi() TimeEstBC getTimeStamp(int32_t layer0, size_t rof0, int32_t layer1, size_t rof1) const noexcept
  {
    assert(layer0 < NLayers && layer1 < NLayers);
    assert(doROFsOverlap(layer0, rof0, layer1, rof1));
    // retrieves the combined timestamp
    // e.g., taking one cluster from rof0 and one from rof1
    // and constructing a tracklet (doublet) what is its time
    // this assumes that the rofs overlap, e.g. doROFsOverlap -> true
    // get timestamp including margins from rof0 and rof1
    const auto t0 = mLayers[layer0].getROFTimeBounds(rof0, true);
    const auto t1 = mLayers[layer1].getROFTimeBounds(rof1, true);
    return t0 + t1;
  }

#ifndef GPUCA_GPUCODE
  /// Print functions
  /// Print all cross-layer mappings followed by a summary.
  GPUh() void printAll() const
  {
    for (int32_t i = 0; i < NLayers; ++i) {
      for (int32_t j = 0; j < NLayers; ++j) {
        if (i != j) {
          printMapping(i, j);
        }
      }
    }
    printSummary();
  }

  /// Print the per-ROF overlap ranges of one (from,to) layer pair.
  GPUh() void printMapping(int32_t from, int32_t to) const
  {
    if (from == to) {
      LOGP(error, "No self-lookup supported");
      return;
    }
    constexpr int w_index = 10;
    constexpr int w_first = 12;
    constexpr int w_last = 12;
    constexpr int w_count = 10;
    LOGF(info, "Overlap mapping: Layer %d -> Layer %d", from, to);
    LOGP(info, "From: {}", mLayers[from].asString());
    LOGP(info, "To  : {}", mLayers[to].asString());
    LOGF(info, "%*s | %*s | %*s | %*s", w_index, "ROF.index", w_first, "First.ROF", w_last, "Last.ROF", w_count, "Count");
    LOGF(info, "%.*s-+-%.*s-+-%.*s-+-%.*s", w_index, "----------", w_first, "------------", w_last, "------------", w_count, "----------");
    const size_t linearIdx = (from * NLayers) + to;
    const auto& idx = mIndices[linearIdx];
    for (int32_t i = 0; i < idx.getEntries(); ++i) {
      const auto& overlap = getOverlap(from, to, i);
      LOGF(info, "%*d | %*d | %*d | %*d", w_index, i, w_first, overlap.getFirstEntry(), w_last, overlap.getEntriesBound() - 1, w_count, overlap.getEntries());
    }
  }

  /// Print table statistics (entry counts and approximate view size in bytes).
  GPUh() void printSummary() const
  {
    uint32_t totalEntries{0};
    size_t flatTableSize{0};
    for (int32_t i = 0; i < NLayers; ++i) {
      for (int32_t j = 0; j < NLayers; ++j) {
        if (i != j) {
          const size_t linearIdx = (i * NLayers) + j;
          const auto& idx = mIndices[linearIdx];
          totalEntries += idx.getEntries();
          flatTableSize += idx.getEntries();
        }
      }
    }
    for (int32_t i = 0; i < NLayers; ++i) {
      mLayers[i].print();
    }
    const uint32_t totalBytes = (flatTableSize * sizeof(TableEntry)) + (NLayers * NLayers * sizeof(TableIndex));
    LOGF(info, "------------------------------------------------------------");
    LOGF(info, "Total overlap table size: %u entries", totalEntries);
    LOGF(info, "Flat table size: %zu entries", flatTableSize);
    LOGF(info, "Total view size: %u bytes", totalBytes);
    LOGF(info, "------------------------------------------------------------");
  }
#endif
};
// Precalculated lookup table to find overlapping ROFs in another layer given a ROF index in the current layer
template <int32_t NLayers>
class ROFOverlapTable : public LayerTimingBase<NLayers>
{
 public:
  using T = LayerTimingBase<NLayers>::T;
  using TableEntry = dataformats::RangeReference<T, T>; // [first overlapping ROF, count] in the target layer
  using TableIndex = dataformats::RangeReference<T, T>; // [offset, count] into mFlatTable per (from,to) pair
  using View = ROFOverlapTableView<NLayers, TableEntry, TableIndex>;
  ROFOverlapTable() = default;

  // build the full overlap table from the layer timings set via defineLayer()
  GPUh() void init()
  {
    std::vector<TableEntry> table[NLayers][NLayers];
    for (int32_t i{0}; i < NLayers; ++i) {
      for (int32_t j{0}; j < NLayers; ++j) {
        if (i != j) { // we do not need self-lookup
          buildMapping(i, j, table[i][j]);
        }
      }
    }
    flatten(table);
  }

  // non-owning view over the host-side table
  GPUh() View getView() const
  {
    View view;
    view.mFlatTable = mFlatTable.data();
    view.mIndices = mIndices;
    view.mLayers = this->mLayers;
    return view;
  }

  // non-owning view over buffers already copied to the device
  GPUh() View getDeviceView(const TableEntry* deviceFlatTablePtr, const TableIndex* deviceIndicesPtr, const LayerTiming* deviceLayerTimingPtr) const
  {
    View view;
    view.mFlatTable = deviceFlatTablePtr;
    view.mIndices = deviceIndicesPtr;
    view.mLayers = deviceLayerTimingPtr;
    return view;
  }

  // number of entries in the flattened table (for device buffer sizing)
  GPUh() size_t getFlatTableSize() const noexcept { return mFlatTable.size(); }
  // number of index entries (one per ordered layer pair, incl. self pairs)
  static GPUh() constexpr size_t getIndicesSize() { return NLayers * NLayers; }

 private:
  // compute, for every ROF of layer 'from', the [first, count] range of
  // overlapping ROFs in layer 'to'; both intervals are widened by the
  // respective mROFAddTimeErr, the 'from' lower edge clamped at BC 0
  GPUh() void buildMapping(int32_t from, int32_t to, std::vector<TableEntry>& table)
  {
    const auto& layerFrom = this->mLayers[from];
    const auto& layerTo = this->mLayers[to];
    table.resize(layerFrom.mNROFsTF);
    for (int32_t iROF{0}; iROF < layerFrom.mNROFsTF; ++iROF) {
      int64_t fromStart = o2::gpu::CAMath::Max((int64_t)layerFrom.getROFStartInBC(iROF) - (int64_t)layerFrom.mROFAddTimeErr, int64_t(0));
      int64_t fromEnd = (int64_t)layerFrom.getROFEndInBC(iROF) + layerFrom.mROFAddTimeErr;
      // first guess of the candidate range in 'to' by plain BC division ...
      int32_t firstROFTo = o2::gpu::CAMath::Max(0, (int32_t)((fromStart - (int64_t)layerTo.mROFAddTimeErr - (int64_t)layerTo.mROFDelay - (int64_t)layerTo.mROFBias) / (int64_t)layerTo.mROFLength));
      int32_t lastROFTo = (int32_t)((fromEnd + (int64_t)layerTo.mROFAddTimeErr - (int64_t)layerTo.mROFDelay - (int64_t)layerTo.mROFBias - 1) / (int64_t)layerTo.mROFLength);
      firstROFTo = o2::gpu::CAMath::Max(0, firstROFTo);
      lastROFTo = o2::gpu::CAMath::Min((int32_t)layerTo.mNROFsTF - 1, lastROFTo);
      // ... then refine: advance the lower edge past non-overlapping ROFs
      // (overlap test: half-open intervals, toEnd > fromStart && toStart < fromEnd)
      while (firstROFTo <= lastROFTo) {
        int64_t toStart = o2::gpu::CAMath::Max((int64_t)layerTo.getROFStartInBC(firstROFTo) - (int64_t)layerTo.mROFAddTimeErr, int64_t(0));
        int64_t toEnd = (int64_t)layerTo.getROFEndInBC(firstROFTo) + layerTo.mROFAddTimeErr;
        if (toEnd > fromStart && toStart < fromEnd) {
          break;
        }
        ++firstROFTo;
      }
      // ... and retract the upper edge symmetrically
      while (lastROFTo >= firstROFTo) {
        int64_t toStart = o2::gpu::CAMath::Max((int64_t)layerTo.getROFStartInBC(lastROFTo) - (int64_t)layerTo.mROFAddTimeErr, int64_t(0));
        int64_t toEnd = (int64_t)layerTo.getROFEndInBC(lastROFTo) + layerTo.mROFAddTimeErr;
        if (toEnd > fromStart && toStart < fromEnd) {
          break;
        }
        --lastROFTo;
      }
      // empty range (count 0) when the edges crossed
      int32_t count = (firstROFTo <= lastROFTo) ? (lastROFTo - firstROFTo + 1) : 0;
      table[iROF] = {static_cast<T>(firstROFTo), static_cast<T>(count)};
    }
  }

  // compress the temporary per-pair vectors into a single flat table plus
  // one index range per ordered layer pair (self pairs get an empty range)
  GPUh() void flatten(const std::vector<TableEntry> table[NLayers][NLayers])
  {
    size_t total{0};
    for (int32_t i{0}; i < NLayers; ++i) {
      for (int32_t j{0}; j < NLayers; ++j) {
        if (i != j) { // we do not need self-lookup
          total += table[i][j].size();
        }
      }
    }
    mFlatTable.reserve(total); // single allocation up front
    for (int32_t i{0}; i < NLayers; ++i) {
      for (int32_t j{0}; j < NLayers; ++j) {
        size_t idx = (i * NLayers) + j;
        if (i != j) {
          mIndices[idx].setFirstEntry(static_cast<T>(mFlatTable.size()));
          mIndices[idx].setEntries(static_cast<T>(table[i][j].size()));
          mFlatTable.insert(mFlatTable.end(), table[i][j].begin(), table[i][j].end());
        } else {
          mIndices[idx] = {0, 0};
        }
      }
    }
  }

  TableIndex mIndices[NLayers * NLayers]; // linearized (from * NLayers + to) index ranges
  std::vector<TableEntry> mFlatTable;     // all per-ROF overlap ranges, concatenated
};
// GPU friendly view of the table below
// GPU friendly, non-owning view of the table below
template <int32_t NLayers, typename TableEntry, typename TableIndex>
struct ROFVertexLookupTableView {
  const TableEntry* mFlatTable{nullptr}; // per-ROF [first vertex, count] ranges, all layers concatenated
  const TableIndex* mIndices{nullptr};   // per-layer [offset, count] into mFlatTable
  const LayerTiming* mLayers{nullptr};   // per-layer timing definitions

  // read-only access to the timing definition of one layer
  GPUhdi() const LayerTiming& getLayer(int32_t layer) const noexcept
  {
    assert(layer >= 0 && layer < NLayers);
    return mLayers[layer];
  }

  // return the [first vertex, count] range associated with one ROF of a layer
  GPUhdi() const TableEntry& getVertices(int32_t layer, size_t rofIdx) const noexcept
  {
    assert(layer < NLayers);
    const auto& idx = mIndices[layer];
    assert(rofIdx < idx.getEntries());
    return mFlatTable[idx.getFirstEntry() + rofIdx];
  }

  // scan the whole table and return the largest vertex count of any single ROF
  GPUh() int32_t getMaxVerticesPerROF() const noexcept
  {
    int32_t maxCount = 0;
    for (int32_t layer = 0; layer < NLayers; ++layer) {
      const auto& idx = mIndices[layer];
      for (int32_t i = 0; i < idx.getEntries(); ++i) {
        const auto& entry = mFlatTable[idx.getFirstEntry() + i];
        maxCount = o2::gpu::CAMath::Max(maxCount, static_cast<int32_t>(entry.getEntries()));
      }
    }
    return maxCount;
  }

  // Check if a specific vertex is compatible with a given ROF
  // the ROF interval is widened by mROFAddTimeErr (lower edge clamped at BC 0),
  // the vertex interval is [t - err, t + err]
  // NOTE(review): the lower edge is tested inclusively here (vUpper >= rofLower)
  // while the table construction keeps a vertex only when vUpper > rofLower —
  // confirm which boundary convention is intended
  GPUhdi() bool isVertexCompatible(int32_t layer, size_t rofIdx, const Vertex& vertex) const noexcept
  {
    assert(layer < NLayers);
    const auto& layerDef = mLayers[layer];
    int64_t rofLower = o2::gpu::CAMath::Max((int64_t)layerDef.getROFStartInBC(rofIdx) - (int64_t)layerDef.mROFAddTimeErr, int64_t(0));
    int64_t rofUpper = (int64_t)layerDef.getROFEndInBC(rofIdx) + layerDef.mROFAddTimeErr;
    int64_t vLower = (int64_t)vertex.getTimeStamp().getTimeStamp() - (int64_t)vertex.getTimeStamp().getTimeStampError();
    int64_t vUpper = (int64_t)vertex.getTimeStamp().getTimeStamp() + (int64_t)vertex.getTimeStamp().getTimeStampError();
    return vUpper >= rofLower && vLower < rofUpper;
  }

#ifndef GPUCA_GPUCODE
  // print every layer's mapping followed by a summary
  GPUh() void printAll() const
  {
    for (int32_t i = 0; i < NLayers; ++i) {
      printLayer(i);
    }
    printSummary();
  }

  // print the per-ROF vertex ranges of one layer
  GPUh() void printLayer(int32_t layer) const
  {
    constexpr int w_rof = 10;
    constexpr int w_first = 12;
    constexpr int w_last = 12;
    constexpr int w_count = 10;
    LOGF(info, "Vertex lookup: Layer %d", layer);
    LOGF(info, "%*s | %*s | %*s | %*s", w_rof, "ROF.index", w_first, "First.Vtx", w_last, "Last.Vtx", w_count, "Count");
    LOGF(info, "%.*s-+-%.*s-+-%.*s-+-%.*s", w_rof, "----------", w_first, "------------", w_last, "------------", w_count, "----------");
    const auto& idx = mIndices[layer];
    for (int32_t i = 0; i < idx.getEntries(); ++i) {
      const auto& entry = mFlatTable[idx.getFirstEntry() + i];
      int first = entry.getFirstEntry();
      int count = entry.getEntries();
      int last = first + count - 1;
      LOGF(info, "%*d | %*d | %*d | %*d", w_rof, i, w_first, first, w_last, last, w_count, count);
    }
  }

  // print table statistics (ROF count, vertex references, approximate view size)
  GPUh() void printSummary() const
  {
    uint32_t totalROFs{0};
    uint32_t totalVertexRefs{0};
    for (int32_t i = 0; i < NLayers; ++i) {
      const auto& idx = mIndices[i];
      totalROFs += idx.getEntries();
      for (int32_t j = 0; j < idx.getEntries(); ++j) {
        const auto& entry = mFlatTable[idx.getFirstEntry() + j];
        totalVertexRefs += entry.getEntries();
      }
    }
    const uint32_t totalBytes = (totalROFs * sizeof(TableEntry)) + (NLayers * sizeof(TableIndex));
    LOGF(info, "------------------------------------------------------------");
    LOGF(info, "Total ROFs in table: %u", totalROFs);
    LOGF(info, "Total vertex references: %u", totalVertexRefs);
    LOGF(info, "Total view size: %u bytes", totalBytes);
    LOGF(info, "------------------------------------------------------------");
  }
#endif
};
// Precalculated lookup table to find vertices compatible with ROFs
// Given a layer and ROF index, returns the range of vertices that overlap in time.
// The vertex time is defined as symmetrical [t0-e,t0+e]
// It needs to be guaranteed that the input vertices are sorted by their lower-bound!
// additionally, compatibility has to be queried per vertex!
template <int32_t NLayers>
class ROFVertexLookupTable : public LayerTimingBase<NLayers>
{
 public:
  using T = LayerTimingBase<NLayers>::T;
  using BCType = LayerTiming::BCType;
  using TableEntry = dataformats::RangeReference<T, T>; // [first vertex, count] per ROF
  using TableIndex = dataformats::RangeReference<T, T>; // [offset, count] into mFlatTable per layer
  using View = ROFVertexLookupTableView<NLayers, TableEntry, TableIndex>;
  ROFVertexLookupTable() = default;

  // number of entries in the flattened table (for device buffer sizing)
  GPUh() size_t getFlatTableSize() const noexcept { return mFlatTable.size(); }
  // number of index entries (one per layer)
  static GPUh() constexpr size_t getIndicesSize() { return NLayers; }

  // Build the lookup table given a sorted array of vertices
  // vertices must be sorted by timestamp, then by error (secondary)
  GPUh() void init(const Vertex* vertices, size_t nVertices)
  {
    if (nVertices > std::numeric_limits<T>::max()) {
      LOGF(fatal, "too many vertices %zu, max supported is %u", nVertices, std::numeric_limits<T>::max());
    }
    std::vector<TableEntry> table[NLayers];
    for (int32_t layer{0}; layer < NLayers; ++layer) {
      buildMapping(layer, vertices, nVertices, table[layer]);
    }
    flatten(table);
  }

  // Pre-allocated needed memory, then use update(...)
  // one (initially empty) entry per ROF of each layer is reserved
  GPUh() void init()
  {
    size_t total{0};
    for (int32_t layer{0}; layer < NLayers; ++layer) {
      total += this->mLayers[layer].mNROFsTF;
    }
    mFlatTable.resize(total, {0, 0});
    // set up the fixed per-layer index ranges
    size_t offset = 0;
    for (int32_t layer{0}; layer < NLayers; ++layer) {
      size_t nROFs = this->mLayers[layer].mNROFsTF;
      mIndices[layer].setFirstEntry(static_cast<T>(offset));
      mIndices[layer].setEntries(static_cast<T>(nROFs));
      offset += nROFs;
    }
  }

  // Recalculate lookup table with new vertices
  // requires init() to have been called before (table layout is reused)
  GPUh() void update(const Vertex* vertices, size_t nVertices)
  {
    size_t offset = 0;
    for (int32_t layer{0}; layer < NLayers; ++layer) {
      const auto& idx = mIndices[layer];
      size_t nROFs = idx.getEntries();
      for (size_t iROF = 0; iROF < nROFs; ++iROF) {
        updateROFMapping(layer, iROF, vertices, nVertices, offset + iROF);
      }
      offset += nROFs;
    }
  }

  // non-owning view over the host-side table
  GPUh() View getView() const
  {
    View view;
    view.mFlatTable = mFlatTable.data();
    view.mIndices = mIndices;
    view.mLayers = this->mLayers;
    return view;
  }

  // non-owning view over buffers already copied to the device
  GPUh() View getDeviceView(const TableEntry* deviceFlatTablePtr, const TableIndex* deviceIndicesPtr, const LayerTiming* deviceLayerTimingPtr) const
  {
    View view;
    view.mFlatTable = deviceFlatTablePtr;
    view.mIndices = deviceIndicesPtr;
    view.mLayers = deviceLayerTimingPtr;
    return view;
  }

 private:
  // Build the mapping for one layer: for every ROF find the contiguous range
  // of vertices whose time interval may overlap the (error-widened) ROF.
  // Exploits the sort order to restart the scan at the previous ROF's first vertex.
  GPUh() void buildMapping(int32_t layer, const Vertex* vertices, size_t nVertices, std::vector<TableEntry>& table)
  {
    const auto& layerDef = this->mLayers[layer];
    table.resize(layerDef.mNROFsTF);
    size_t vertexSearchStart = 0;
    for (int32_t iROF{0}; iROF < layerDef.mNROFsTF; ++iROF) {
      int64_t rofLower = o2::gpu::CAMath::Max((int64_t)layerDef.getROFStartInBC(iROF) - (int64_t)layerDef.mROFAddTimeErr, int64_t(0));
      int64_t rofUpper = (int64_t)layerDef.getROFEndInBC(iROF) + layerDef.mROFAddTimeErr;
      // exclusive upper edge: first vertex whose lower bound is >= rofUpper
      size_t lastVertex = binarySearchFirst(vertices, nVertices, vertexSearchStart, rofUpper);
      // lower edge: skip vertices entirely before the ROF (upper bound <= rofLower)
      size_t firstVertex = vertexSearchStart;
      while (firstVertex < lastVertex) {
        int64_t vUpper = (int64_t)vertices[firstVertex].getTimeStamp().getTimeStamp() +
                         (int64_t)vertices[firstVertex].getTimeStamp().getTimeStampError();
        if (vUpper > rofLower) {
          break;
        }
        ++firstVertex;
      }
      size_t count = (lastVertex > firstVertex) ? (lastVertex - firstVertex) : 0;
      table[iROF] = {static_cast<T>(firstVertex), static_cast<T>(count)};
      vertexSearchStart = firstVertex;
    }
  }

  // Update a single ROF's vertex mapping in place
  // note: unlike buildMapping, the search restarts at vertex 0 for every ROF
  GPUh() void updateROFMapping(int32_t layer, size_t iROF, const Vertex* vertices, size_t nVertices, size_t flatTableIdx)
  {
    const auto& layerDef = this->mLayers[layer];
    int64_t rofLower = o2::gpu::CAMath::Max((int64_t)layerDef.getROFStartInBC(iROF) - (int64_t)layerDef.mROFAddTimeErr, int64_t(0));
    int64_t rofUpper = (int64_t)layerDef.getROFEndInBC(iROF) + layerDef.mROFAddTimeErr;
    size_t lastVertex = binarySearchFirst(vertices, nVertices, 0, rofUpper);
    size_t firstVertex = 0;
    while (firstVertex < lastVertex) {
      int64_t vUpper = (int64_t)vertices[firstVertex].getTimeStamp().getTimeStamp() +
                       (int64_t)vertices[firstVertex].getTimeStamp().getTimeStampError();
      if (vUpper > rofLower) {
        break;
      }
      ++firstVertex;
    }
    size_t count = (lastVertex > firstVertex) ? (lastVertex - firstVertex) : 0;
    mFlatTable[flatTableIdx].setFirstEntry(static_cast<T>(firstVertex));
    mFlatTable[flatTableIdx].setEntries(static_cast<T>(count));
  }

  // Binary search for the first vertex (at index >= searchStart) whose LOWER
  // time bound (timestamp - error) is >= targetBC; returns nVertices if none.
  // Relies on the vertices being sorted by their lower bound.
  GPUh() size_t binarySearchFirst(const Vertex* vertices, size_t nVertices, size_t searchStart, BCType targetBC) const
  {
    size_t left = searchStart;
    size_t right = nVertices;
    while (left < right) {
      size_t mid = left + ((right - left) / 2); // overflow-safe midpoint
      int64_t lower = (int64_t)vertices[mid].getTimeStamp().getTimeStamp() -
                      (int64_t)vertices[mid].getTimeStamp().getTimeStampError();
      if (lower < targetBC) {
        left = mid + 1;
      } else {
        right = mid;
      }
    }
    return left;
  }

  // Compress the temporary table into a single flat table
  GPUh() void flatten(const std::vector<TableEntry> table[NLayers])
  {
    // Count total entries
    size_t total{0};
    for (int32_t i{0}; i < NLayers; ++i) {
      total += table[i].size();
    }
    mFlatTable.reserve(total); // single allocation up front
    // Build flat table and indices
    for (int32_t i{0}; i < NLayers; ++i) {
      mIndices[i].setFirstEntry(static_cast<T>(mFlatTable.size()));
      mIndices[i].setEntries(static_cast<T>(table[i].size()));
      mFlatTable.insert(mFlatTable.end(), table[i].begin(), table[i].end());
    }
  }

  TableIndex mIndices[NLayers];       // one [offset, count] range per layer
  std::vector<TableEntry> mFlatTable; // all per-ROF vertex ranges, concatenated
};
} // namespace o2::its
#endif