forked from AliceO2Group/AliceO2
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathGPUTPCCFStreamCompaction.cxx
More file actions
148 lines (118 loc) · 5.07 KB
/
GPUTPCCFStreamCompaction.cxx
File metadata and controls
148 lines (118 loc) · 5.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
/// \file GPUTPCCFStreamCompaction.cxx
/// \author Felix Weiglhofer
#include "GPUTPCCFStreamCompaction.h"
#include "GPUCommonAlgorithm.h"
#include "CfChargePos.h"
#include "CfUtils.h"
using namespace o2::gpu;
using namespace o2::gpu::tpccf;
template <>
GPUdii() void GPUTPCCFStreamCompaction::Thread<GPUTPCCFStreamCompaction::scanStart>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUSharedMemory& smem, processorType& clusterer, int32_t iBuf, int32_t stage)
{
#ifdef GPUCA_GPUCODE
int32_t nElems = CompactionElems(clusterer, stage);
const auto* predicate = clusterer.mPisPeak;
auto* scanOffset = clusterer.GetScanBuffer(iBuf);
int32_t iThreadGlobal = get_global_id(0);
int32_t pred = 0;
if (iThreadGlobal < nElems) {
pred = predicate[iThreadGlobal];
}
int32_t nElemsInBlock = CfUtils::blockPredicateSum<GPUCA_PAR_CF_SCAN_WORKGROUP_SIZE>(smem, pred);
int32_t lastThread = nThreads - 1;
if (iThread == lastThread) {
scanOffset[iBlock] = nElemsInBlock;
}
#endif
}
template <>
GPUdii() void GPUTPCCFStreamCompaction::Thread<GPUTPCCFStreamCompaction::scanUp>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUSharedMemory& smem, processorType& clusterer, int32_t iBuf, int32_t nElems)
{
#ifdef GPUCA_GPUCODE
auto* scanOffset = clusterer.GetScanBuffer(iBuf - 1);
auto* scanOffsetNext = clusterer.GetScanBuffer(iBuf);
int32_t iThreadGlobal = get_global_id(0);
int32_t offsetInBlock = work_group_scan_inclusive_add((iThreadGlobal < nElems) ? scanOffset[iThreadGlobal] : 0);
if (iThreadGlobal < nElems) {
scanOffset[iThreadGlobal] = offsetInBlock;
}
int32_t lastThread = nThreads - 1;
if (iThread == lastThread) {
scanOffsetNext[iBlock] = offsetInBlock;
}
#endif
}
template <>
GPUdii() void GPUTPCCFStreamCompaction::Thread<GPUTPCCFStreamCompaction::scanTop>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUSharedMemory& smem, processorType& clusterer, int32_t iBuf, int32_t nElems)
{
#ifdef GPUCA_GPUCODE
int32_t iThreadGlobal = get_global_id(0);
int32_t* scanOffset = clusterer.GetScanBuffer(iBuf - 1);
bool inBounds = (iThreadGlobal < nElems);
int32_t offsetInBlock = work_group_scan_inclusive_add(inBounds ? scanOffset[iThreadGlobal] : 0);
if (inBounds) {
scanOffset[iThreadGlobal] = offsetInBlock;
}
#endif
}
template <>
GPUdii() void GPUTPCCFStreamCompaction::Thread<GPUTPCCFStreamCompaction::scanDown>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUSharedMemory& /*smem*/, processorType& clusterer, int32_t iBuf, uint32_t offset, int32_t nElems)
{
#ifdef GPUCA_GPUCODE
int32_t iThreadGlobal = get_global_id(0) + offset;
int32_t* scanOffsetPrev = clusterer.GetScanBuffer(iBuf - 1);
const int32_t* scanOffset = clusterer.GetScanBuffer(iBuf);
int32_t shift = scanOffset[iBlock];
if (iThreadGlobal < nElems) {
scanOffsetPrev[iThreadGlobal] += shift;
}
#endif
}
template <>
GPUdii() void GPUTPCCFStreamCompaction::Thread<GPUTPCCFStreamCompaction::compactDigits>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUSharedMemory& smem, processorType& clusterer, int32_t iBuf, int32_t stage, CfChargePos* in, CfChargePos* out)
{
#ifdef GPUCA_GPUCODE
uint32_t nElems = CompactionElems(clusterer, stage);
SizeT bufferSize = (stage) ? clusterer.mNMaxClusters : clusterer.mNMaxPeaks;
uint32_t iThreadGlobal = get_global_id(0);
const auto* predicate = clusterer.mPisPeak;
const auto* scanOffset = clusterer.GetScanBuffer(iBuf);
bool iAmDummy = (iThreadGlobal >= nElems);
int32_t pred = (iAmDummy) ? 0 : predicate[iThreadGlobal];
int32_t offsetInBlock = CfUtils::blockPredicateScan<GPUCA_PAR_CF_SCAN_WORKGROUP_SIZE>(smem, pred);
SizeT globalOffsetOut = offsetInBlock;
if (iBlock > 0) {
globalOffsetOut += scanOffset[iBlock - 1];
}
if (pred && globalOffsetOut < bufferSize) {
out[globalOffsetOut] = in[iThreadGlobal];
}
uint32_t lastId = get_global_size(0) - 1;
if (iThreadGlobal == lastId) {
SizeT nFinal = globalOffsetOut + pred;
if (nFinal > bufferSize) {
clusterer.raiseError(stage ? GPUErrors::ERROR_CF_CLUSTER_OVERFLOW : GPUErrors::ERROR_CF_PEAK_OVERFLOW, clusterer.mISector, nFinal, bufferSize);
nFinal = bufferSize;
}
if (stage) {
clusterer.mPmemory->counters.nClusters = nFinal;
} else {
clusterer.mPmemory->counters.nPeaks = nFinal;
}
}
#endif
}
GPUdii() int32_t GPUTPCCFStreamCompaction::CompactionElems(processorType& clusterer, int32_t stage)
{
return (stage) ? clusterer.mPmemory->counters.nPeaks : clusterer.mPmemory->counters.nPositions;
}