// test_TimePipeline.cxx (forked from AliceO2Group/AliceO2) — 100 lines (89 loc), 3.02 KB
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
#include "Framework/DataProcessorSpec.h"
#include "Framework/ParallelContext.h"
#include "Framework/runDataProcessing.h"
#include <cassert>
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <thread>
using namespace o2::framework;
// Minimal stand-in for a TPC cluster, used as the payload element type of the
// messages exchanged between the workflow stages below (plain aggregate, POD).
struct FakeCluster {
float x; // filled with the pipeline index by the producer
float y;
float z;
float q; // charge; producer fills with rand() % 1000
};
using DataHeader = o2::header::DataHeader;
// Number of parallel instances the "processingStage" device is replicated
// into by timePipeline(). NOTE(review): never mutated; could be constexpr,
// kept as-is to avoid changing linkage for other translation units.
size_t parallelSize = 4;
// Number of FakeCluster elements in each produced message.
size_t collectionChunkSize = 1000;
// Process callbacks for the producer and processing devices, defined below.
void someDataProducerAlgorithm(ProcessingContext& ctx);
void someProcessingStageAlgorithm(ProcessingContext& ctx);
/// Defines the test workflow: a single producer feeding a time-pipelined
/// processing stage (parallelSize replicas), whose output is drained by a
/// no-op sampler device.
WorkflowSpec defineDataProcessing(ConfigContext const&)
{
  // Source device: emits TPC/CLUSTERS messages.
  DataProcessorSpec producer{
    "dataProducer",
    Inputs{},
    {
      OutputSpec{"TPC", "CLUSTERS"},
    },
    AlgorithmSpec{
      (AlgorithmSpec::ProcessCallback)someDataProducerAlgorithm}};

  // Worker device: transforms TPC/CLUSTERS into TPC/CLUSTERS_P.
  // Replicated parallelSize times via timePipeline below.
  DataProcessorSpec stage{
    "processingStage",
    Inputs{
      {"dataTPC", "TPC", "CLUSTERS"}},
    Outputs{
      {"TPC", "CLUSTERS_P"}},
    AlgorithmSpec{
      (AlgorithmSpec::ProcessCallback)someProcessingStageAlgorithm}};

  // Sink device: consumes the sampled clusters and does nothing with them.
  DataProcessorSpec sampler{
    "dataSampler",
    Inputs{
      {"dataTPC-sampled", "TPC", "CLUSTERS", 0, Lifetime::Timeframe},
    },
    Outputs{},
    AlgorithmSpec{
      (AlgorithmSpec::ProcessCallback)[](ProcessingContext& ctx) {}}};

  return WorkflowSpec{producer, timePipeline(stage, parallelSize), sampler};
}
/// Process callback of the "dataProducer" device.
/// Emits one TPC/CLUSTERS message of collectionChunkSize FakeClusters per
/// invocation, tagged with this pipeline instance's 1D index as subSpec.
void someDataProducerAlgorithm(ProcessingContext& ctx)
{
  uint32_t index = ctx.services().get<ParallelContext>().index1D();
  // Throttle production so the pipeline test runs at a visible pace.
  std::this_thread::sleep_for(std::chrono::seconds(1));
  // Creates a new message of size collectionChunkSize which
  // has "TPC" as data origin and "CLUSTERS" as data description.
  auto& tpcClusters = ctx.outputs().make<FakeCluster>(Output{"TPC", "CLUSTERS", index}, collectionChunkSize);
  // size_t (not int) so the assert compares like-signed types with
  // collectionChunkSize — avoids a signed/unsigned comparison.
  size_t i = 0;
  for (auto& cluster : tpcClusters) {
    assert(i < collectionChunkSize);
    cluster.x = index; // mark each cluster with the producing pipeline index
    cluster.y = i;
    cluster.z = i;
    cluster.q = rand() % 1000; // pseudo-random charge in [0, 999]
    i++;
  }
}
/// Process callback of the (time-pipelined) "processingStage" device.
/// Reads the incoming TPC/CLUSTERS payload and publishes a TPC/CLUSTERS_P
/// message of the same length with simple per-field transformations applied.
void someProcessingStageAlgorithm(ProcessingContext& ctx)
{
  uint32_t index = ctx.services().get<ParallelContext>().index1D();
  // Raw view over the input payload; assumes it holds collectionChunkSize
  // FakeClusters, as produced by someDataProducerAlgorithm.
  const FakeCluster* inputDataTpc = reinterpret_cast<const FakeCluster*>(ctx.inputs().get("dataTPC").payload);
  auto processedTpcClusters =
    ctx.outputs().make<FakeCluster>(Output{"TPC", "CLUSTERS_P", index}, collectionChunkSize);
  // size_t (not int) so the assert compares like-signed types with
  // collectionChunkSize — avoids a signed/unsigned comparison.
  size_t i = 0;
  for (auto& cluster : processedTpcClusters) {
    assert(i < collectionChunkSize);
    cluster.x = -inputDataTpc[i].x;
    cluster.y = 2 * inputDataTpc[i].y;
    cluster.z = inputDataTpc[i].z * inputDataTpc[i].q;
    cluster.q = inputDataTpc[i].q;
    i++;
  }
} // (stray trailing ';' after the function body removed)