-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathutils.ts
More file actions
130 lines (113 loc) · 3.72 KB
/
utils.ts
File metadata and controls
130 lines (113 loc) · 3.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
import {
calculateQuantiles,
msToNs,
msToS,
type Benchmark,
type BenchmarkStats,
} from "@codspeed/core";
import {
type Benchmark as VitestBenchmark,
type RunnerTaskResult,
type RunnerTestSuite,
} from "vitest";
import { getBenchOptions } from "../compat";
import { isVitestTaskBenchmark } from "../common";
/**
 * Recursively walks a vitest suite tree and collects every passing benchmark
 * task as a core {@link Benchmark} entry.
 *
 * @param suite - the vitest suite whose tasks (and nested suites) are scanned
 * @param parentPath - "::"-joined path of the enclosing suites, empty at root
 * @returns all benchmarks found beneath this suite, in traversal order
 */
export async function extractBenchmarkResults(
  suite: RunnerTestSuite,
  parentPath = ""
): Promise<Benchmark[]> {
  // Path of this suite, joined with "::" like the benchmark URIs.
  const currentPath =
    parentPath === "" ? suite.name : `${parentPath}::${suite.name}`;

  const collected: Benchmark[] = [];
  for (const task of suite.tasks) {
    if (isVitestTaskBenchmark(task) && task.result?.state === "pass") {
      const entry = await processBenchmarkTask(task, currentPath);
      if (entry !== null) {
        collected.push(entry);
      }
    } else if (task.type === "suite") {
      // Awaiting sequentially keeps the per-benchmark log output in order.
      collected.push(...(await extractBenchmarkResults(task, currentPath)));
    }
  }
  return collected;
}
/**
 * Converts a single passing vitest benchmark task into a core Benchmark.
 *
 * @param task - the vitest benchmark task to convert
 * @param suitePath - "::"-joined path of the suites containing the task
 * @returns the converted benchmark, or null when the task carries no usable
 *   result (missing result data, zero samples, or a conversion failure)
 */
async function processBenchmarkTask(
  task: VitestBenchmark,
  suitePath: string
): Promise<Benchmark | null> {
  const uri = `${suitePath}::${task.name}`;

  const result = task.result;
  if (result == null) {
    console.warn(` ⚠ No result data available for ${uri}`);
    return null;
  }

  try {
    // tinybench configuration options as set through vitest
    const benchOptions = getBenchOptions(task);

    const stats = convertVitestResultToBenchmarkStats(result, benchOptions);
    if (stats === null) {
      // No samples were recorded; nothing to report for this task.
      console.log(` ✔ No walltime data to collect for ${uri}`);
      return null;
    }

    // Warmup is considered disabled when warmupIterations is 0.
    const warmupTimeNs =
      benchOptions.warmupIterations !== 0 && benchOptions.warmupTime
        ? msToNs(benchOptions.warmupTime)
        : null;

    const benchmark: Benchmark = {
      name: task.name,
      uri,
      config: {
        max_rounds: benchOptions.iterations ?? null,
        max_time_ns: benchOptions.time ? msToNs(benchOptions.time) : null,
        min_round_time_ns: null, // tinybench does not have an option for this
        warmup_time_ns: warmupTimeNs,
      },
      stats,
    };

    console.log(` ✔ Collected walltime data for ${uri}`);
    return benchmark;
  } catch (error) {
    console.warn(` ⚠ Failed to process benchmark result for ${uri}:`, error);
    return null;
  }
}
/**
 * Converts a vitest/tinybench task result into core BenchmarkStats.
 *
 * vitest reports timings in milliseconds; they are converted to nanoseconds
 * (and the total time to seconds) for the core format.
 *
 * @param result - the raw vitest task result carrying the tinybench data
 * @param benchOptions - the tinybench options the benchmark ran with
 * @returns the converted stats, or null when zero samples were recorded
 * @throws Error when the result carries no benchmark data at all
 */
function convertVitestResultToBenchmarkStats(
  result: RunnerTaskResult,
  benchOptions: {
    time?: number;
    warmupTime?: number;
    warmupIterations?: number;
    iterations?: number;
  }
): BenchmarkStats | null {
  const benchmark = result.benchmark;
  if (!benchmark) {
    throw new Error("No benchmark data available in result");
  }
  const { totalTime, min, max, mean, sd, samples } = benchmark;

  if (samples.length === 0) {
    // Sometimes the benchmarks can be completely optimized out and not even run,
    // but their beforeEach and afterEach hooks are still executed, and the task
    // is still considered a success (see hooks.bench.ts in this package).
    // Bail out before converting mean/sd, which are meaningless with no samples.
    return null;
  }

  // Individual sample times converted to nanoseconds, sorted ascending
  // (required by calculateQuantiles).
  const sortedTimesNs = samples.map(msToNs).sort((a, b) => a - b);
  const meanNs = msToNs(mean);
  const stdevNs = msToNs(sd);

  const { q1_ns, q3_ns, median_ns, iqr_outlier_rounds, stdev_outlier_rounds } =
    calculateQuantiles({ meanNs, stdevNs, sortedTimesNs });

  return {
    min_ns: msToNs(min),
    max_ns: msToNs(max),
    mean_ns: meanNs,
    stdev_ns: stdevNs,
    q1_ns,
    median_ns,
    q3_ns,
    total_time: msToS(totalTime),
    iter_per_round: 1, // as there is only one round in tinybench, we define that there were n rounds of 1 iteration
    rounds: sortedTimesNs.length,
    iqr_outlier_rounds,
    stdev_outlier_rounds,
    warmup_iters: benchOptions.warmupIterations ?? 0,
  };
}