Skip to content

Commit 1af51b7

Browse files
committed
add benchmarking
1 parent 3aee6d7 commit 1af51b7

File tree

26 files changed

+1253
-29
lines changed

26 files changed

+1253
-29
lines changed
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
# Composite GitHub Action that renders vitest benchmark JSON as a markdown
# table in the workflow's step summary, by invoking the bundled render.js
# with the three inputs below and appending its stdout to the summary file.
name: 'Render Benchmark Results'
description: 'Renders vitest benchmark results as markdown in GitHub step summary'
inputs:
  benchmark-file:
    description: 'Path to the vitest benchmark JSON output file'
    required: true
  app-name:
    description: 'Name of the app being benchmarked'
    required: true
  backend:
    description: 'Backend type (local, postgres, vercel)'
    required: true
runs:
  using: 'composite'
  steps:
    - name: Render benchmark results
      shell: bash
      # github.action_path points at this action's own directory, so render.js
      # is resolved relative to the action regardless of the caller's checkout.
      run: |
        node ${{ github.action_path }}/render.js "${{ inputs.benchmark-file }}" "${{ inputs.app-name }}" "${{ inputs.backend }}" >> $GITHUB_STEP_SUMMARY
Lines changed: 123 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,123 @@
#!/usr/bin/env node

// Renders vitest benchmark JSON output as a GitHub-flavored markdown table.
// Usage: render.js <benchmark-file> <app-name> <backend>

const fs = require('fs');
const path = require('path');

const [, , benchmarkFile, appName, backend] = process.argv;

if (!benchmarkFile || !appName || !backend) {
  console.error('Usage: render.js <benchmark-file> <app-name> <backend>');
  process.exit(1);
}

// Try to load workflow timing data emitted alongside the benchmark results.
// A timing file lives in the same directory as the results file but carries
// the 'bench-timings-' prefix instead of 'bench-results-'.
let workflowTimings = null;
// Only replace the filename, not the directory name, so a directory path
// that happens to contain 'bench-results-' is left untouched.
const timingFilename = path
  .basename(benchmarkFile)
  .replace('bench-results-', 'bench-timings-');
const timingFile = path.join(path.dirname(benchmarkFile), timingFilename);
if (fs.existsSync(timingFile)) {
  try {
    workflowTimings = JSON.parse(fs.readFileSync(timingFile, 'utf-8'));
  } catch (e) {
    // Timing data is optional enrichment; warn and continue so the table
    // still renders with '-' in the timing-derived columns.
    console.error(
      `Warning: Could not parse timing file ${timingFile}: ${e.message}`
    );
  }
}
/**
 * Convert a duration in milliseconds to a fixed-point seconds string.
 *
 * @param {number} ms - Duration in milliseconds.
 * @param {number} [decimals=3] - Decimal places in the formatted output.
 * @returns {string} The duration in seconds, e.g. `formatSec(1500)` -> '1.500'.
 */
function formatSec(ms, decimals = 3) {
  const seconds = ms / 1000;
  return seconds.toFixed(decimals);
}
/**
 * Map a backend identifier to its display emoji.
 *
 * @param {string} backend - Backend type ('local', 'postgres', or 'vercel').
 * @returns {string} The emoji for the backend, or '' for unknown backends.
 */
function getBackendEmoji(backend) {
  const emojiByBackend = {
    vercel: '▲',
    postgres: '🐘',
    local: '💻',
  };
  // Object.hasOwn guards against inherited keys (e.g. 'constructor') so an
  // unrecognized backend always yields '', exactly like the default case of
  // an explicit switch.
  return Object.hasOwn(emojiByBackend, backend) ? emojiByBackend[backend] : '';
}
// Main rendering pass: read the vitest JSON, print one markdown table per
// benchmark group, then a collapsible legend. All output goes to stdout so
// the calling action can redirect it into $GITHUB_STEP_SUMMARY.
try {
  const data = JSON.parse(fs.readFileSync(benchmarkFile, 'utf-8'));

  const emoji = getBackendEmoji(backend);
  console.log(`## ${emoji} Benchmark Results: ${appName} (${backend})\n`);

  for (const file of data.files) {
    for (const group of file.groups) {
      // Workflow Time is primary metric, Wall Time is secondary
      console.log(
        '| Benchmark | Workflow Time (avg) | Min | Max | Wall Time | Overhead | Samples |'
      );
      console.log(
        '|:----------|--------------------:|----:|----:|----------:|---------:|--------:|'
      );

      for (const bench of group.benchmarks) {
        // Skip benchmarks without valid timing data (failed or timed out)
        if (bench.mean === undefined || bench.mean === null) {
          console.log(`| ${bench.name} | ⚠️ No data | - | - | - | - | 0 |`);
          continue;
        }

        // bench.mean is treated as milliseconds (formatSec divides by 1000).
        const wallTimeSec = formatSec(bench.mean);

        // Get workflow execution time if available. Every timing-derived
        // column defaults to '-' when the timing file was missing/unparsable
        // or does not cover this benchmark name.
        let workflowTimeSec = '-';
        let minTimeSec = '-';
        let maxTimeSec = '-';
        let overheadSec = '-';

        if (workflowTimings?.summary?.[bench.name]) {
          const summary = workflowTimings.summary[bench.name];
          workflowTimeSec = formatSec(summary.avgExecutionTimeMs);

          // Get min/max if available
          if (summary.minExecutionTimeMs !== undefined) {
            minTimeSec = formatSec(summary.minExecutionTimeMs);
          }
          if (summary.maxExecutionTimeMs !== undefined) {
            maxTimeSec = formatSec(summary.maxExecutionTimeMs);
          }

          // Calculate overhead (wall time - workflow time)
          const overheadMs = bench.mean - summary.avgExecutionTimeMs;
          overheadSec = formatSec(overheadMs);
        }

        console.log(
          `| ${bench.name} | ${workflowTimeSec}s | ${minTimeSec}s | ${maxTimeSec}s | ${wallTimeSec}s | ${overheadSec}s | ${bench.sampleCount} |`
        );
      }
      // Blank line terminates the markdown table before the next group.
      console.log('');
    }
  }

  // Add legend
  console.log('<details>');
  console.log('<summary>Column Definitions</summary>\n');
  console.log(
    '- **Workflow Time (avg)**: Average runtime reported by workflow (completedAt - createdAt)'
  );
  console.log('- **Min**: Minimum workflow execution time across all samples');
  console.log('- **Max**: Maximum workflow execution time across all samples');
  console.log(
    '- **Wall Time**: Total testbench time (trigger workflow + poll for result)'
  );
  console.log('- **Overhead**: Testbench overhead (Wall Time - Workflow Time)');
  console.log('- **Samples**: Number of benchmark iterations run');
  console.log('</details>');
} catch (error) {
  // Any failure (unreadable file, malformed JSON, unexpected shape) aborts
  // with a non-zero exit code so the CI step is marked failed.
  console.error(`Error rendering benchmark results: ${error.message}`);
  process.exit(1);
}

0 commit comments

Comments
 (0)