
Commit 89d7faa

feat: add benchmark
Parent: 7b05e57

File tree: 3 files changed (+114, -0 lines)

.gitignore

Lines changed: 1 addition & 0 deletions

```diff
@@ -12,6 +12,7 @@ venv
 result.txt
 testing/main.c
 */*compile_commands.json
+testing/benchmark_results.txt
 
 # Ignore Python wheel packages (clang-format, clang-tidy)
 clang-tidy-1*
```

docs/benchmark.md

Lines changed: 18 additions & 0 deletions

# Benchmarking

This document outlines the benchmarking process for comparing the performance of cpp-linter-hooks and mirrors-clang-format.

## Running the Benchmark

```bash
python3 testing/benchmark_hooks.py
```

## Results

The results of the benchmarking process will be saved to `testing/benchmark_results.txt`.

## To Do

- Run the benchmark against a larger codebase, such as [TheAlgorithms/C-Plus-Plus](https://github.com/TheAlgorithms/C-Plus-Plus) (see the sketch below).
- Run the benchmark with GitHub Actions for continuous integration.
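
The first To Do item could be prototyped roughly as follows. This is a minimal sketch and not part of this commit: the clone location `testing/C-Plus-Plus`, the shallow-clone step, and the chosen file extensions are all assumptions, and the file collection simply mirrors how `benchmark_hooks.py` (below) builds `TARGET_FILES`.

```python
# Hypothetical sketch (not in this commit): benchmark against a larger codebase.
# The clone location and the file extensions are assumptions.
import glob
import subprocess

CLONE_DIR = "testing/C-Plus-Plus"  # assumed checkout location

# Shallow-clone TheAlgorithms/C-Plus-Plus; harmless if the directory already exists.
subprocess.run(
    ["git", "clone", "--depth", "1",
     "https://github.com/TheAlgorithms/C-Plus-Plus.git", CLONE_DIR],
    check=False,
)

# Collect its C++ sources the same way benchmark_hooks.py builds TARGET_FILES.
LARGE_TARGET_FILES = (
    glob.glob(f"{CLONE_DIR}/**/*.cpp", recursive=True)
    + glob.glob(f"{CLONE_DIR}/**/*.h", recursive=True)
)
print(f"Collected {len(LARGE_TARGET_FILES)} files to benchmark")
```

The resulting `LARGE_TARGET_FILES` list could then be passed to `run_hook()` in place of `TARGET_FILES`.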

testing/benchmark_hooks.py

Lines changed: 95 additions & 0 deletions

```python
#!/usr/bin/env python3
"""
Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format.

Usage:
    python benchmark_hooks.py

Requirements:
    - pre-commit must be installed and available in PATH
    - Two config files:
        - testing/pre-commit-config-cpp-linter-hooks.yaml
        - testing/pre-commit-config-mirrors-clang-format.yaml
    - Target files: testing/main.c (or adjust as needed)
"""

import subprocess
import time
import statistics
import glob

HOOKS = [
    {
        "name": "cpp-linter-hooks",
        "config": "testing/pre-commit-config-cpp-linter-hooks.yaml",
    },
    {
        "name": "mirrors-clang-format",
        "config": "testing/pre-commit-config-mirrors-clang-format.yaml",
    },
]

# Automatically find all C/C++ files in testing/ (and optionally src/, include/)
TARGET_FILES = (
    glob.glob("testing/**/*.c", recursive=True)
    + glob.glob("testing/**/*.cpp", recursive=True)
    + glob.glob("testing/**/*.h", recursive=True)
    + glob.glob("testing/**/*.hpp", recursive=True)
)

REPEATS = 5
RESULTS_FILE = "testing/benchmark_results.txt"


def run_hook(config, files):
    cmd = ["pre-commit", "run", "--config", config, "--files"] + files
    start = time.perf_counter()
    try:
        subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        # Still record time even if hook fails
        pass
    end = time.perf_counter()
    return end - start


def benchmark():
    results = {}
    for hook in HOOKS:
        times = []
        print(f"Benchmarking {hook['name']}...")
        for i in range(REPEATS):
            # Clean up any changes before each run
            subprocess.run(["git", "restore"] + TARGET_FILES)
            subprocess.run(["pre-commit", "clean"])
            t = run_hook(hook["config"], TARGET_FILES)
            print(f"  Run {i + 1}: {t:.3f} seconds")
            times.append(t)
        results[hook["name"]] = times
    return results


def report(results):
    lines = []
    for name, times in results.items():
        avg = statistics.mean(times)
        std = statistics.stdev(times) if len(times) > 1 else 0.0
        min_t = min(times)
        max_t = max(times)
        lines.append(
            f"{name}: avg={avg:.3f}s, std={std:.3f}s, min={min_t:.3f}s, max={max_t:.3f}s, runs={len(times)}"
        )
    print("\nBenchmark Results:")
    print("\n".join(lines))
    with open(RESULTS_FILE, "w") as f:
        f.write("\n".join(lines) + "\n")
    print(f"Results saved to {RESULTS_FILE}")


def main():
    results = benchmark()
    report(results)


if __name__ == "__main__":
    main()
```
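
As a hedged illustration of how the saved results might be consumed afterwards, the snippet below parses `testing/benchmark_results.txt` and reports which hook was faster on average. It is a sketch, not part of this commit, and it assumes the one-line-per-hook `name: avg=...s, std=...s, ...` format that `report()` writes above.

```python
# Minimal sketch (not in this commit): compare the averages written by report().
# Assumes one "name: avg=...s, ..." line per hook in testing/benchmark_results.txt.
import re

averages = {}
with open("testing/benchmark_results.txt") as f:
    for line in f:
        if ":" not in line:
            continue
        name, rest = line.split(":", 1)
        match = re.search(r"avg=([\d.]+)s", rest)
        if match:
            averages[name.strip()] = float(match.group(1))

if len(averages) >= 2:
    (fast, t_fast), (slow, t_slow) = sorted(averages.items(), key=lambda kv: kv[1])[:2]
    print(f"{fast} was {t_slow / t_fast:.2f}x faster than {slow} on average")
```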
