-
Notifications
You must be signed in to change notification settings - Fork 24
241 lines (206 loc) · 8.3 KB
/
benchmarks.yml
File metadata and controls
241 lines (206 loc) · 8.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
---
# Performance benchmark workflow: runs Criterion benchmark suites on Linux
# and macOS, generates a markdown summary, and uploads results + HTML reports.
name: Performance Benchmarks

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  schedule:
    # Run benchmarks daily at 2 AM UTC for performance monitoring
    - cron: "0 2 * * *"
  workflow_dispatch:
    inputs:
      benchmark_suite:
        description: "Benchmark suite to run"
        required: false
        default: "all"
        type: choice
        options:
          - all
          - stump
          - proof
          - accumulator
          - simple
      baseline:
        description: 'Compare against baseline (branch name or "none")'
        required: false
        default: "none"
        type: string

# Least-privilege default for all jobs.
permissions:
  contents: read

env:
  CARGO_TERM_COLOR: always
  RUST_BACKTRACE: 1

jobs:
  benchmark:
    name: Run Performance Benchmarks
    runs-on: ${{ matrix.os }}
    permissions:
      contents: read
    strategy:
      # Let both OS legs finish even if one fails, so results are comparable.
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
        include:
          - os: ubuntu-latest
            target: x86_64-unknown-linux-gnu
          - os: macos-latest
            target: x86_64-apple-darwin
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0 # Full history for baseline comparison
          persist-credentials: false

      - name: Install Rust toolchain
        run: rustup toolchain install stable --component clippy,rustfmt --no-self-update

      - name: Configure Rust cache
        uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1
        with:
          key: ${{ matrix.os }}-benchmark-stable
          cache-on-failure: true

      # Log host details so benchmark numbers can be interpreted later.
      - name: System information
        run: |
          echo "System Information:"
          echo "OS: ${{ matrix.os }}"
          echo "Target: ${{ matrix.target }}"
          echo "Rust version: $(rustc --version)"
          echo "CPU cores: $(nproc 2>/dev/null || sysctl -n hw.ncpu)"
          echo "Memory: $(free -h 2>/dev/null || echo 'N/A on macOS')"
          echo "Disk space: $(df -h . | tail -1)"

      - name: Install dependencies (Linux)
        if: runner.os == 'Linux'
        run: |
          echo "Installing benchmark dependencies..."
          sudo apt-get update
          sudo apt-get install -y gnuplot

      - name: Install dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
          echo "Installing benchmark dependencies..."
          brew install gnuplot coreutils || true

      - name: Optimize environment for benchmarking (macOS)
        if: runner.os == 'macOS'
        run: |
          echo "Optimizing macOS environment for benchmarking..."
          # Reduce background processes impact
          sudo launchctl unload /System/Library/LaunchDaemons/com.apple.mds.messages.scan.plist || true

      - name: Validate benchmark compilation
        run: |
          echo "Checking benchmark compilation..."
          cargo check --benches --verbose
          echo "Benchmarks compile successfully ✅"

      - name: Run quick benchmark check
        run: |
          echo "Running quick benchmark compilation check..."
          cargo bench --bench stump_benchmarks --help > /dev/null || echo "Benchmark help completed"

      # Suite steps run when explicitly selected, when "all" is chosen, or when
      # the trigger is not workflow_dispatch (inputs empty → '').
      - name: Run Stump benchmarks
        if: ${{ github.event.inputs.benchmark_suite == 'all' || github.event.inputs.benchmark_suite == 'stump' || github.event.inputs.benchmark_suite == '' }}
        timeout-minutes: 30
        run: |
          echo "Running Stump accumulator benchmarks..."
          mkdir -p benchmark-results
          cargo bench --bench stump_benchmarks
          echo "✅ Stump benchmarks completed successfully"

      - name: Run Proof benchmarks
        if: ${{ github.event.inputs.benchmark_suite == 'all' || github.event.inputs.benchmark_suite == 'proof' || github.event.inputs.benchmark_suite == '' }}
        timeout-minutes: 30
        run: |
          echo "Running Proof operation benchmarks..."
          mkdir -p benchmark-results
          cargo bench --bench proof_benchmarks
          echo "✅ Proof benchmarks completed successfully"

      - name: Run Accumulator comparison benchmarks
        if: ${{ github.event.inputs.benchmark_suite == 'all' || github.event.inputs.benchmark_suite == 'accumulator' || github.event.inputs.benchmark_suite == '' }}
        timeout-minutes: 30
        run: |
          echo "Running Accumulator comparison benchmarks..."
          mkdir -p benchmark-results
          cargo bench --bench accumulator_benchmarks
          echo "✅ Accumulator benchmarks completed successfully"

      - name: Run all benchmarks with HTML report generation
        if: ${{ github.event.inputs.benchmark_suite == 'all' || github.event.inputs.benchmark_suite == '' }}
        timeout-minutes: 30
        run: |
          echo "Generating comprehensive HTML reports..."
          cargo bench
          echo "✅ Full benchmark suite with HTML reports completed successfully"

      - name: Generate performance summary
        env:
          # Pass untrusted/variable context through env to avoid shell injection
          # via direct ${{ }} interpolation in the script body.
          BRANCH_NAME: ${{ github.ref_name }}
          RUNNER_NAME: ${{ runner.name }}
          BENCHMARK_SUITE: ${{ github.event.inputs.benchmark_suite || 'all' }}
          BASELINE: ${{ github.event.inputs.baseline || 'none' }}
        run: |
          echo "Generating performance summary..."
          mkdir -p benchmark-results
          # Create a summary file
          cat > benchmark-results/summary.md << EOF
          # Benchmark Results Summary

          **Date:** $(date -u +"%Y-%m-%d %H:%M:%S UTC")
          **OS:** ${{ matrix.os }}
          **Rust:** $(rustc --version)
          **Commit:** ${{ github.sha }}
          **Branch:** ${BRANCH_NAME}

          ## Environment
          - **CPU Cores:** $(nproc 2>/dev/null || sysctl -n hw.ncpu)
          - **Runner:** ${RUNNER_NAME}
          - **Architecture:** ${{ matrix.target }}

          ## Benchmark Execution
          - **Trigger:** ${{ github.event_name }}
          - **Suite:** ${BENCHMARK_SUITE}
          - **Baseline:** ${BASELINE}

          ## Results
          Detailed results are available in the artifacts and HTML reports.

          Key performance metrics:
          - Stump operations: See stump_results.json
          - Proof operations: See proof_results.json
          - Accumulator comparisons: See accumulator_results.json
          EOF

      - name: Collect benchmark artifacts
        env:
          BRANCH_NAME: ${{ github.ref_name }}
          WORKFLOW_NAME: ${{ github.workflow }}
        run: |
          echo "Collecting benchmark artifacts..."
          # Use the benchmark collection script
          bash contrib/collect_benchmark_results.sh benchmark-results
          mkdir -p artifacts
          # Copy collected results
          cp -r benchmark-results/* artifacts/ 2>/dev/null || echo "No benchmark results to copy"
          # Copy HTML reports
          cp -r target/criterion artifacts/html-reports 2>/dev/null || echo "No HTML reports to copy"
          # Copy logs
          find . -name "*.log" -exec cp {} artifacts/ \; 2>/dev/null || echo "No logs found"
          # Create archive info
          echo "Archive created: $(date)" > artifacts/archive-info.txt
          echo "Commit: ${{ github.sha }}" >> artifacts/archive-info.txt
          echo "Branch: ${BRANCH_NAME}" >> artifacts/archive-info.txt
          echo "Workflow: ${WORKFLOW_NAME}" >> artifacts/archive-info.txt
          echo "Run number: ${{ github.run_number }}" >> artifacts/archive-info.txt

      - name: Upload benchmark results
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: benchmark-results-${{ matrix.os }}-${{ github.run_number }}
          path: artifacts/
          retention-days: 90
          compression-level: 6

      - name: Upload HTML reports
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        if: always() # upload whatever reports exist even after a failed suite
        with:
          name: benchmark-html-reports-${{ matrix.os }}-${{ github.run_number }}
          path: target/criterion/
          retention-days: 30
          compression-level: 9

  cleanup:
    name: Cleanup
    runs-on: ubuntu-latest
    if: always() # run regardless of benchmark job outcome
    needs: [benchmark]
    permissions:
      contents: read
    steps:
      - name: Reset environment
        run: |
          echo "Cleanup completed - environment reset for next run"
          echo "Benchmark workflow execution finished"