-
-
Notifications
You must be signed in to change notification settings - Fork 130
278 lines (240 loc) · 11.1 KB
/
benchmarks-extended.yml
File metadata and controls
278 lines (240 loc) · 11.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
# Extended transport benchmarks.
# Runs nightly, on transport-code pushes to main, and informationally on PRs.
# Severe regressions (>25%) fail nightly/main runs only; PRs get a comment.
name: Extended Performance Benchmarks

on:
  # Run nightly at 2 AM UTC
  schedule:
    - cron: '0 2 * * *'
  # Run when transport code changes
  push:
    branches: [main]
    paths:
      - 'crates/core/src/transport/**'
      - 'crates/core/benches/**'
      - '.github/workflows/benchmarks-extended.yml'
  pull_request:
    paths:
      - 'crates/core/src/transport/**'
      - 'crates/core/benches/**'
      - '.github/workflows/benchmarks-extended.yml'
  # Allow manual trigger
  workflow_dispatch:

# Cancel in-progress runs when a new commit is pushed.
# On main, never cancel — each merge must complete its run (#3311).
concurrency:
  group: benchmarks-extended-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  extended-benchmark:
    name: Extended Benchmarks
    runs-on: ubuntu-latest
    timeout-minutes: 60  # Extended benchmarks take longer
    # Job will fail on severe regressions (>25%) for nightly/main builds
    # PRs only get informational comments, not failures
    env:
      CARGO_TARGET_DIR: ${{ github.workspace }}/target
      RUST_LOG: error
    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0  # Need history for merge-base

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y liblzma-dev

      - uses: dtolnay/rust-toolchain@stable
        with:
          # Quoted so the version-like scalar can never be re-typed by YAML.
          toolchain: "1.93.0"

      - uses: Swatinem/rust-cache@v2
        with:
          prefix-key: bench-extended
          save-if: ${{ github.ref == 'refs/heads/main' }}

      # Determine baseline commit (merge-base for PRs, current for main)
      - name: Determine Baseline Commit
        id: baseline-commit
        run: |
          if [ "${{ github.event_name }}" == "pull_request" ]; then
            BASE_SHA=$(git merge-base origin/${{ github.base_ref }} HEAD)
            echo "Using merge-base for PR: $BASE_SHA"
          else
            BASE_SHA=${{ github.sha }}
            echo "Using current commit for main: $BASE_SHA"
          fi
          echo "sha=$BASE_SHA" >> $GITHUB_OUTPUT

      # Download baseline from merge-base commit
      - name: Download Baseline
        id: baseline-cache
        uses: actions/cache/restore@v5
        with:
          path: target/criterion
          key: criterion-extended-main-${{ runner.os }}-${{ steps.baseline-commit.outputs.sha }}
          restore-keys: |
            criterion-extended-main-${{ runner.os }}-

      - name: Report Baseline Status
        run: |
          echo "## Extended Benchmark Baseline" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          if [ "${{ steps.baseline-cache.outputs.cache-hit }}" == "true" ]; then
            echo "✅ **Exact baseline match**: Comparing against merge-base commit" >> $GITHUB_STEP_SUMMARY
            echo "- Commit: ${{ steps.baseline-commit.outputs.sha }}" >> $GITHUB_STEP_SUMMARY
          elif [ -d "target/criterion" ]; then
            echo "⚠️ **Restored from fallback**: Using recent main branch baseline" >> $GITHUB_STEP_SUMMARY
            echo "- Target commit: ${{ steps.baseline-commit.outputs.sha }}" >> $GITHUB_STEP_SUMMARY
            echo "- Actual baseline may be from an older commit" >> $GITHUB_STEP_SUMMARY
            if [ -f "target/criterion/.baseline_info" ]; then
              echo "" >> $GITHUB_STEP_SUMMARY
              echo "**Baseline info:**" >> $GITHUB_STEP_SUMMARY
              echo '```' >> $GITHUB_STEP_SUMMARY
              cat target/criterion/.baseline_info >> $GITHUB_STEP_SUMMARY
              echo '```' >> $GITHUB_STEP_SUMMARY
            fi
          else
            echo "📊 **No baseline**: This run will establish the baseline" >> $GITHUB_STEP_SUMMARY
          fi
          echo "" >> $GITHUB_STEP_SUMMARY

      # Compile benchmarks first (fast fail on compilation errors)
      - name: Compile Extended Benchmarks
        run: |
          # The default Actions shell is `bash -e` WITHOUT pipefail, so a
          # failing `cargo bench` would be masked by `tee` exiting 0.
          set -o pipefail
          echo "## Compiling Extended Benchmarks" >> $GITHUB_STEP_SUMMARY
          cargo bench --bench transport_extended --features bench --no-run --color=never 2>&1 | tee compile_output.txt
          echo "✅ Extended benchmark compilation successful" >> $GITHUB_STEP_SUMMARY
          # Find the benchmark binary path for direct execution
          BENCH_BIN=$(find target/release/deps -name 'transport_extended-*' -type f -executable | head -1)
          # Fail fast if no binary was produced; otherwise the run step would
          # invoke an empty command and its `|| true` would hide the failure.
          if [ -z "$BENCH_BIN" ]; then
            echo "::error::transport_extended benchmark binary not found in target/release/deps"
            exit 1
          fi
          echo "BENCH_BIN=$BENCH_BIN" >> $GITHUB_ENV
          echo "Benchmark binary: $BENCH_BIN"

      # Run Extended Benchmarks - execute pre-compiled binary directly
      - name: Run Extended Benchmark Suite
        id: bench_extended
        run: |
          echo "## Extended Transport Performance" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "Comprehensive resilience and performance testing:" >> $GITHUB_STEP_SUMMARY
          echo "- **High-latency paths**: 100ms, 200ms, 500ms RTT (detects ssthresh death spiral)" >> $GITHUB_STEP_SUMMARY
          echo "- **Packet loss**: 1%, 5% loss rates (tests reliability layer)" >> $GITHUB_STEP_SUMMARY
          echo "- **Large transfers**: 10MB, 50MB (sustained throughput)" >> $GITHUB_STEP_SUMMARY
          echo "- **Micro-benchmarks**: Component-level validation" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          # Run pre-compiled benchmark binary directly (avoids cargo recompilation check)
          "$BENCH_BIN" --bench --color never 2>&1 | tee bench_output.txt || true

      # Parse results with structured script
      - name: Parse Benchmark Results
        id: parse_results
        run: |
          python3 --version
          # Run parser - captures exit code
          # Exit codes: 0=ok, 1=regressions, 2=severe regressions
          set +e
          python3 scripts/parse_bench_output.py bench_output.txt > parsed_output.txt 2>&1
          PARSE_EXIT=$?
          set -e
          if [ $PARSE_EXIT -eq 0 ]; then
            echo "regression_detected=false" >> $GITHUB_OUTPUT
            echo "severe_regression=false" >> $GITHUB_OUTPUT
          elif [ $PARSE_EXIT -eq 1 ]; then
            echo "regression_detected=true" >> $GITHUB_OUTPUT
            echo "severe_regression=false" >> $GITHUB_OUTPUT
          elif [ $PARSE_EXIT -eq 2 ]; then
            echo "regression_detected=true" >> $GITHUB_OUTPUT
            echo "severe_regression=true" >> $GITHUB_OUTPUT
          else
            # Unexpected parser exit: treat as "no regression" so a tooling
            # error does not fail the job (raw output is still uploaded).
            echo "regression_detected=false" >> $GITHUB_OUTPUT
            echo "severe_regression=false" >> $GITHUB_OUTPUT
          fi
          # Append parsed results
          echo "" >> $GITHUB_STEP_SUMMARY
          cat bench_summary.md >> $GITHUB_STEP_SUMMARY 2>/dev/null || {
            echo "⚠️ Failed to parse results" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            # Strip ANSI escape codes for clean markdown output
            tail -100 bench_output.txt | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
          }
          exit 0

      # Save baseline metadata
      - name: Save Baseline Metadata
        if: github.ref == 'refs/heads/main'
        run: |
          mkdir -p target/criterion
          cat > target/criterion/.baseline_info <<EOF
          Benchmark Suite: Extended (transport_extended)
          Commit: ${{ github.sha }}
          Branch: ${{ github.ref }}
          Timestamp: $(date -u +"%Y-%m-%d %H:%M:%S UTC")
          Workflow Run: ${{ github.run_id }}
          Run Type: ${{ github.event_name }}
          EOF

      # Save baseline for future comparisons (only on main)
      - name: Save Baseline
        if: github.ref == 'refs/heads/main'
        uses: actions/cache/save@v5
        with:
          path: target/criterion
          key: criterion-extended-main-${{ runner.os }}-${{ github.sha }}

      # Post PR comment if regressions detected
      # NOTE(review): commenting needs `pull-requests: write`; confirm the
      # repository's default workflow permissions grant it.
      - name: Comment on PR
        if: github.event_name == 'pull_request' && steps.parse_results.outputs.regression_detected == 'true'
        uses: actions/github-script@v8
        with:
          script: |
            const fs = require('fs');
            let body;
            try {
              body = fs.readFileSync('bench_pr_comment.md', 'utf8');
            } catch (error) {
              // Fallback body when the parser produced no comment file.
              body = `## ⚠️ Extended Benchmark Regressions Detected
            Some extended benchmarks show performance regressions.
            **Note**: Extended benchmarks include high-latency and packet-loss scenarios
            which have higher variance than standard CI benchmarks.
            [View full results](${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID})`;
            }
            body += `\n\n[View full benchmark summary](${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID})`;
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: body
            });

      # Upload results as artifacts
      - name: Upload Benchmark Results
        # Upload even when an earlier step failed (but not on cancellation),
        # so compile/bench output is available for debugging failed runs.
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v7
        with:
          name: extended-benchmark-results
          path: |
            bench_output.txt
            bench_results.json
            bench_summary.md
            bench_pr_comment.md
            target/criterion/**/report/index.html
          retention-days: 90  # Keep extended results longer

      # Fail nightly/main builds on severe throughput regressions (>25%)
      # PRs only get informational comments, not failures
      - name: Fail on Severe Regressions
        if: steps.parse_results.outputs.severe_regression == 'true' && github.event_name != 'pull_request'
        run: |
          echo "🚨 SEVERE THROUGHPUT REGRESSION DETECTED" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "One or more throughput benchmarks regressed by more than 25%." >> $GITHUB_STEP_SUMMARY
          echo "This indicates a significant performance problem that should be investigated." >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "See the benchmark results above for details." >> $GITHUB_STEP_SUMMARY
          exit 1

  # Summary job - reports status but doesn't block PRs
  benchmark-summary:
    name: Extended Benchmark Summary
    runs-on: ubuntu-latest
    timeout-minutes: 5
    needs: extended-benchmark
    if: always()
    steps:
      - name: Check Status
        run: |
          if [ "${{ needs.extended-benchmark.result }}" == "failure" ]; then
            if [ "${{ github.event_name }}" == "pull_request" ]; then
              echo "⚠️ Extended benchmarks detected regressions (informational, not blocking PR)"
            else
              echo "🚨 Extended benchmarks failed - severe throughput regression detected (>25%)"
              echo "This indicates a significant performance problem that should be investigated."
            fi
          elif [ "${{ needs.extended-benchmark.result }}" == "cancelled" ]; then
            echo "⚠️ Extended benchmarks were cancelled"
          else
            echo "✅ Extended benchmarks completed successfully"
          fi