# [Fusion] Perf experiment 2 — PR #1770
# Workflow file captured from the GitHub Actions run view.

name: Benchmarks

# Triggers: PRs touching the Fusion-vnext sources (or this workflow), and
# pushes to main (used to refresh the stored baseline in store-results).
# NOTE: `closed` is listed so a run is still triggered on PR close, which the
# concurrency group below uses to cancel any in-flight benchmark run; every
# job guards itself with `github.event.action != 'closed'`.
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review, closed]
    branches:
      - main
      - main-version-*
    paths:
      - 'src/HotChocolate/Fusion-vnext/**'
      - '.github/workflows/benchmarks.yml'
  push:
    branches:
      - main
    paths:
      - 'src/HotChocolate/Fusion-vnext/**'
      - '.github/workflows/benchmarks.yml'

# One active run per PR (or per ref for pushes); a newer run cancels the
# older one so benchmark runners are never double-booked.
concurrency:
  group: benchmarks-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
setup:
name: "Initialize Benchmark Report"
if: github.event.action != 'closed' && github.event_name == 'pull_request' && github.event.pull_request.draft == false
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Post initial pending comment
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const marker = '<!-- fusion-gateway-benchmark-report -->';
const runId = String(context.runId);
const dataPrefix = '<!-- benchmark-data:';
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existing = comments.find(c => c.body.includes(marker));
// --- Generate all-pending markdown ---
const tests = [
{ key: 'no-recursion', title: 'Simple Composite Query' },
{ key: 'deep-recursion', title: 'Deep Recursion Query' },
{ key: 'variable-batch', title: 'Variable Batching Throughput' },
];
const modes = [
{ key: 'constant', label: 'Constant', desc: '(50 VUs)' },
{ key: 'ramping', label: 'Ramping', desc: '(0-500-0 VUs)' },
];
const runners = [
{ group: 'benchmarking-1', label: '1' },
];
let md = '### Fusion Gateway Performance Results\n\n';
md += `> **Progress: 0/6 benchmarks completed** — this report updates as each job finishes.\n\n`;
for (const test of tests) {
md += `#### ${test.title}\n\n`;
md += '| | Req/s | Err% |\n|:--|--:|--:|\n';
for (const mode of modes) {
for (const runner of runners) {
md += `| **${mode.label} ${runner.label}** ${mode.desc} | *pending* | *pending* |\n`;
}
}
md += '\n<details>\n<summary>Response Times</summary>\n\n';
md += '| | Min | Med | Avg | P90 | P95 | Max |\n|:--|--:|--:|--:|--:|--:|--:|\n';
for (const mode of modes) {
for (const runner of runners) {
md += `| **${mode.label} ${runner.label}** | *pending* | *pending* | *pending* | *pending* | *pending* | *pending* |\n`;
}
}
md += '\n</details>\n\n---\n\n';
}
md += '*Runner 1 = benchmarking-1*';
const encodedData = Buffer.from(JSON.stringify({})).toString('base64');
const timestamp = new Date().toUTCString();
const commitSha = context.sha.substring(0, 7);
const runUrl = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${runId}`;
const commentBody = [
marker,
`<!-- run:${runId} -->`,
`${dataPrefix}${encodedData} -->`,
`<!-- completed:0 -->`,
md,
'---',
`*Run [${runId}](${runUrl}) • Commit ${commitSha} • ${timestamp}*`,
].join('\n');
if (existing) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existing.id,
body: commentBody,
});
console.log('Reset existing comment to all-pending');
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: commentBody,
});
console.log('Created initial all-pending comment');
}
benchmark:
name: "${{ matrix.test-name }} ${{ matrix.mode-label }} (${{ matrix.runner-label }})"
needs: [setup]
if: "!cancelled() && github.event.action != 'closed' && (github.event_name == 'push' || github.event.pull_request.draft == false)"
runs-on:
group: ${{ matrix.runner-group }}
permissions:
contents: write
pull-requests: write
strategy:
fail-fast: false
matrix:
test: [no-recursion, deep-recursion, variable-batch]
mode: [constant, ramping]
runner-group: [benchmarking-1]
include:
- runner-group: benchmarking-1
runner-label: "Runner 1"
- test: no-recursion
test-name: "Simple Composite Query"
- test: deep-recursion
test-name: "Deep Recursion"
- test: variable-batch
test-name: "Variable Batch"
- mode: constant
mode-label: "Constant"
- mode: ramping
mode-label: "Ramping"
steps:
- name: Checkout current repository
uses: actions/checkout@v6
with:
fetch-depth: 0
show-progress: false
- name: Install .NET
run: |
if ! command -v dotnet &> /dev/null; then
echo "Installing .NET..."
curl -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh
chmod +x dotnet-install.sh
./dotnet-install.sh --channel 10.0 --install-dir $HOME/.dotnet
rm dotnet-install.sh
echo "$HOME/.dotnet" >> $GITHUB_PATH
export PATH="$HOME/.dotnet:$PATH"
fi
dotnet --version
- name: Install k6
run: |
if ! command -v k6 &> /dev/null; then
echo "Installing k6..."
sudo gpg -k
sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69
echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list
sudo apt-get update
sudo apt-get install k6 -y
fi
k6 version
- name: Install jq for result parsing
run: |
if ! command -v jq &> /dev/null; then
sudo apt-get install jq -y
fi
- name: Make scripts executable
working-directory: src/HotChocolate/Fusion-vnext/benchmarks/k6
run: chmod +x *.sh
- name: Run benchmark
working-directory: src/HotChocolate/Fusion-vnext/benchmarks/k6
run: |
./run-single-benchmark.sh \
"${{ matrix.test }}" \
"${{ matrix.mode }}" \
"result.json" \
"${{ matrix.runner-group }}" \
"${{ matrix.runner-label }}"
- name: Upload benchmark result
uses: actions/upload-artifact@v6
if: always()
with:
name: benchmark-${{ matrix.test }}-${{ matrix.mode }}-${{ matrix.runner-group }}
path: src/HotChocolate/Fusion-vnext/benchmarks/k6/result.json
retention-days: 30
# Lightweight progressive PR update — reads only the local result.json
# and accumulates data in a hidden JSON block inside the PR comment.
# No artifact downloads, no external scripts — just a few API calls.
- name: Update PR comment (progressive)
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
// Read this job's result
let result;
try {
result = JSON.parse(fs.readFileSync(
'src/HotChocolate/Fusion-vnext/benchmarks/k6/result.json', 'utf8'));
} catch (e) {
console.log('No result.json found, skipping comment update');
return;
}
const marker = '<!-- fusion-gateway-benchmark-report -->';
const dataPrefix = '<!-- benchmark-data:';
const runId = String(context.runId);
// Fetch existing comment (created by the setup job)
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existing = comments.find(c => c.body.includes(marker));
if (!existing) {
console.log('No benchmark comment found, skipping update');
return;
}
// Only accumulate data from the same run
let allResults = {};
const runMatch = existing.body.match(/<!-- run:(\d+) -->/);
const commentRunId = runMatch ? runMatch[1] : null;
if (commentRunId === runId) {
const m = existing.body.match(/<!-- benchmark-data:(.*?) -->/);
if (m) {
try { allResults = JSON.parse(Buffer.from(m[1], 'base64').toString()); }
catch (e) { console.log('Failed to parse existing data, starting fresh'); }
}
}
// Merge our result
const key = `${result.test}|${result.mode}|${result.runner_group}`;
allResults[key] = result;
const completed = Object.keys(allResults).length;
// --- Generate markdown report inline ---
const fmt = (n) => Number(n).toFixed(2);
const tests = [
{ key: 'no-recursion', title: 'Simple Composite Query' },
{ key: 'deep-recursion', title: 'Deep Recursion Query' },
{ key: 'variable-batch', title: 'Variable Batching Throughput' },
];
const modes = [
{ key: 'constant', label: 'Constant', desc: '(50 VUs)' },
{ key: 'ramping', label: 'Ramping', desc: '(0-500-0 VUs)' },
];
const runners = [
{ group: 'benchmarking-1', label: '1' },
];
let md = '### Fusion Gateway Performance Results\n\n';
if (completed < 6) {
md += `> **Progress: ${completed}/6 benchmarks completed** — this report updates as each job finishes.\n\n`;
}
for (const test of tests) {
md += `#### ${test.title}\n\n`;
md += '| | Req/s | Err% |\n|:--|--:|--:|\n';
for (const mode of modes) {
for (const runner of runners) {
const k = `${test.key}|${mode.key}|${runner.group}`;
const r = allResults[k];
const lbl = `**${mode.label} ${runner.label}**`;
if (r) {
md += `| ${lbl} ${mode.desc} | ${fmt(r.throughput.requests_per_second)} | ${fmt(r.reliability.error_rate)}% |\n`;
} else {
md += `| ${lbl} ${mode.desc} | *pending* | *pending* |\n`;
}
}
}
md += '\n<details>\n<summary>Response Times</summary>\n\n';
md += '| | Min | Med | Avg | P90 | P95 | Max |\n|:--|--:|--:|--:|--:|--:|--:|\n';
for (const mode of modes) {
for (const runner of runners) {
const k = `${test.key}|${mode.key}|${runner.group}`;
const r = allResults[k];
const lbl = `**${mode.label} ${runner.label}**`;
if (r) {
const t = r.response_time;
md += `| ${lbl} | ${fmt(t.min)}ms | ${fmt(t.p50)}ms | ${fmt(t.avg)}ms | ${fmt(t.p90)}ms | ${fmt(t.p95)}ms | ${fmt(t.max)}ms |\n`;
} else {
md += `| ${lbl} | *pending* | *pending* | *pending* | *pending* | *pending* | *pending* |\n`;
}
}
}
md += '\n</details>\n\n---\n\n';
}
md += '*Runner 1 = benchmarking-1*';
// Build comment body with hidden data block
const encodedData = Buffer.from(JSON.stringify(allResults)).toString('base64');
const timestamp = new Date().toUTCString();
const commitSha = context.sha.substring(0, 7);
const runUrl = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
const commentBody = [
marker,
`<!-- run:${runId} -->`,
`${dataPrefix}${encodedData} -->`,
`<!-- completed:${completed} -->`,
md,
'---',
`*Run [${runId}](${runUrl}) • Commit ${commitSha} • ${timestamp}*`,
].join('\n');
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existing.id,
body: commentBody,
});
console.log(`Updated comment (${completed}/6 completed)`);
report:
name: "Final Performance Report"
needs: [setup, benchmark]
if: "!cancelled() && github.event.action != 'closed' && github.event_name == 'pull_request'"
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Download all benchmark results
uses: actions/download-artifact@v4
continue-on-error: true
with:
pattern: benchmark-*
path: benchmark-results
# Merges artifact data with accumulated comment data so we never
# overwrite progressive results with a less complete artifact set
# (e.g., when cancel-in-progress killed some jobs mid-run).
- name: Update PR comment with final report
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
const path = require('path');
const marker = '<!-- fusion-gateway-benchmark-report -->';
const dataPrefix = '<!-- benchmark-data:';
const runId = String(context.runId);
// --- Collect results from artifacts ---
let artifactResults = {};
const baseDir = 'benchmark-results';
if (fs.existsSync(baseDir)) {
for (const dir of fs.readdirSync(baseDir)) {
const filePath = path.join(baseDir, dir, 'result.json');
if (fs.existsSync(filePath)) {
try {
const r = JSON.parse(fs.readFileSync(filePath, 'utf8'));
const key = `${r.test}|${r.mode}|${r.runner_group}`;
artifactResults[key] = r;
} catch (e) { console.log(`Failed to parse ${filePath}:`, e.message); }
}
}
}
console.log(`Found ${Object.keys(artifactResults).length} result(s) from artifacts`);
// --- Read accumulated data from existing PR comment (same run only) ---
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existing = comments.find(c => c.body.includes(marker));
let commentResults = {};
if (existing) {
const runMatch = existing.body.match(/<!-- run:(\d+) -->/);
const commentRunId = runMatch ? runMatch[1] : null;
if (commentRunId === runId) {
const m = existing.body.match(/<!-- benchmark-data:(.*?) -->/);
if (m) {
try { commentResults = JSON.parse(Buffer.from(m[1], 'base64').toString()); }
catch (e) { console.log('Failed to parse comment data'); }
}
console.log(`Same run, loaded ${Object.keys(commentResults).length} result(s) from comment`);
} else {
console.log(`Different run (comment=${commentRunId}, current=${runId}), ignoring comment data`);
}
}
// --- Merge: artifacts win over comment data, comment fills gaps ---
const allResults = { ...commentResults, ...artifactResults };
const completed = Object.keys(allResults).length;
console.log(`Merged total: ${completed} result(s)`);
// Only update if we have at least as much data as what's posted for the same run
if (existing) {
const runMatch2 = existing.body.match(/<!-- run:(\d+) -->/);
const commentRunId2 = runMatch2 ? runMatch2[1] : null;
if (commentRunId2 === runId) {
const cm = existing.body.match(/<!-- completed:(\d+) -->/);
const existingCompleted = cm ? parseInt(cm[1], 10) : 0;
if (completed < existingCompleted) {
console.log(`Skipped final report (${completed} < ${existingCompleted})`);
return;
}
}
}
// --- Generate markdown (same logic as progressive updates) ---
const fmt = (n) => Number(n).toFixed(2);
const tests = [
{ key: 'no-recursion', title: 'Simple Composite Query' },
{ key: 'deep-recursion', title: 'Deep Recursion Query' },
{ key: 'variable-batch', title: 'Variable Batching Throughput' },
];
const modes = [
{ key: 'constant', label: 'Constant', desc: '(50 VUs)' },
{ key: 'ramping', label: 'Ramping', desc: '(0-500-0 VUs)' },
];
const runners = [
{ group: 'benchmarking-1', label: '1' },
];
let md = '### Fusion Gateway Performance Results\n\n';
if (completed < 6) {
md += `> **Progress: ${completed}/6 benchmarks completed** — this report updates as each job finishes.\n\n`;
}
for (const test of tests) {
md += `#### ${test.title}\n\n`;
md += '| | Req/s | Err% |\n|:--|--:|--:|\n';
for (const mode of modes) {
for (const runner of runners) {
const k = `${test.key}|${mode.key}|${runner.group}`;
const r = allResults[k];
const lbl = `**${mode.label} ${runner.label}**`;
if (r) {
md += `| ${lbl} ${mode.desc} | ${fmt(r.throughput.requests_per_second)} | ${fmt(r.reliability.error_rate)}% |\n`;
} else {
md += `| ${lbl} ${mode.desc} | *pending* | *pending* |\n`;
}
}
}
md += '\n<details>\n<summary>Response Times</summary>\n\n';
md += '| | Min | Med | Avg | P90 | P95 | Max |\n|:--|--:|--:|--:|--:|--:|--:|\n';
for (const mode of modes) {
for (const runner of runners) {
const k = `${test.key}|${mode.key}|${runner.group}`;
const r = allResults[k];
const lbl = `**${mode.label} ${runner.label}**`;
if (r) {
const t = r.response_time;
md += `| ${lbl} | ${fmt(t.min)}ms | ${fmt(t.p50)}ms | ${fmt(t.avg)}ms | ${fmt(t.p90)}ms | ${fmt(t.p95)}ms | ${fmt(t.max)}ms |\n`;
} else {
md += `| ${lbl} | *pending* | *pending* | *pending* | *pending* | *pending* | *pending* |\n`;
}
}
}
md += '\n</details>\n\n---\n\n';
}
md += '*Runner 1 = benchmarking-1*';
// Build comment body
const encodedData = Buffer.from(JSON.stringify(allResults)).toString('base64');
const timestamp = new Date().toUTCString();
const commitSha = context.sha.substring(0, 7);
const runUrl = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
const commentBody = [
marker,
`<!-- run:${runId} -->`,
`${dataPrefix}${encodedData} -->`,
`<!-- completed:${completed} -->`,
md,
'---',
`*Run [${runId}](${runUrl}) • Commit ${commitSha} • ${timestamp}*`,
].join('\n');
if (existing) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existing.id,
body: commentBody,
});
console.log(`Updated final report (${completed}/6 completed)`);
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: commentBody,
});
console.log(`Created final report (${completed}/6 completed)`);
}
store-results:
name: "Store Baseline Results"
needs: benchmark
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Download all benchmark results
uses: actions/download-artifact@v4
with:
pattern: benchmark-*
path: benchmark-results
- name: Checkout performance data repository
uses: actions/checkout@v6
with:
repository: ChilliCream/graphql-platform-performance-data
token: ${{ secrets.PERFORMANCE_DATA_TOKEN }}
path: performance-data-repo
fetch-depth: 1
show-progress: false
- name: Store results to external repository
run: |
mkdir -p performance-data-repo/fusion-gateway
for dir in benchmark-results/benchmark-*/; do
if [ -f "$dir/result.json" ]; then
name=$(basename "$dir")
key=${name#benchmark-}
cp "$dir/result.json" "performance-data-repo/fusion-gateway/${key}.json"
fi
done
cd performance-data-repo
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add fusion-gateway/
if ! git diff --staged --quiet; then
git commit -m "Update Fusion Gateway benchmark data from ${{ github.sha }}"
git push
else
echo "No changes to performance data"
fi