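# Posts benchmark results from the completed "Benchmarks" workflow run as a comment on the
# pull request that triggered it.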
name: Benchmarks PR Comment

on:
  workflow_run:
    workflows: [ "Benchmarks" ]
    types: [ completed ]

jobs:
  comment:
    if: ${{ github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion != 'cancelled' && github.event.workflow_run.conclusion != 'skipped' && github.event.workflow_run.pull_requests[0] != null }}
    runs-on: ubuntu-latest
    permissions:
      contents: read
      issues: write
      pull-requests: write
    steps:
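      # Fetch the results artifact uploaded by the triggering "Benchmarks" run; downloading an
      # artifact from another workflow run requires both run-id and a token.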
      - name: Download benchmark artifact
        uses: actions/download-artifact@v5
        with:
          name: benchmark-results
          path: benchmark
          run-id: ${{ github.event.workflow_run.id }}
          github-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Comment PR with benchmark results
        uses: actions/github-script@v8
        with:
          script: |
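            // The workflow_run payload lists the pull requests associated with the triggering run.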
            const fs = require('fs');
            const path = require('path');
            const pr = context.payload.workflow_run.pull_requests[0];
            if (!pr) {
              core.info('No associated pull request; skipping comment.');
              return;
            }
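            // Read the results file extracted from the downloaded artifact.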
            const resultsPath = path.join(process.cwd(), 'benchmark', 'benchmark-results.json');
            if (!fs.existsSync(resultsPath)) {
              core.setFailed('benchmark-results.json not found in artifact.');
              return;
            }
            const raw = fs.readFileSync(resultsPath, 'utf8').trim();
            if (!raw) {
              core.setFailed('benchmark-results.json is empty');
              return;
            }
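            // Fail with a readable message if the file is not valid JSON.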
            let parsed;
            try {
              parsed = JSON.parse(raw);
            } catch (error) {
              core.setFailed(`Failed to parse benchmark-results.json: ${error.message}`);
              return;
            }
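            // Accept either a bare array of results or an object wrapping them in a `benchmarks`
            // field; each entry is expected to expose primaryMetric.score / scoreError / scoreUnit.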
            const benchmarks = Array.isArray(parsed)
              ? parsed
              : Array.isArray(parsed?.benchmarks)
                ? parsed.benchmarks
                : [];
            const runUrl = context.payload.workflow_run.html_url;
            const commentLines = [
              '## 📊 Benchmark Results',
              '',
              `Triggered by [workflow run](${runUrl}).`,
              '',
            ];
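            // Render one table row per benchmark; scores above 50 (in the reported unit) are flagged as regressions.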
            if (benchmarks.length === 0) {
              commentLines.push('⚠️ **No benchmark data was produced.** Check the workflow run logs for failures in the benchmark phase.');
            } else {
              commentLines.push('| Benchmark | Score | Error | Unit |');
              commentLines.push('|-----------|-------|-------|------|');
              let hasRegression = false;
              for (const result of benchmarks) {
                const score = Number(result?.primaryMetric?.score ?? NaN);
                const error = Number(result?.primaryMetric?.scoreError ?? NaN);
                const unit = result?.primaryMetric?.scoreUnit ?? 'ops';
                const benchmark = (result?.benchmark ?? 'unknown').split('.').pop();
                if (!Number.isFinite(score)) {
                  commentLines.push(`| ⚠️ ${benchmark} | n/a | n/a | ${unit} |`);
                  core.warning(`Missing score for benchmark ${result?.benchmark ?? 'unknown'}`);
                  continue;
                }
                const emoji = score > 50 ? '🔴' : '🟢';
                if (score > 50) hasRegression = true;
                const errorDisplay = Number.isFinite(error) ? error.toFixed(3) : 'n/a';
                commentLines.push(`| ${emoji} ${benchmark} | ${score.toFixed(3)} | ±${errorDisplay} | ${unit} |`);
              }
              commentLines.push('');
              if (hasRegression) {
                commentLines.push('⚠️ **Performance regression detected**: One or more benchmarks exceeded the 50ms threshold.');
              } else {
                commentLines.push('✅ **All benchmarks passed** performance thresholds.');
              }
            }
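            // Keep a single sticky comment: update the existing bot comment when present, otherwise create one.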
            const comment = commentLines.join('\n');
            const { owner, repo } = context.repo;
            const issue_number = pr.number;
            const { data: comments } = await github.rest.issues.listComments({
              owner,
              repo,
              issue_number,
            });
            const existing = comments.find((c) =>
              c.user.type === 'Bot' && c.body.includes('📊 Benchmark Results')
            );
            if (existing) {
              await github.rest.issues.updateComment({
                owner,
                repo,
                comment_id: existing.id,
                body: comment,
              });
            } else {
              await github.rest.issues.createComment({
                owner,
                repo,
                issue_number,
                body: comment,
              });
            }
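            // An empty result set still posts the warning comment above, then fails the job.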
            if (benchmarks.length === 0) {
              core.setFailed('No benchmark entries were found in benchmark-results.json.');
            }