Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
118 changes: 118 additions & 0 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
# Benchmark workflow: runs the package's benchmark suite and compares the
# current commit against a baseline ref, failing on regressions above 15%.
name: Benchmark

on:
  # Manual runs may choose any baseline ref (defaults to "main").
  workflow_dispatch:
    inputs:
      baseline_ref:
        description: "Branch/tag/commit to compare against"
        required: false
        default: "main"
  # PR runs are gated by the `run-benchmarks` label (see the job-level `if`).
  pull_request:
    types: [opened, synchronize, reopened, labeled]

# Read-only token: the job never writes back to the repository.
permissions:
  contents: read

jobs:
  benchmark:
    if: github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
    runs-on: ubuntu-latest
    env:
      # Baseline to judge against: the manual input on dispatch runs,
      # otherwise the PR's base branch.
      BASE_REF: ${{ github.event_name == 'workflow_dispatch' && inputs.baseline_ref || github.base_ref }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          # Full history so the baseline commit can be checked out locally.
          fetch-depth: 0

      - name: Set up Julia
        uses: julia-actions/setup-julia@v2
        with:
          version: "1"

      - name: Cache Julia artifacts
        uses: julia-actions/cache@v2

      # Guarantees the ref exists locally even if BASE_REF names something the
      # checkout above did not materialize as a remote-tracking ref.
      - name: Ensure baseline ref is available
        run: git fetch origin "${BASE_REF}" --depth=1

      - name: Instantiate benchmark environment
        # Dev the package into the benchmark env first, then resolve once.
        # (Instantiating before `Pkg.develop` is redundant and can fail when the
        # manifest references the not-yet-dev'ed package.)
        run: julia --project=benchmark -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'

      - name: Compare benchmarks against baseline
        # Two paths: if the baseline ref ships the benchmark harness, run a full
        # PkgBenchmark judgment; otherwise only benchmark the current branch and
        # say so in the report.
        run: |
          if git cat-file -e "origin/${BASE_REF}:benchmark/Project.toml" 2>/dev/null; then
            julia --project=benchmark -e '
              using PkgBenchmark
              baseline = "origin/$(ENV["BASE_REF"])"
              judgment = judge(pwd(), "HEAD", baseline)
              mkpath("benchmark-results")
              export_markdown("benchmark-results/report.md", judgment; export_invariants=true)
            '
            touch benchmark-results/has_judgment
          else
            julia --project=benchmark -e '
              using BenchmarkTools
              include("benchmark/benchmarks.jl")
              results = run(SUITE; seconds=1)
              mkpath("benchmark-results")
              open("benchmark-results/report.md", "w") do io
                  println(io, "# Benchmark Report")
                  println(io)
                  println(io, "Baseline branch `$(ENV["BASE_REF"])` does not contain `benchmark/Project.toml`.")
                  println(io, "Ran current-branch benchmarks only; regression comparison skipped.")
                  println(io)
                  show(io, MIME"text/plain"(), results)
                  println(io)
              end
            '
            touch benchmark-results/no_judgment
          fi

      - name: Print benchmark report in job log
        run: |
          echo "=== Benchmark Report ==="
          cat benchmark-results/report.md

      - name: Publish benchmark report in step summary
        run: |
          cat benchmark-results/report.md >> "${GITHUB_STEP_SUMMARY}"

      # NOTE(review): this gate greps the markdown report for "+X%" tokens.
      # Confirm that PkgBenchmark's export_markdown actually renders regressions
      # in that form; if it emits time *ratios* instead, the regex never matches
      # and the gate silently passes.
      - name: Fail on regression above 15%
        run: |
          if [ -f benchmark-results/no_judgment ]; then
            echo "No baseline benchmark harness found; skipping regression gate."
            exit 0
          fi
          julia --project=benchmark -e '
            threshold = 15.0
            report = read("benchmark-results/report.md", String)
            worst = 0.0
            offenders = String[]
            for line in split(report, "\n")
                m = match(r"\+([0-9]+(?:\.[0-9]+)?)%", line)
                m === nothing && continue
                pct = parse(Float64, m.captures[1])
                # `global` avoids the soft-scope ambiguity warning when
                # assigning a top-level variable inside a loop in script mode.
                global worst = max(worst, pct)
                if pct > threshold
                    push!(offenders, strip(line))
                end
            end

            println("Maximum parsed regression: ", round(worst; digits=2), "%")
            if isempty(offenders)
                println("No regression above ", threshold, "% detected.")
                exit(0)
            end

            println("Regressions above ", threshold, "%:")
            foreach(x -> println(" ", x), offenders)
            exit(1)
          '

      - name: Upload benchmark report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-report
          path: benchmark-results/report.md
12 changes: 10 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"

[compat]
julia = "1.8 - 1"
StaticArrays = "1.9 - 1"
StructArrays = "0.7.1 - 0.7"
LinearAlgebra = "1"
Printf = "1"
Test = "1"
BenchmarkTools = "1"

# Test and BenchmarkTools are extras: pulled in only for their respective
# targets below, never as runtime dependencies of the package itself.
# (A package must not appear in both [deps] and [extras].)
[extras]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
benchmark = ["BenchmarkTools"]
test = ["Test"]
9 changes: 9 additions & 0 deletions benchmark/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Standalone environment for the benchmark harness (benchmark/benchmarks.jl).
# IdealGasThermo is pinned by UUID here; CI `Pkg.develop`s the local checkout
# into this environment before running the suite.
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
IdealGasThermo = "d4fb33c0-2494-4384-b7a5-e5d0487b6649"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"

[compat]
BenchmarkTools = "1"
PkgBenchmark = "0.2"
julia = "1.8 - 1"
34 changes: 34 additions & 0 deletions benchmark/benchmarks.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
using BenchmarkTools
using IdealGasThermo

# Top-level benchmark suite picked up by PkgBenchmark / `run(SUITE)`.
const SUITE = BenchmarkGroup()

# 100 evenly spaced temperatures across 200–600 (presumably Kelvin — confirm
# against IdealGasThermo). Deterministic on purpose: the original used an
# unseeded `rand`, so baseline and target benchmark processes saw different
# input data, adding input noise to regression comparisons.
const TT = collect(range(200.0, 600.0; length=100))

"""
    benchmark_gas!(TT::AbstractVector, gas::IdealGasThermo.AbstractGas)

Benchmark payload: sweep `gas` through every temperature in `TT`, reading the
`cp`, `ϕ`, `h`, and `cp_T` properties after each assignment to `gas.T`.

The property reads are intentionally discarded — the timed work is whatever
`getproperty` computes for the new temperature (assumed non-trivial; confirm
against `IdealGasThermo`'s `getproperty` methods). Returns `nothing` so the
benchmark does not time construction of a result container.

Note: the original wrapped the loop in `@views`, which was a no-op here —
`TT[i]` is scalar indexing and creates no array slices — so it was removed.
"""
function benchmark_gas!(TT::AbstractVector, gas::IdealGasThermo.AbstractGas)
    for T in TT
        gas.T = T
        gas.cp
        gas.ϕ
        gas.h
        gas.cp_T
    end
    return nothing
end

"""
    benchmark_enthalpy()

Benchmark payload: construct a fresh `Gas` and assign its `h` property to
`1e5` (presumably specific enthalpy in J/kg — confirm against `Gas`'s
`setproperty!`). Returns the mutated `Gas` so the assignment cannot be
optimized away.
"""
function benchmark_enthalpy()
    g = Gas()
    g.h = 1e5
    return g
end

"""
    benchmark_combustion()

Benchmark payload: call `IdealGasThermo.fuel_combustion` on a default `Gas`
oxidizer with fuel `"CH4"` and arguments `298.15` and `1e-2` (presumably a
reference temperature and a fuel fraction — confirm against the
`fuel_combustion` signature). Returns whatever `fuel_combustion` returns.
"""
function benchmark_combustion()
    oxidizer = Gas()
    fuel_name = "CH4"
    return IdealGasThermo.fuel_combustion(oxidizer, fuel_name, 298.15, 1e-2)
end

# Create each subgroup explicitly before inserting benchmarks into it:
# `BenchmarkGroup` does not auto-create nested groups on `getindex`, so
# `SUITE["gas"][...] = ...` on a missing key would throw a KeyError.
SUITE["gas"] = BenchmarkGroup()
SUITE["state"] = BenchmarkGroup()
SUITE["combustion"] = BenchmarkGroup()

# `$`-interpolation in `@benchmarkable` captures the arguments at definition
# time, so gas construction and global lookups are excluded from the timing.
SUITE["gas"]["Gas1D thermo properties"] = @benchmarkable benchmark_gas!($TT, $(Gas1D()))
SUITE["gas"]["Gas thermo properties"] = @benchmarkable benchmark_gas!($TT, $(Gas()))
SUITE["state"]["Set enthalpy"] = @benchmarkable benchmark_enthalpy()
SUITE["combustion"]["fuel_combustion"] = @benchmarkable benchmark_combustion()