
Commit

Add basic benchmark harness
Co-authored-by: Andrei Zhabinski <faithlessfriend@gmail.com>
ToucheSir committed Oct 13, 2022
1 parent 806b0ef commit 10bf133
Showing 5 changed files with 102 additions and 0 deletions.
15 changes: 15 additions & 0 deletions .buildkite/pipeline.yml
@@ -40,6 +40,21 @@ steps:
      NNLIB_TEST_CUDA: true
    timeout_in_minutes: 60

- label: "Benchmarks"
plugins:
- JuliaCI/julia#v1:
version: 1
env:
JULIA_NUM_THREADS: 4
command:
- julia --project=benchmark -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
- julia --project=benchmark benchmark/runbenchmarks.jl
- printf '%b\n' "$(cat benchmark/report.md)" | buildkite-agent annotate --style "info"
agents:
queue: "juliagpu"
if: build.pull_request.labels includes "benchmark"
timeout_in_minutes: 30

# - label: "GPU julia nightly"
# plugins:
# - JuliaCI/julia#v1:
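Note (not part of this commit): the new step can be reproduced locally. A rough sketch from a Julia session started at the repository root, mirroring the commands above (the JULIA_NUM_THREADS=4 setting from the step's env block would need to be set in your own environment), is:

    using Pkg
    Pkg.activate("benchmark")                           # --project=benchmark
    Pkg.develop(PackageSpec(path = pwd()))              # dev the local NNlib checkout into the benchmark env
    Pkg.instantiate()
    include(joinpath("benchmark", "runbenchmarks.jl"))  # runs the harness and writes benchmark/report.md
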
3 changes: 3 additions & 0 deletions .gitignore
@@ -13,3 +13,6 @@ deps.jl
.vscode/
/Manifest.toml
lib/NNlibCUDA/Manifest.toml
benchmark/Manifest.toml
benchmark/*.json
benchmark/report.md
6 changes: 6 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,6 @@
[deps]
ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
BenchmarkCI = "20533458-34a3-403d-a444-e18f38190b5b"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
13 changes: 13 additions & 0 deletions benchmark/benchmarks.jl
@@ -0,0 +1,13 @@
using BenchmarkTools
using NNlib

const SUITE = BenchmarkGroup()

SUITE["activations"] = BenchmarkGroup()

x = rand(64, 64)

for f in NNlib.ACTIVATIONS
    act = @eval($f)
    SUITE["activations"][string(f)] = @benchmarkable $act.($x)
end
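
Usage sketch (an assumption, not part of this commit): the suite above can also be run directly with BenchmarkTools, without going through PkgBenchmark, assuming the benchmark environment is active and the working directory is benchmark/:

    using BenchmarkTools

    include("benchmarks.jl")             # defines SUITE with the "activations" group

    tune!(SUITE)                         # choose evaluation counts for each benchmark
    results = run(SUITE; verbose = true)
    minimum(results["activations"])      # best-case timing per activation function
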
65 changes: 65 additions & 0 deletions benchmark/runbenchmarks.jl
@@ -0,0 +1,65 @@
# Adapted from
# https://github.com/kul-forbes/ProximalOperators.jl/tree/master/benchmark
using ArgParse
using PkgBenchmark
using BenchmarkCI: displayjudgement, printresultmd, CIResult
using Markdown

function markdown_report(judgement)
    md = sprint(printresultmd, CIResult(judgement = judgement))
    md = replace(md, ":x:" => "")
    md = replace(md, ":white_check_mark:" => "")
    return md
end

function parse_commandline()
    s = ArgParseSettings()

    @add_arg_table! s begin
        "--target"
            help = "the branch/commit/tag to use as target"
            default = "HEAD"
        "--baseline"
            help = "the branch/commit/tag to use as baseline"
            default = "master"
        "--retune"
            help = "force re-tuning (ignore existing tuning data)"
            action = :store_true
    end

    return parse_args(s)
end

function main()
    parsed_args = parse_commandline()

    mkconfig(; kwargs...) =
        BenchmarkConfig(
            env = Dict(
                "JULIA_NUM_THREADS" => get(ENV, "JULIA_NUM_THREADS", "1"),
            );
            kwargs...
        )

    target = parsed_args["target"]
    group_target = benchmarkpkg(
        dirname(@__DIR__),
        mkconfig(id = target),
        resultfile = joinpath(@__DIR__, "result-$(target).json"),
        retune = parsed_args["retune"],
    )

    baseline = parsed_args["baseline"]
    group_baseline = benchmarkpkg(
        dirname(@__DIR__),
        mkconfig(id = baseline),
        resultfile = joinpath(@__DIR__, "result-$(baseline).json"),
    )

    judgement = judge(group_target, group_baseline)
    report_md = markdown_report(judgement)
    write(joinpath(@__DIR__, "report.md"), report_md)
    display(Markdown.parse(report_md))
end

main()
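
Usage note: the flags defined in parse_commandline map directly onto the command line. For example (with a hypothetical branch name),

    julia --project=benchmark benchmark/runbenchmarks.jl --target my-branch --baseline master --retune

compares my-branch against master and forces re-tuning, while running the script with no flags, as the Buildkite step above does, compares HEAD against master.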
