Add basic benchmark harness #436

Merged · 1 commit · Oct 21, 2022
15 changes: 15 additions & 0 deletions .buildkite/pipeline.yml
@@ -40,6 +40,21 @@ steps:
      NNLIB_TEST_CUDA: true
    timeout_in_minutes: 60

  - label: "Benchmarks"
    plugins:
      - JuliaCI/julia#v1:
          version: 1
    env:
      JULIA_NUM_THREADS: 4
    command:
      - julia --project=benchmark -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
      - julia --project=benchmark benchmark/runbenchmarks.jl
      - printf '%b\n' "$(cat benchmark/report.md)" | buildkite-agent annotate --style 'info'
    agents:
      queue: "juliagpu"
    if: build.pull_request.labels includes "benchmark"
    timeout_in_minutes: 30

  # - label: "GPU julia nightly"
  #   plugins:
  #     - JuliaCI/julia#v1:
3 changes: 3 additions & 0 deletions .gitignore
@@ -13,3 +13,6 @@ deps.jl
.vscode/
/Manifest.toml
lib/NNlibCUDA/Manifest.toml
benchmark/Manifest.toml
benchmark/*.json
benchmark/report.md
14 changes: 14 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,14 @@
[deps]
ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
BenchmarkCI = "20533458-34a3-403d-a444-e18f38190b5b"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"

[compat]
# No compat bounds for NNlib because we may test breaking versions
ArgParse = "1"
BenchmarkCI = "0.1"
BenchmarkTools = "1.3"
PkgBenchmark = "0.2"
julia = "1.6"
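To reproduce this environment locally, the same `Pkg` calls that the Buildkite step above runs can be executed from a Julia REPL at the repository root. A minimal sketch of that pipeline command, nothing beyond it:

```julia
using Pkg

# Mirror the `julia --project=benchmark -e '...'` step from pipeline.yml:
# activate the benchmark environment, develop the local NNlib checkout,
# and install the remaining dependencies.
Pkg.activate("benchmark")
Pkg.develop(PackageSpec(path = pwd()))
Pkg.instantiate()
```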
13 changes: 13 additions & 0 deletions benchmark/benchmarks.jl
@@ -0,0 +1,13 @@
using BenchmarkTools
using NNlib

const SUITE = BenchmarkGroup()

SUITE["activations"] = BenchmarkGroup()

x = rand(64, 64)

for f in NNlib.ACTIVATIONS
    act = @eval($f)
    SUITE["activations"][string(f)] = @benchmarkable $act.($x)
end
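For quick local iteration, the suite can also be run directly with BenchmarkTools, bypassing PkgBenchmark entirely. A minimal sketch — the `tune!` call and the `relu` lookup are illustrative, not part of this PR:

```julia
using BenchmarkTools

include("benchmarks.jl")   # defines SUITE as above

tune!(SUITE)                           # calibrate evaluation counts
results = run(SUITE; verbose = true)   # run every activation benchmark

# Inspect a single activation's trial, e.g. relu.
display(results["activations"]["relu"])
```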
65 changes: 65 additions & 0 deletions benchmark/runbenchmarks.jl
@@ -0,0 +1,65 @@
# Adapted from
# https://github.com/kul-forbes/ProximalOperators.jl/tree/master/benchmark
using ArgParse
using PkgBenchmark
using BenchmarkCI: displayjudgement, printresultmd, CIResult
using Markdown

function markdown_report(judgement)
    md = sprint(printresultmd, CIResult(judgement = judgement))
    md = replace(md, ":x:" => "❌")
    md = replace(md, ":white_check_mark:" => "✅")
    return md
end

function parse_commandline()
    s = ArgParseSettings()

    @add_arg_table! s begin
        "--target"
            help = "the branch/commit/tag to use as target"
            default = "HEAD"
        "--baseline"
            help = "the branch/commit/tag to use as baseline"
            default = "master"
        "--retune"
            help = "force re-tuning (ignore existing tuning data)"
            action = :store_true
    end

    return parse_args(s)
end

function main()
    parsed_args = parse_commandline()

    mkconfig(; kwargs...) =
        BenchmarkConfig(
            env = Dict(
                "JULIA_NUM_THREADS" => get(ENV, "JULIA_NUM_THREADS", "1"),
            );
            kwargs...
        )

    target = parsed_args["target"]
    group_target = benchmarkpkg(
        dirname(@__DIR__),
        mkconfig(id = target),
        resultfile = joinpath(@__DIR__, "result-$(target).json"),
        retune = parsed_args["retune"],
    )

    baseline = parsed_args["baseline"]
    group_baseline = benchmarkpkg(
        dirname(@__DIR__),
        mkconfig(id = baseline),
        resultfile = joinpath(@__DIR__, "result-$(baseline).json"),
    )

    judgement = judge(group_target, group_baseline)
    report_md = markdown_report(judgement)
    write(joinpath(@__DIR__, "report.md"), report_md)
    display(Markdown.parse(report_md))
end

main()
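Usage follows the ArgParse table above: `julia --project=benchmark benchmark/runbenchmarks.jl --target HEAD --baseline master --retune`, with every flag optional. Since `benchmarkpkg` saves a `result-<rev>.json` per revision, earlier runs can be compared offline without re-benchmarking. A hedged sketch, assuming the default `--target`/`--baseline` values and running from the repository root:

```julia
using PkgBenchmark

# Reload results written by runbenchmarks.jl; the file names assume
# the default flag values above.
target   = readresults("benchmark/result-HEAD.json")
baseline = readresults("benchmark/result-master.json")

judgement = judge(target, baseline)
export_markdown(stdout, judgement)  # render the judgement as Markdown
```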