Merge pull request #681 from finch-tensor/wma/benchmark-improvements

Wma/benchmark improvements

willow-ahrens authored Dec 28, 2024
2 parents 53ae4fb + 0668497 · commit 7ea378a

Showing 6 changed files with 148 additions and 75 deletions.
3 changes: 2 additions & 1 deletion benchmark/.gitignore
@@ -1,3 +1,4 @@
 Manifest.toml
 *.json
-*.o
+*.o
+jl_*
3 changes: 2 additions & 1 deletion benchmark/Project.toml
@@ -1,9 +1,10 @@
 [deps]
+ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 Finch = "9177782c-1635-4eb9-9bfb-d9dfa25e6bce"
 MatrixDepot = "b51810bb-c9f3-55da-ae3c-350fc1fbce05"
 MatrixMarket = "4d4711f2-db25-561a-b6b3-d35e7d4047d3"
 PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"

 [compat]
-BenchmarkTools = "1.5"
+BenchmarkTools = "1.5"
178 changes: 117 additions & 61 deletions benchmark/benchmarks.jl
@@ -10,6 +10,29 @@ using BenchmarkTools
 using MatrixDepot
 using SparseArrays
 using Random
+using ArgParse
+
+s = ArgParseSettings("Run Finch.jl benchmarks. By default, all tests are run unless --include or --exclude options are provided.
+If the environment variable FINCH_BENCHMARK_ARGS is set, it will override the given arguments.")
+
+@add_arg_table! s begin
+    "--include", "-i"
+    nargs = '*'
+    default = []
+    help = "list of benchmark suites to include, e.g., --include high-level structures"
+
+    "--exclude", "-e"
+    nargs = '*'
+    default = []
+    help = "list of benchmark suites to exclude, e.g., --exclude compile graphs"
+end
+
+if "FINCH_BENCHMARK_ARGS" in keys(ENV)
+    ARGS = split(ENV["FINCH_BENCHMARK_ARGS"], " ")
+end
+
+parsed_args = parse_args(ARGS, s)
+
 include(joinpath(@__DIR__, "../docs/examples/bfs.jl"))
 include(joinpath(@__DIR__, "../docs/examples/pagerank.jl"))
 include(joinpath(@__DIR__, "../docs/examples/shortest_paths.jl"))
@@ -20,73 +43,97 @@ SUITE = BenchmarkGroup()
 
 SUITE["high-level"] = BenchmarkGroup()
 
-let
-    k = Ref(0.0)
-    A = Tensor(Dense(Sparse(Element(0.0))), fsprand(10000, 10000, 0.01))
-    x = rand(1)
-    y = rand(1)
-    SUITE["high-level"]["permutedims(Dense(Sparse()))"] = @benchmarkable(permutedims($A, (2, 1)))
-end
+for (scheduler_name, scheduler) in [
+    "default_scheduler" => Finch.default_scheduler(),
+    "galley_scheduler" => Finch.galley_scheduler(),
+]
+    Finch.with_scheduler(scheduler) do
+        let
+            A = Tensor(Dense(Sparse(Element(0.0))), fsprand(10000, 10000, 0.01))
+            SUITE["high-level"]["permutedims(Dense(Sparse()))"][scheduler_name] = @benchmarkable(permutedims($A, (2, 1)))
+        end
 
-let
-    k = Ref(0.0)
-    A = Tensor(Dense(Dense(Element(0.0))), rand(10000, 10000))
-    x = rand(1)
-    y = rand(1)
-    SUITE["high-level"]["permutedims(Dense(Dense()))"] = @benchmarkable(permutedims($A, (2, 1)))
-end
+        let
+            A = Tensor(Dense(Dense(Element(0.0))), rand(10000, 10000))
+            SUITE["high-level"]["permutedims(Dense(Dense()))"][scheduler_name] = @benchmarkable(permutedims($A, (2, 1)))
+        end
 
-let
-    k = Ref(0.0)
-    x = rand(1)
-    y = rand(1)
-    SUITE["high-level"]["einsum_spmv_compile_overhead"] = @benchmarkable(
-        begin
-            A, x, y = (A, $x, $y)
-            @einsum y[i] += A[i, j] * x[j]
-        end,
-        setup = (A = Tensor(Dense(SparseList(Element($k[] += 1))), fsprand(1, 1, 1)))
-    )
-end
+        let
+            k = Ref(0.0)
+            x = rand(1)
+            y = rand(1)
+            SUITE["high-level"]["einsum_spmv_compile_overhead"][scheduler_name] = @benchmarkable(
+                begin
+                    A, x, y = (A, $x, $y)
+                    @einsum y[i] += A[i, j] * x[j]
+                end,
+                setup = (A = Tensor(Dense(SparseList(Element($k[] += 1))), fsprand(1, 1, 1)))
+            )
+        end
 
-let
-    A = Tensor(Dense(SparseList(Element(0.0))), fsprand(1, 1, 1))
-    x = rand(1)
-    SUITE["high-level"]["einsum_spmv_call_overhead"] = @benchmarkable(
-        begin
-            A, x = ($A, $x)
-            @einsum y[i] += A[i, j] * x[j]
-        end,
-    )
-end
+        let
+            N = 10
+            P = 0.0001
+            C = 16.0
+            SUITE["high-level"]["einsum_matmul_adaptive_overhead"][scheduler_name] = @benchmarkable(
+                begin
+                    @einsum C[i, j] += A[i, k] * B[k, j]
+                end,
+                setup = begin
+                    (N, P, C) = ($N, $P, $C)
+                    n = floor(Int, N * C^(rand()))
+                    m = floor(Int, N * C^(rand()))
+                    l = floor(Int, N * C^(rand()))
+                    p = floor(Int, P * C^(rand()))
+                    q = floor(Int, P * C^(rand()))
+                    A = fsprand(n, l, p)
+                    B = fsprand(l, m, q)
+                end,
+                evals = 1
+            )
+        end
 
-let
-    N = 1_000
-    K = 1_000
-    p = 0.001
-    A = Tensor(Dense(Dense(Element(0.0))), rand(N, K))
-    B = Tensor(Dense(Dense(Element(0.0))), rand(K, N))
-    M = Tensor(Dense(SparseList(Element(0.0))), fsprand(N, N, p))
-
-    SUITE["high-level"]["sddmm_fused"] = @benchmarkable(
-        begin
-            M = lazy($M)
-            A = lazy($A)
-            B = lazy($B)
-            compute(M .* (A * B))
-        end,
-    )
-
-    SUITE["high-level"]["sddmm_unfused"] = @benchmarkable(
-        begin
-            M = $M
-            A = $A
-            B = $B
-            M .* (A * B)
-        end,
-    )
-end
+        let
+            A = Tensor(Dense(SparseList(Element(0.0))), fsprand(1, 1, 1))
+            x = rand(1)
+            SUITE["high-level"]["einsum_spmv_call_overhead"][scheduler_name] = @benchmarkable(
+                begin
+                    A, x = ($A, $x)
+                    @einsum y[i] += A[i, j] * x[j]
+                end,
+            )
+        end
+
+        let
+            N = 1_000
+            K = 1_000
+            p = 0.001
+            A = Tensor(Dense(Dense(Element(0.0))), rand(N, K))
+            B = Tensor(Dense(Dense(Element(0.0))), rand(K, N))
+            M = Tensor(Dense(SparseList(Element(0.0))), fsprand(N, N, p))
+
+            SUITE["high-level"]["sddmm_fused"][scheduler_name] = @benchmarkable(
+                begin
+                    M = lazy($M)
+                    A = lazy($A)
+                    B = lazy($B)
+                    compute(M .* (A * B))
+                end,
+            )
+
+            SUITE["high-level"]["sddmm_unfused"][scheduler_name] = @benchmarkable(
+                begin
+                    M = $M
+                    A = $A
+                    B = $B
+                    M .* (A * B)
+                end,
+            )
+        end
+    end
+end
 
 
 eval(let
     A = Tensor(Dense(SparseList(Element(0.0))), fsprand(1, 1, 1))
     x = rand(1)
@@ -348,4 +395,13 @@ x = rand(N)
 
 SUITE["structure"]["banded"]["SparseList"] = @benchmarkable spmv_serial($A_ref, $x)
 SUITE["structure"]["banded"]["SparseBand"] = @benchmarkable spmv_serial($A, $x)
-SUITE["structure"]["banded"]["SparseInterval"] = @benchmarkable spmv_serial($A2, $x)
+SUITE["structure"]["banded"]["SparseInterval"] = @benchmarkable spmv_serial($A2, $x)
+
+if !isempty(parsed_args["include"])
+    inc = reduce((a, b) -> :($a || $b), parsed_args["include"])
+    SUITE = eval(:(SUITE[@tagged $inc]))
+end
+if !isempty(parsed_args["exclude"])
+    exc = reduce((a, b) -> :($a || $b), parsed_args["exclude"])
+    SUITE = eval(:(SUITE[@tagged !$exc]))
+end
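
Note: the filter at the end of this file relies on BenchmarkTools' @tagged indexing, where a group's keys also count as tags. A minimal sketch of the equivalence, using illustrative suite names:

    using BenchmarkTools

    SUITE = BenchmarkGroup()
    SUITE["high-level"] = BenchmarkGroup()
    SUITE["structure"] = BenchmarkGroup()

    # `--include high-level structure` reduces the list to the expression
    # :("high-level" || "structure") and evaluates the equivalent of:
    filtered = SUITE[@tagged "high-level" || "structure"]

    # `--exclude graphs` would instead keep everything not tagged "graphs":
    # filtered = SUITE[@tagged !("graphs")]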
2 changes: 1 addition & 1 deletion benchmark/runbenchmarks.jl
@@ -11,7 +11,7 @@ end
 using PkgBenchmark
 benchmarkpkg(
     dirname(@__DIR__),
-    BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "8")),
+    BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "8", "FINCH_BENCHMARK_ARGS" => get(ENV, "FINCH_BENCHMARK_ARGS", join(ARGS, " ")))),
     resultfile = joinpath(@__DIR__, "result.json"),
 )
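Because PkgBenchmark runs the suite in a separate subprocess, this wrapper forwards its own ARGS through the FINCH_BENCHMARK_ARGS environment variable. A hedged sketch of an equivalent driver session (the --exclude value is illustrative):

    using PkgBenchmark

    # Roughly equivalent to `julia benchmark/runbenchmarks.jl --exclude graphs`:
    results = benchmarkpkg(
        "Finch",
        BenchmarkConfig(env = Dict(
            "JULIA_NUM_THREADS" => "8",
            "FINCH_BENCHMARK_ARGS" => "--exclude graphs",
        )),
        resultfile = "result.json",
    )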
8 changes: 7 additions & 1 deletion benchmark/runjudge.jl
@@ -11,19 +11,25 @@ end
 using PkgBenchmark
 
 function mkconfig(; kwargs...)
-    return BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "1"); kwargs...)
+    return BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "1", "FINCH_BENCHMARK_ARGS" => get(ENV, "FINCH_BENCHMARK_ARGS", join(ARGS, " "))); kwargs...)
 end
 
+script = tempname(joinpath(@__DIR__))
+
+cp(joinpath(@__DIR__, "benchmarks.jl"), script)
+
 group_target = benchmarkpkg(
     dirname(@__DIR__),
     mkconfig(),
     resultfile = joinpath(@__DIR__, "result-target.json"),
+    script = script,
 )
 
 group_baseline = benchmarkpkg(
     dirname(@__DIR__),
     mkconfig(id = "main"),
     resultfile = joinpath(@__DIR__, "result-baseline.json"),
+    script = script,
 )
 
 judgement = judge(group_target, group_baseline)
29 changes: 19 additions & 10 deletions test/runtests.jl
@@ -9,21 +9,29 @@ end
 using Test
 using ArgParse
 
-s = ArgParseSettings("Run Finch.jl tests. All tests are run by default. Specific
-test suites may be specified as positional arguments. Finch compares to
-reference output which depends on the system word size (currently
-$(Sys.WORD_SIZE)-bit). To overwrite $(Sys.WORD_SIZE==32 ? 64 : 32)-bit output,
-run this with a $(Sys.WORD_SIZE==32 ? 64 : 32)-bit julia executable.")
+s = ArgParseSettings("Run Finch.jl tests. By default, all tests are run unless --include or --exclude options are provided.
+Finch compares to reference output which depends on the system word size (currently $(Sys.WORD_SIZE)-bit). To overwrite $(Sys.WORD_SIZE==32 ? 64 : 32)-bit output, run this with a $(Sys.WORD_SIZE==32 ? 64 : 32)-bit julia executable.
+If the environment variable FINCH_TEST_ARGS is set, it will override the given arguments.")
 
 @add_arg_table! s begin
     "--overwrite", "-w"
     action = :store_true
     help = "overwrite reference output for $(Sys.WORD_SIZE)-bit systems"
-    "suites"
-    nargs = '*'
-    default = ["all"]
-    help = "names of test suites to run, from: print, constructors, representation, merges, index, typical, kernels, issues, interface, galley, continuous, continuousexamples, simple, examples, fileio, docs, parallel, algebra."
+    "--include", "-i"
+    nargs = '*'
+    default = []
+    help = "list of test suites to include, e.g., --include constructors merges"
+
+    "--exclude", "-e"
+    nargs = '*'
+    default = []
+    help = "list of test suites to exclude, e.g., --exclude parallel algebra"
 end
 
+if "FINCH_TEST_ARGS" in keys(ENV)
+    ARGS = split(ENV["FINCH_TEST_ARGS"], " ")
+end
+
 parsed_args = parse_args(ARGS, s)
 
 """
@@ -61,7 +69,9 @@ end
 
 function should_run(name)
     global parsed_args
-    return ("all" in parsed_args["suites"] || name in parsed_args["suites"])
+    inc = parsed_args["include"]
+    exc = parsed_args["exclude"]
+    return (isempty(inc) || name in inc) && !(name in exc)
 end
 
 macro repl(io, ex, quiet = false)
@@ -109,7 +119,6 @@ include("utils.jl")
         end
     end
     if should_run("parallel") include("test_parallel.jl") end
-    #if should_run("continuous") include("test_continuous.jl") end
     #algebra goes at the end since it calls refresh()
     if should_run("algebra") include("test_algebra.jl") end
 end
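
With this change, test selection mirrors the benchmark interface. A hedged sketch of equivalent invocations (suite names are examples taken from the help strings above):

    # From a shell:
    #   julia test/runtests.jl --include constructors merges
    #   FINCH_TEST_ARGS="--exclude parallel algebra" julia test/runtests.jl
    #
    # Or through the package manager, which forwards test_args to ARGS:
    using Pkg
    Pkg.test("Finch"; test_args = ["--include", "constructors", "merges"])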
