Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Tfc/parallel analysis fixes #514

Closed
wants to merge 45 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
4efcc36
just a random start
wraith1995 Feb 9, 2024
9c1b319
new passing parallel test + working on fix for atomics
wraith1995 Feb 9, 2024
28d3f61
Merge branch 'main' into origin/tfc/more-parallel-tests
wraith1995 Feb 9, 2024
34faf8d
fewer tests; added k parallel tests but it fails
wraith1995 Feb 12, 2024
7a7be19
Merge branch 'origin/tfc/more-parallel-tests' of github.com:willow-ah…
wraith1995 Feb 12, 2024
2807d2b
make sure alternative fails
wraith1995 Feb 12, 2024
a70c732
analysis problems...
wraith1995 Feb 12, 2024
cc66fc4
more tests
wraith1995 Feb 19, 2024
a7e5867
even more tests
wraith1995 Feb 19, 2024
4133403
just one problem
wraith1995 Feb 19, 2024
f15fcaa
merging main into here
wraith1995 Feb 25, 2024
3df853b
merge
wraith1995 Feb 25, 2024
0a4c749
fix moveto in seperation level.
wraith1995 Feb 25, 2024
9c2eb79
forgot one.
wraith1995 Feb 25, 2024
1e3b4d7
intermediate
wraith1995 Feb 25, 2024
13d6ad8
update tests
wraith1995 Feb 25, 2024
3fd500c
Delete src/tensors/levels/separationlevel.jl
wraith1995 Feb 27, 2024
92d48bc
synch
wraith1995 Feb 27, 2024
c4be1b8
large update to parallel analysis
wraith1995 Feb 28, 2024
7b77a7a
eliminate old analysis
wraith1995 Feb 28, 2024
248bc25
one more fix
wraith1995 Feb 28, 2024
6795515
quick exit
wraith1995 Feb 28, 2024
95375d3
??
wraith1995 Feb 28, 2024
2d4d435
many tiny fixes to compile
wraith1995 Feb 29, 2024
4054377
bad dense is_atomic prop
wraith1995 Feb 29, 2024
00e19e8
ops
wraith1995 Feb 29, 2024
b9b9749
typo fixes many tests
wraith1995 Feb 29, 2024
1608576
now ae is needed
wraith1995 Feb 29, 2024
ec38b00
old tests pass mod ae
wraith1995 Feb 29, 2024
32b1ff8
remove prints
wraith1995 Feb 29, 2024
96ebdc2
comment
wraith1995 Feb 29, 2024
b108da5
this will pass with atomic element level.
wraith1995 Feb 29, 2024
38f8ee4
hide overall here
wraith1995 Feb 29, 2024
70ae1aa
add is_concurrent
wraith1995 Feb 29, 2024
fb2e43b
okay added concurrent
wraith1995 Feb 29, 2024
d5244bd
add docs
wraith1995 Feb 29, 2024
88fd483
smallnotes
willow-ahrens Feb 29, 2024
ca45a33
prepare for merge
willow-ahrens Apr 30, 2024
ea379da
Merge branch 'main' into origin/tfc/more-parallel-tests
willow-ahrens Apr 30, 2024
580f64d
oops
willow-ahrens Apr 30, 2024
537a7f2
fix
willow-ahrens Apr 30, 2024
8efac90
update SingleRLE
willow-ahrens Apr 30, 2024
f7f491f
fixes
willow-ahrens Apr 30, 2024
063cf4c
cool
willow-ahrens Apr 30, 2024
b5a7f52
remove num_indexable
willow-ahrens May 1, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions src/architecture.jl
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ virtual_get_device(::VirtualSerial) = VirtualCPU(nothing, 1)
virtual_get_task(::VirtualSerial) = nothing



struct CPUThread{Parent} <: AbstractTask
tid::Int
dev::CPU
Expand Down Expand Up @@ -157,5 +156,6 @@ function moveto(vec::Vector, task::CPUThread)
end

function moveto(vec::CPULocalVector, task::CPUThread)
return vec.data[task.tid]
temp = vec.data[task.tid]
return temp
end
3 changes: 2 additions & 1 deletion src/interface/abstractarrays.jl
Original file line number Diff line number Diff line change
Expand Up @@ -115,4 +115,5 @@ Base.setindex!(arr::AsArray{T, N}, v, i::Vararg{Int, N}) where {T, N} = arr.fbr[
Base.setindex!(arr::AsArray{T, N}, v, i::Vararg{Any, N}) where {T, N} = arr.fbr[i...] = v

is_injective(ctx, tns::VirtualAbstractArray) = [true for _ in tns.ndims]
is_atomic(ctx, tns::VirtualAbstractArray) = true
is_atomic(ctx, tns::VirtualAbstractArray) = [false, [false for _ in tns.ndims]...]
# is_atomic(ctx, tns::VirtualAbstractArray) = true
5 changes: 4 additions & 1 deletion src/looplets/unfurl.jl
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,7 @@ function unfurl(ctx, tns::Furlable, ext, mode, protos...)
end
unfurl(ctx, tns, ext, mode, protos...) = tns

instantiate(ctx, tns::Furlable, mode, protos) = tns
instantiate(ctx, tns::Furlable, mode, protos) = tns
is_injective(ctx, tns::Furlable) = is_injective(ctx, tns.body)
is_atomic(ctx, tns::Furlable) = is_atomic(ctx, tns.body)
is_concurrent(ctx, tns::Furlable) = is_concurrent(ctx, tns.body)
1 change: 1 addition & 0 deletions src/tensors/combinators/offset.jl
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ end

is_injective(ctx, lvl::VirtualOffsetArray) = is_injective(ctx, lvl.body)
is_atomic(ctx, lvl::VirtualOffsetArray) = is_atomic(ctx, lvl.body)
is_concurrent(ctx, lvl::VirtualOffsetArray) = is_concurrent(ctx, lvl.body)

Base.show(io::IO, ex::VirtualOffsetArray) = Base.show(io, MIME"text/plain"(), ex)
function Base.show(io::IO, mime::MIME"text/plain", ex::VirtualOffsetArray)
Expand Down
2 changes: 2 additions & 0 deletions src/tensors/combinators/permissive.jl
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ end

is_injective(ctx, lvl::VirtualPermissiveArray) = is_injective(ctx, lvl.body)
is_atomic(ctx, lvl::VirtualPermissiveArray) = is_atomic(ctx, lvl.body)
is_concurrent(ctx, lvl::VirtualPermissiveArray) = is_concurrent(ctx, lvl.body)


Base.show(io::IO, ex::VirtualPermissiveArray) = Base.show(io, MIME"text/plain"(), ex)
function Base.show(io::IO, mime::MIME"text/plain", ex::VirtualPermissiveArray)
Expand Down
5 changes: 4 additions & 1 deletion src/tensors/combinators/product.jl
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,10 @@ function is_concurrent(ctx, lvl::VirtualProductArray)
sub = is_concurrent(ctx, lvl.body)
return [sub[1:lvl.dim]..., false, sub[lvl.dim + 1:end]...]
end
is_atomic(ctx, lvl::VirtualProductArray) = is_atomic(ctx, lvl.body)
# Atomicity of a product combinator: the entry for the split dimension is
# inserted as the conjunction of the two underlying dimensions it fuses;
# the overall flag from the wrapped body passes through unchanged.
function is_atomic(ctx, lvl::VirtualProductArray)
    below, overall = is_atomic(ctx, lvl.body)
    d = lvl.dim
    fused = below[d] && below[d + 1]
    per_dim = vcat(below[1:d], fused, below[d + 1:end])
    return (per_dim, overall)
end

Base.show(io::IO, ex::VirtualProductArray) = Base.show(io, MIME"text/plain"(), ex)
function Base.show(io::IO, mime::MIME"text/plain", ex::VirtualProductArray)
Expand Down
2 changes: 2 additions & 0 deletions src/tensors/combinators/protocolized.jl
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ end

is_injective(ctx, lvl::VirtualProtocolizedArray) = is_injective(ctx, lvl.body)
is_atomic(ctx, lvl::VirtualProtocolizedArray) = is_atomic(ctx, lvl.body)
is_concurrent(ctx, lvl::VirtualProtocolizedArray) = is_concurrent(ctx, lvl.body)


Base.:(==)(a::VirtualProtocolizedArray, b::VirtualProtocolizedArray) = a.body == b.body && a.protos == b.protos

Expand Down
1 change: 1 addition & 0 deletions src/tensors/combinators/roots.jl
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ lower_access(ctx::AbstractCompiler, node, tns::FinchNode) =

is_injective(ctx, lvl::FinchNode) = is_injective(ctx, resolve(ctx, lvl))
is_atomic(ctx, lvl::FinchNode) = is_atomic(ctx, resolve(ctx, lvl))
is_concurrent(ctx, lvl::FinchNode) = is_concurrent(ctx, resolve(ctx, lvl))

function getroot(node::FinchNode)
if node.kind === virtual
Expand Down
2 changes: 2 additions & 0 deletions src/tensors/combinators/scale.jl
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ end

is_injective(ctx, lvl::VirtualScaleArray) = is_injective(ctx, lvl.body)
is_atomic(ctx, lvl::VirtualScaleArray) = is_atomic(ctx, lvl.body)
is_concurrent(ctx, lvl::VirtualScaleArray) = is_concurrent(ctx, lvl.body)


Base.show(io::IO, ex::VirtualScaleArray) = Base.show(io, MIME"text/plain"(), ex)
function Base.show(io::IO, mime::MIME"text/plain", ex::VirtualScaleArray)
Expand Down
10 changes: 9 additions & 1 deletion src/tensors/combinators/toeplitz.jl
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,15 @@ function is_injective(ctx, lvl::VirtualToeplitzArray)
sub = is_injective(ctx, lvl.body)
return [sub[1:lvl.dim]..., false, sub[lvl.dim + 1:end]...]
end
is_atomic(ctx, lvl::VirtualToeplitzArray) = is_atomic(ctx, lvl.body)
# Atomicity of a Toeplitz combinator: the inserted dimension at `lvl.dim` is
# atomic only when both neighboring underlying dimensions are; the overall
# flag from the wrapped body passes through unchanged.
function is_atomic(ctx, lvl::VirtualToeplitzArray)
    below, overall = is_atomic(ctx, lvl.body)
    d = lvl.dim
    inserted = below[d] && below[d + 1]
    return (vcat(below[1:d], inserted, below[d + 1:end]), overall)
end
# Concurrency of a Toeplitz combinator: the dimension inserted at `lvl.dim`
# is marked non-concurrent; the body's entries are kept on either side.
function is_concurrent(ctx, lvl::VirtualToeplitzArray)
    inner = is_concurrent(ctx, lvl.body)
    d = lvl.dim
    return vcat(inner[1:d], false, inner[d + 1:end])
end

Base.show(io::IO, ex::VirtualToeplitzArray) = Base.show(io, MIME"text/plain"(), ex)
function Base.show(io::IO, mime::MIME"text/plain", ex::VirtualToeplitzArray)
Expand Down
1 change: 1 addition & 0 deletions src/tensors/combinators/unfurled.jl
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ getroot(tns::Unfurled) = getroot(tns.arr)

is_injective(ctx, lvl::Unfurled) = is_injective(ctx, lvl.arr)
is_atomic(ctx, lvl::Unfurled) = is_atomic(ctx, lvl.arr)
is_concurrent(ctx, lvl::Unfurled) = is_concurrent(ctx, lvl.arr)

function lower_access(ctx::AbstractCompiler, node, tns::Unfurled)
if !isempty(node.idxs)
Expand Down
1 change: 1 addition & 0 deletions src/tensors/combinators/windowed.jl
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ end

is_injective(ctx, lvl::VirtualWindowedArray) = is_injective(ctx, lvl.body)
is_atomic(ctx, lvl::VirtualWindowedArray) = is_atomic(ctx, lvl.body)
is_concurrent(ctx, lvl::VirtualWindowedArray) = is_concurrent(ctx, lvl.body)

Base.show(io::IO, ex::VirtualWindowedArray) = Base.show(io, MIME"text/plain"(), ex)
function Base.show(io::IO, mime::MIME"text/plain", ex::VirtualWindowedArray)
Expand Down
2 changes: 2 additions & 0 deletions src/tensors/fibers.jl
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,8 @@ mutable struct VirtualFiber{Lvl} <: AbstractVirtualFiber{Lvl}
end

is_injective(ctx, tns::VirtualFiber) = is_level_injective(ctx, tns.lvl)
is_concurrent(ctx, tns::VirtualFiber) = is_level_concurrent(ctx, tns.lvl)[1]

is_atomic(ctx, tns::VirtualFiber) = is_level_atomic(ctx, tns.lvl)

function virtualize(ctx, ex, ::Type{<:Tensor{Lvl}}, tag=freshen(ctx, :tns)) where {Lvl}
Expand Down
3 changes: 3 additions & 0 deletions src/tensors/levels/abstractlevel.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
abstract type AbstractLevel end
abstract type AbstractVirtualLevel end

virtual_level_ndims(lvl:: AbstractVirtualLevel, ctx) = length(virtual_level_size(lvl, ctx))


#is_laminable_updater(lvl::AbstractVirtualLevel, ctx, ::Union{::typeof(defaultread), ::typeof(walk), ::typeof(gallop), ::typeof(follow), typeof(defaultupdate), typeof(laminate), typeof(extrude)}, protos...) = false

#is_laminable_updater(lvl::AbstractVirtualLevel, ctx) = false
Expand Down
39 changes: 26 additions & 13 deletions src/tensors/levels/atomiclevels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -93,9 +93,15 @@ postype(lvl:: AtomicLevel) = postype(lvl.lvl)

postype(lvl:: VirtualAtomicLevel) = postype(lvl.lvl)

is_level_injective(ctx, lvl::VirtualAtomicLevel) = [is_level_injective(ctx, lvl.lvl)..., true]
is_level_concurrent(ctx, lvl::VirtualAtomicLevel) = [is_level_concurrent(ctx, lvl.lvl)..., true]
is_level_atomic(ctx, lvl::VirtualAtomicLevel) = true
is_level_injective(ctx, lvl::VirtualAtomicLevel) = [is_level_injective(ctx, lvl.lvl)...]
# An atomic wrapper does not change concurrency: forward the wrapped
# level's (per-dimension, overall) result unchanged.
is_level_concurrent(ctx, lvl::VirtualAtomicLevel) = is_level_concurrent(ctx, lvl.lvl)
# An atomic wrapper forces the overall atomicity flag to `true`, while the
# per-dimension information from the wrapped level is kept as-is.
function is_level_atomic(ctx, lvl::VirtualAtomicLevel)
    per_dim = first(is_level_atomic(ctx, lvl.lvl))
    return (per_dim, true)
end

function lower(ctx::AbstractCompiler, lvl::VirtualAtomicLevel, ::DefaultStyle)
quote
Expand All @@ -118,7 +124,7 @@ end
Base.summary(lvl::VirtualAtomicLevel) = "Atomic($(lvl.Lvl))"
virtual_level_resize!(ctx, lvl::VirtualAtomicLevel, dims...) = (lvl.lvl = virtual_level_resize!(ctx, lvl.lvl, dims...); lvl)
virtual_level_size(ctx, lvl::VirtualAtomicLevel) = virtual_level_size(ctx, lvl.lvl)
virtual_level_size(ctx, x) = error(string("Not defined for", x))
virtual_level_ndims(ctx, lvl::VirtualAtomicLevel) = length(virtual_level_size(ctx, lvl.lvl))
virtual_level_eltype(lvl::VirtualAtomicLevel) = virtual_level_eltype(lvl.lvl)
virtual_level_default(lvl::VirtualAtomicLevel) = virtual_level_default(lvl.lvl)

Expand Down Expand Up @@ -203,17 +209,21 @@ function instantiate(ctx, fbr::VirtualSubFiber{VirtualAtomicLevel}, mode::Update
lockVal = freshen(ctx.code, lvl.ex, :lockVal)
dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
return Thunk(

body = (ctx) -> begin
preamble = quote
$atomicData = get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
$lockVal = aquire_lock!($dev, $atomicData)
end,
body = (ctx) -> begin
end
epilogue = quote
release_lock!($dev, $atomicData) end
push!(ctx.code.preamble, preamble)
push!(ctx.code.epilogue, epilogue)
lvl_2 = lvl.lvl
update = instantiate(ctx, VirtualSubFiber(lvl_2, pos), mode, protos)
return update
end,
epilogue = quote
release_lock!($dev, $atomicData) end

)
end
function instantiate(ctx, fbr::VirtualHollowSubFiber{VirtualAtomicLevel}, mode::Updater, protos)
Expand All @@ -223,16 +233,19 @@ function instantiate(ctx, fbr::VirtualHollowSubFiber{VirtualAtomicLevel}, mode::
lockVal = freshen(ctx.code, lvl.ex, :lockVal)
dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
return Thunk(

body = (ctx) -> begin
preamble = quote
$atomicData = get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
$lockVal = aquire_lock!($dev, $atomicData)
end,
body = (ctx) -> begin
end
epilogue = quote
release_lock!($dev, $atomicData) end
push!(ctx.code.preamble, preamble)
push!(ctx.code.epilogue, epilogue)
lvl_2 = lvl.lvl
update = instantiate(ctx, VirtualHollowSubFiber(lvl_2, pos, fbr.dirty), mode, protos)
return update
end,
epilogue = quote
release_lock!($dev, $atomicData) end
end
)
end
11 changes: 10 additions & 1 deletion src/tensors/levels/denselevels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,14 @@ mutable struct VirtualDenseLevel <: AbstractVirtualLevel
end

is_level_injective(ctx, lvl::VirtualDenseLevel) = [is_level_injective(ctx, lvl.lvl)..., true]
is_level_atomic(ctx, lvl::VirtualDenseLevel) = is_level_atomic(ctx, lvl.lvl)
# A dense level appends its own entry — equal to the sublevel's overall
# flag — to the sublevel's per-dimension list, and propagates that flag.
function is_level_atomic(ctx, lvl::VirtualDenseLevel)
    sub, flag = is_level_atomic(ctx, lvl.lvl)
    return (vcat(sub, flag), flag)
end
# Concurrency for a dense level mirrors atomicity: append the sublevel's
# overall flag as this level's entry and propagate the flag.
function is_level_concurrent(ctx, lvl::VirtualDenseLevel)
    sub, flag = is_level_concurrent(ctx, lvl.lvl)
    return (vcat(sub, flag), flag)
end

function virtualize(ctx, ex, ::Type{DenseLevel{Ti, Lvl}}, tag=:lvl) where {Ti, Lvl}
sym = freshen(ctx, tag)
Expand Down Expand Up @@ -206,3 +213,5 @@ function instantiate(ctx, trv::DenseTraversal, mode, subprotos, ::Union{typeof(d
)
)
end


10 changes: 8 additions & 2 deletions src/tensors/levels/denserlelevels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -145,8 +145,14 @@ mutable struct VirtualDenseRLELevel <: AbstractVirtualLevel
end

is_level_injective(ctx, lvl::VirtualDenseRLELevel) = [false, is_level_injective(ctx, lvl.lvl)...]
is_level_concurrent(ctx, lvl::VirtualDenseRLELevel) = [false, is_level_concurrent(ctx, lvl.lvl)...]
is_level_atomic(ctx, lvl::VirtualDenseRLELevel) = false
# A DenseRLE level appends the sublevel's overall atomicity flag as its own
# per-dimension entry and propagates that flag unchanged.
function is_level_atomic(ctx, lvl::VirtualDenseRLELevel)
    sub, flag = is_level_atomic(ctx, lvl.lvl)
    return (vcat(sub, flag), flag)
end
# A DenseRLE level is never concurrent in its own dimension: append `false`
# to the sublevel's per-dimension list and report `false` overall.
function is_level_concurrent(ctx, lvl::VirtualDenseRLELevel)
    sub = first(is_level_concurrent(ctx, lvl.lvl))
    return (vcat(sub, false), false)
end

postype(lvl::VirtualDenseRLELevel) = postype(lvl.lvl)

Expand Down
5 changes: 4 additions & 1 deletion src/tensors/levels/elementlevels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,10 @@ mutable struct VirtualElementLevel <: AbstractVirtualLevel
end

is_level_injective(ctx, ::VirtualElementLevel) = []
is_level_atomic(ctx, lvl::VirtualElementLevel) = false
is_level_atomic(ctx, lvl::VirtualElementLevel) = ([false], false)
# Leaf element level: contributes no dimensions of its own, and is
# reported concurrent overall.
is_level_concurrent(ctx, ::VirtualElementLevel) = ([], true)

lower(ctx::AbstractCompiler, lvl::VirtualElementLevel, ::DefaultStyle) = lvl.ex

Expand Down
5 changes: 4 additions & 1 deletion src/tensors/levels/patternlevels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,10 @@ function virtual_moveto_level(ctx::AbstractCompiler, lvl::VirtualPatternLevel, a
end

is_level_injective(ctx, ::VirtualPatternLevel) = []
is_level_atomic(ctx, lvl::VirtualPatternLevel) = true
is_level_atomic(ctx, lvl::VirtualPatternLevel) = ([false], false)
# Leaf pattern level: contributes no dimensions of its own, and is
# reported concurrent overall.
is_level_concurrent(ctx, ::VirtualPatternLevel) = ([], true)

lower(ctx::AbstractCompiler, lvl::VirtualPatternLevel, ::DefaultStyle) = :(PatternLevel())
virtualize(ctx, ex, ::Type{PatternLevel{Tp}}) where {Tp} = VirtualPatternLevel(Tp)
Expand Down
5 changes: 4 additions & 1 deletion src/tensors/levels/repeatrlelevels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,10 @@ mutable struct VirtualRepeatRLELevel <: AbstractVirtualLevel
prev_pos
end
is_level_injective(ctx, ::VirtualRepeatRLELevel) = [false]
is_level_atomic(ctx, lvl::VirtualRepeatRLELevel) = false
is_level_atomic(ctx, lvl::VirtualRepeatRLELevel) = ([false], false)
# RepeatRLE level: its single dimension is non-concurrent, and the level
# is non-concurrent overall.
is_level_concurrent(ctx, ::VirtualRepeatRLELevel) = ([false], false)

function virtualize(ctx, ex, ::Type{RepeatRLELevel{D, Ti, Tp, Tv, Ptr, Idx, Val}}, tag=:lvl) where {D, Ti, Tp, Tv, Ptr, Idx, Val}
sym = freshen(ctx, tag)
Expand Down
Loading
Loading