diff --git a/src/nditeration.jl b/src/nditeration.jl
index ddf6682e..32d52e26 100644
--- a/src/nditeration.jl
+++ b/src/nditeration.jl
@@ -1,5 +1,63 @@
 module NDIteration
 
+import Base.MultiplicativeInverses: SignedMultiplicativeInverse
+
+# CartesianIndex uses Int instead of Int32
+
+@eval EmptySMI() = $(Expr(:new, SignedMultiplicativeInverse{Int32}, Int32(0), typemax(Int32), 0%Int8, 0%UInt8))
+SMI(i) = i == 0 ? EmptySMI() : SignedMultiplicativeInverse{Int32}(i)
+
+struct FastCartesianIndices{N} <: AbstractArray{CartesianIndex{N}, N}
+    inverses::NTuple{N, SignedMultiplicativeInverse{Int32}}
+end
+
+function FastCartesianIndices(indices::NTuple{N}) where N
+    inverses = map(i->SMI(Int32(i)), indices)
+    FastCartesianIndices(inverses)
+end
+
+function Base.size(FCI::FastCartesianIndices{N}) where N
+    ntuple(Val(N)) do I
+        FCI.inverses[I].divisor
+    end
+end
+
+@inline function Base.getindex(::FastCartesianIndices{0})
+    return CartesianIndex()
+end
+
+@inline function Base.getindex(iter::FastCartesianIndices{N}, I::Vararg{Int, N}) where N
+    @boundscheck checkbounds(iter, I...)
+    index = map(iter.inverses, I) do inv, i
+        @inbounds getindex(Base.OneTo(inv.divisor), i)
+    end
+    CartesianIndex(index)
+end
+
+_ind2sub_recurse(::Tuple{}, ind) = (ind+1,)
+function _ind2sub_recurse(indslast::NTuple{1}, ind)
+    @inline
+    (_lookup(ind, indslast[1]),)
+end
+
+function _ind2sub_recurse(inds, ind)
+    @inline
+    inv = inds[1]
+    indnext, f, l = _div(ind, inv)
+    (ind-l*indnext+f, _ind2sub_recurse(Base.tail(inds), indnext)...)
+end
+
+_lookup(ind, inv::SignedMultiplicativeInverse) = ind+1
+function _div(ind, inv::SignedMultiplicativeInverse)
+    inv.divisor == 0 && throw(DivideError())
+    div(ind%Int32, inv), 1, inv.divisor
+end
+
+function Base._ind2sub(inv::FastCartesianIndices, ind)
+    @inline
+    _ind2sub_recurse(inv.inverses, ind-1)
+end
+
 export _Size, StaticSize, DynamicSize, get
 export NDRange, blocks, workitems, expand
 export DynamicCheck, NoDynamicCheck
@@ -50,18 +108,30 @@ struct NDRange{N, StaticBlocks, StaticWorkitems, DynamicBlock, DynamicWorkitems}
     blocks::DynamicBlock
     workitems::DynamicWorkitems
 
-    function NDRange{N, B, W}() where {N, B, W}
-        new{N, B, W, Nothing, Nothing}(nothing, nothing)
-    end
-
-    function NDRange{N, B, W}(blocks, workitems) where {N, B, W}
+    function NDRange{N, B, W}(blocks::Union{Nothing, FastCartesianIndices{N}}, workitems::Union{Nothing, FastCartesianIndices{N}}) where {N, B, W}
         new{N, B, W, typeof(blocks), typeof(workitems)}(blocks, workitems)
     end
 end
 
-@inline workitems(range::NDRange{N, B, W}) where {N, B, W <: DynamicSize} = range.workitems::CartesianIndices{N}
+function NDRange{N, B, W}() where {N, B, W}
+    NDRange{N, B, W}(nothing, nothing)
+end
+
+function NDRange{N, B, W}(blocks::CartesianIndices, workitems::CartesianIndices) where {N, B, W}
+    return NDRange{N, B, W}(FastCartesianIndices(size(blocks)), FastCartesianIndices(size(workitems)))
+end
+
+function NDRange{N, B, W}(blocks::Nothing, workitems::CartesianIndices) where {N, B, W}
+    return NDRange{N, B, W}(blocks, FastCartesianIndices(size(workitems)))
+end
+
+function NDRange{N, B, W}(blocks::CartesianIndices, workitems::Nothing) where {N, B, W}
+    return NDRange{N, B, W}(FastCartesianIndices(size(blocks)), workitems)
+end
+
+@inline workitems(range::NDRange{N, B, W}) where {N, B, W <: DynamicSize} = range.workitems::FastCartesianIndices{N}
 @inline workitems(range::NDRange{N, B, W}) where {N, B, W <: StaticSize} = CartesianIndices(get(W))::CartesianIndices{N}
 
-@inline blocks(range::NDRange{N, B}) where {N, B <: DynamicSize} = range.blocks::CartesianIndices{N}
+@inline blocks(range::NDRange{N, B}) where {N, B <: DynamicSize} = range.blocks::FastCartesianIndices{N}
 @inline blocks(range::NDRange{N, B}) where {N, B <: StaticSize} = CartesianIndices(get(B))::CartesianIndices{N}
 
 import Base.iterate
@@ -82,7 +152,7 @@ end
 
 Base.@propagate_inbounds function expand(ndrange::NDRange{N}, groupidx::Integer, idx::Integer) where {N}
     # This causes two sdiv operations, one for each Linear to CartesianIndex
-    # expand(ndrange, blocks(ndrange)[groupidx], workitems(ndrange)[idx])
+    return expand(ndrange, blocks(ndrange)[groupidx], workitems(ndrange)[idx])
 
     # The formulation below saves one sdiv
     # but leads to a different index order...
@@ -90,15 +160,15 @@ Base.@propagate_inbounds function expand(ndrange::NDRange{N}, groupidx::Integer,
     # CartesianIndex(32, 32)
     # now: julia> expand(ndrange, 1, 32*32)
     # CartesianIndex(1024, 1)
-    B = blocks(ndrange)
-    W = workitems(ndrange)
-    Ind = ntuple(Val(N)) do I
-        Base.@_inline_meta
-        b = B.indices[I]
-        w = W.indices[I]
-        length(b) * length(w)
-    end
-    CartesianIndices(Ind)[(groupidx-1)* prod(size(W)) + idx]
+    # B = blocks(ndrange)::CartesianIndices
+    # W = workitems(ndrange)::CartesianIndices
+    # Ind = ntuple(Val(N)) do I
+    #     Base.@_inline_meta
+    #     b = B.indices[I]
+    #     w = W.indices[I]
+    #     length(b) * length(w)
+    # end
+    # CartesianIndices(Ind)[(groupidx-1)* prod(size(W)) + idx]
 end
 
 Base.@propagate_inbounds function expand(ndrange::NDRange{N}, groupidx::CartesianIndex{N}, idx::Integer) where {N}
diff --git a/test/compiler.jl b/test/compiler.jl
index cf86386e..ba930305 100644
--- a/test/compiler.jl
+++ b/test/compiler.jl
@@ -39,7 +39,7 @@ end
 function compiler_testsuite(backend, ArrayT)
     kernel = index(CPU(), DynamicSize(), DynamicSize())
-    iterspace = NDRange{1, StaticSize{(128,)}, StaticSize{(8,)}}();
+    iterspace = NDRange{1, StaticSize{(128,)}, StaticSize{(8,)}}()
     ctx = KernelAbstractions.mkcontext(kernel, 1, nothing, iterspace, Val(KernelAbstractions.NoDynamicCheck()))
     @test KernelAbstractions.__index_Global_NTuple(ctx, CartesianIndex(1)) == (1,)
diff --git a/test/localmem.jl b/test/localmem.jl
index b03bfa74..8f1167a3 100644
--- a/test/localmem.jl
+++ b/test/localmem.jl
@@ -8,7 +8,7 @@ using Test
     end
     I = @index(Global, Linear)
    i = @index(Local, Linear)
-    lmem = @localmem Int (N,) # Ok iff groupsize is static
+    lmem = @localmem Int (N,) # Ok iff groupsize is static
     @inbounds begin
         lmem[i] = i
         @synchronize
@@ -23,7 +23,7 @@ end
     end
     I = @index(Global, Linear)
     i = @index(Local, Linear)
-    lmem = @localmem Int (N,) # Ok iff groupsize is static
+    lmem = @localmem Int (N,) # Ok iff groupsize is static
     @inbounds begin
         lmem[i] = i + 3
         for j in 1:2
diff --git a/test/test.jl b/test/test.jl
index 8977231b..c2d07137 100644
--- a/test/test.jl
+++ b/test/test.jl
@@ -154,7 +154,7 @@ function unittest_testsuite(Backend, backend_str, backend_mod, BackendArrayT; sk
     @conditional_testset "Const" skip_tests begin
         let kernel = constarg(Backend(), 8, (1024,))
             # this is poking at internals
-            iterspace = NDRange{1, StaticSize{(128,)}, StaticSize{(8,)}}();
+            iterspace = NDRange{1, StaticSize{(128,)}, StaticSize{(8,)}}()
             ctx = if Backend == CPU
                 KernelAbstractions.mkcontext(kernel, 1, nothing, iterspace, Val(NoDynamicCheck()))
             else
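For context, the sketch below illustrates the idea the patch builds on: precompute one `SignedMultiplicativeInverse` per dimension so that converting a linear work-item index into per-dimension coordinates replaces hardware division with the cached multiplicative inverse. It is not part of the diff and not KernelAbstractions API; `dims`, `invs`, and `lin2cart` are hypothetical names, and only Base's internal `Base.MultiplicativeInverses` module is assumed.

```julia
# Minimal sketch, assuming Base's internal SignedMultiplicativeInverse API.
import Base.MultiplicativeInverses: SignedMultiplicativeInverse

# Precompute one inverse per dimension of a (32, 8) group.
dims = (Int32(32), Int32(8))
invs = map(SignedMultiplicativeInverse{Int32}, dims)

# Zero-based linear index -> one-based (i, j), column-major order.
# `div(lin, invs[1])` uses the cached inverse instead of an sdiv instruction.
function lin2cart(lin::Int32, invs)
    q1 = div(lin, invs[1])          # number of complete columns before `lin`
    i  = lin - q1 * invs[1].divisor # remainder -> 0-based index in dimension 1
    j  = q1                         # quotient  -> 0-based index in dimension 2
    return (i + Int32(1), j + Int32(1))
end

@assert lin2cart(Int32(0), invs)  == (1, 1)
@assert lin2cart(Int32(33), invs) == (2, 2)
# Agrees with the ordering CartesianIndices((32, 8)) would produce.
@assert all(lin2cart(Int32(k), invs) == Tuple(CartesianIndices((32, 8))[k + 1]) for k in 0:255)
```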