diff --git a/NEWS.md b/NEWS.md index adf5ec02b972a..155e4e384f804 100644 --- a/NEWS.md +++ b/NEWS.md @@ -192,12 +192,23 @@ Library improvements * `is_valid_char(c)` now correctly handles Unicode "non-characters", which are valid Unicode codepoints. ([#11171]) - * Data-structure processing + * Array and AbstractArray improvements * New multidimensional iterators and index types for efficient iteration over `AbstractArray`s. Array iteration should generally be written as `for i in eachindex(A) ... end` rather than `for i = 1:length(A) ... end`. ([#8432]) * New implementation of SubArrays with substantial performance and functionality improvements ([#8501]). + * AbstractArray subtypes only need to implement `size` and `getindex` + for scalar indices to support indexing; all other indexing behaviors + (including logical indexing, ranges of indices, vectors, colons, etc.) are + implemented in default fallbacks. Similarly, they only need to implement + scalar `setindex!` to support all forms of indexed assignment ([#10525]). + + * AbstractArrays that do not extend `similar` now return an `Array` by + default ([#10525]). + + * Data-structure processing + * New `sortperm!` function for pre-allocated index arrays ([#8792]). * Switch from `O(N)` to `O(log N)` algorithm for `dequeue!(pq, key)` @@ -1417,6 +1428,7 @@ Too numerous to mention. [#10400]: https://github.com/JuliaLang/julia/issues/10400 [#10446]: https://github.com/JuliaLang/julia/issues/10446 [#10458]: https://github.com/JuliaLang/julia/issues/10458 +[#10525]: https://github.com/JuliaLang/julia/issues/10525 [#10543]: https://github.com/JuliaLang/julia/issues/10543 [#10659]: https://github.com/JuliaLang/julia/issues/10659 [#10679]: https://github.com/JuliaLang/julia/issues/10679 diff --git a/base/abstractarray.jl b/base/abstractarray.jl index 7014523b88e9d..0624fffd2a42c 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -118,41 +118,52 @@ linearindexing{T<:Range}(::Type{T}) = LinearFast() *(::LinearFast, ::LinearSlow) = LinearSlow() *(::LinearSlow, ::LinearSlow) = LinearSlow() +# The real @inline macro is not available this early in the bootstrap, so this +# internal macro splices the meta Expr directly into the function body.
+macro _inline_meta() + Expr(:meta, :inline) +end +macro _noinline_meta() + Expr(:meta, :noinline) +end + ## Bounds checking ## -checkbounds(sz::Int, ::Colon) = nothing -checkbounds(sz::Int, i::Int) = 1 <= i <= sz || throw(BoundsError()) -checkbounds(sz::Int, i::Real) = checkbounds(sz, to_index(i)) -checkbounds(sz::Int, I::AbstractVector{Bool}) = length(I) == sz || throw(BoundsError()) -checkbounds(sz::Int, r::Range{Int}) = isempty(r) || (minimum(r) >= 1 && maximum(r) <= sz) || throw(BoundsError()) -checkbounds{T<:Real}(sz::Int, r::Range{T}) = checkbounds(sz, to_index(r)) - -function checkbounds{T <: Real}(sz::Int, I::AbstractArray{T}) +_checkbounds(sz, i::Integer) = 1 <= i <= sz +_checkbounds(sz, i::Real) = 1 <= to_index(i) <= sz +_checkbounds(sz, I::AbstractVector{Bool}) = length(I) == sz +_checkbounds(sz, r::Range{Int}) = (@_inline_meta; isempty(r) || (minimum(r) >= 1 && maximum(r) <= sz)) +_checkbounds{T<:Real}(sz, r::Range{T}) = (@_inline_meta; _checkbounds(sz, to_index(r))) +_checkbounds(sz, ::Colon) = true +function _checkbounds{T <: Real}(sz, I::AbstractArray{T}) + @_inline_meta + b = true for i in I - checkbounds(sz, i) + b &= _checkbounds(sz, i) end + b end +# Prevent allocation of a GC frame by hiding the BoundsError in a noinline function +throw_boundserror(A, I) = (@_noinline_meta; throw(BoundsError(A, I))) -checkbounds(A::AbstractArray, I::AbstractArray{Bool}) = size(A) == size(I) || throw(BoundsError()) - -checkbounds(A::AbstractArray, I) = checkbounds(length(A), I) - -function checkbounds(A::AbstractMatrix, I::Union(Real,Colon,AbstractArray), J::Union(Real,Colon,AbstractArray)) - checkbounds(size(A,1), I) - checkbounds(size(A,2), J) +checkbounds(A::AbstractArray, I::AbstractArray{Bool}) = size(A) == size(I) || throw_boundserror(A, I) +checkbounds(A::AbstractArray, I::AbstractVector{Bool}) = length(A) == length(I) || throw_boundserror(A, I) +checkbounds(A::AbstractArray, I) = (@_inline_meta; _checkbounds(length(A), I) || throw_boundserror(A, I)) +function checkbounds(A::AbstractMatrix, I::Union(Real,AbstractArray,Colon), J::Union(Real,AbstractArray,Colon)) + @_inline_meta + (_checkbounds(size(A,1), I) && _checkbounds(size(A,2), J)) || throw_boundserror(A, (I, J)) end - -function checkbounds(A::AbstractArray, I::Union(Real,Colon,AbstractArray), J::Union(Real,Colon,AbstractArray)) - checkbounds(size(A,1), I) - checkbounds(trailingsize(A,2), J) +function checkbounds(A::AbstractArray, I::Union(Real,AbstractArray,Colon), J::Union(Real,AbstractArray,Colon)) + @_inline_meta + (_checkbounds(size(A,1), I) && _checkbounds(trailingsize(A,2), J)) || throw_boundserror(A, (I, J)) end - -function checkbounds(A::AbstractArray, I::Union(Real,Colon,AbstractArray)...) +function checkbounds(A::AbstractArray, I::Union(Real,AbstractArray,Colon)...) + @_inline_meta n = length(I) if n > 0 for dim = 1:(n-1) - checkbounds(size(A,dim), I[dim]) + _checkbounds(size(A,dim), I[dim]) || throw_boundserror(A, I) end - checkbounds(trailingsize(A,n), I[n]) + _checkbounds(trailingsize(A,n), I[n]) || throw_boundserror(A, I) end end @@ -178,6 +189,8 @@ similar (a::AbstractArray, T) = similar(a, T, size(a)) similar{T}(a::AbstractArray{T}, dims::Dims) = similar(a, T, dims) similar{T}(a::AbstractArray{T}, dims::Int...) = similar(a, T, dims) similar (a::AbstractArray, T, dims::Int...) 
= similar(a, T, dims) +# similar creates an Array by default +similar (a::AbstractArray, T, dims::Dims) = Array(T, dims) function reshape(a::AbstractArray, dims::Dims) if prod(dims) != length(a) @@ -361,11 +374,7 @@ zero{T}(x::AbstractArray{T}) = fill!(similar(x), zero(T)) # While the definitions for LinearFast are all simple enough to inline on their # own, LinearSlow's CartesianRange is more complicated and requires explicit -# inlining. The real @inline macro is not available this early in the bootstrap, -# so this internal macro splices the meta Expr directly into the function body. -macro _inline_meta() - Expr(:meta, :inline) -end +# inlining. start(A::AbstractArray) = (@_inline_meta(); itr = eachindex(A); (itr, start(itr))) next(A::AbstractArray,i) = (@_inline_meta(); (idx, s) = next(i[1], i[2]); (A[idx], (i[1], s))) done(A::AbstractArray,i) = done(i[1], i[2]) @@ -430,19 +439,6 @@ imag{T<:Real}(x::AbstractArray{T}) = zero(x) \(A::Number, B::AbstractArray) = B ./ A -## Indexing: getindex ## - -getindex(t::AbstractArray, i::Real) = error("indexing not defined for ", typeof(t)) - -# linear indexing with a single multi-dimensional index -function getindex(A::AbstractArray, I::AbstractArray) - x = similar(A, size(I)) - for i in eachindex(I) - x[i] = A[I[i]] - end - return x -end - # index A[:,:,...,i,:,:,...] where "i" is in dimension "d" # TODO: more optimized special cases slicedim(A::AbstractArray, d::Integer, i) = @@ -490,42 +486,202 @@ function circshift{T,N}(a::AbstractArray{T,N}, shiftamts) a[(I::NTuple{N,Vector{Int}})...] end -## Indexing: setindex! ## - -# 1-d indexing is assumed defined on subtypes -setindex!(t::AbstractArray, x, i::Real) = - error("setindex! not defined for ",typeof(t)) -setindex!(t::AbstractArray, x) = throw(MethodError(setindex!, (t, x))) - -## Indexing: handle more indices than dimensions if "extra" indices are 1 - -# Don't require vector/matrix subclasses to implement more than 1/2 indices, -# respectively, by handling the extra dimensions in AbstractMatrix. - -function getindex(A::AbstractVector, i1,i2,i3...) - if i2*prod(i3) != 1 - throw(BoundsError()) +## Approach: +# We only define one fallback method on getindex for all argument types. +# That dispatches to an (inlined) internal _getindex function, where the goal is +# to transform the indices such that we can call the only getindex method that +# we require AbstractArray subtypes must define, either: +# getindex(::T, ::Int) # if linearindexing(T) == LinearFast() +# getindex(::T, ::Int, ::Int, #=...ndims(A) indices...=#) if LinearSlow() +# Unfortunately, it is currently impossible to express the latter method for +# arbitrary dimensionalities. We could get around that with ::CartesianIndex{N}, +# but that isn't as obvious and would require that the function be inlined to +# avoid allocations. If the subtype hasn't defined those methods, it goes back +# to the _getindex function where an error is thrown to prevent stack overflows. +# +# We use the same scheme for unsafe_getindex, with the exception that we can +# fallback to the safe version if the subtype hasn't defined the required +# unsafe method. + +function getindex(A::AbstractArray, I...) + @_inline_meta + _getindex(linearindexing(A), A, I...) +end +function unsafe_getindex(A::AbstractArray, I...) + @_inline_meta + _unsafe_getindex(linearindexing(A), A, I...) 
+end +## Internal definitions +_getindex(::LinearFast, A::AbstractArray) = (@_inline_meta; getindex(A, 1)) +_getindex(::LinearSlow, A::AbstractArray) = (@_inline_meta; _getindex(A, 1)) +_unsafe_getindex(::LinearFast, A::AbstractArray) = (@_inline_meta; unsafe_getindex(A, 1)) +_unsafe_getindex(::LinearSlow, A::AbstractArray) = (@_inline_meta; _unsafe_getindex(A, 1)) +_getindex(::LinearIndexing, A::AbstractArray, I...) = error("indexing $(typeof(A)) with types $(typeof(I)) is not supported") +_unsafe_getindex(::LinearIndexing, A::AbstractArray, I...) = error("indexing $(typeof(A)) with types $(typeof(I)) is not supported") + +## LinearFast Scalar indexing +_getindex(::LinearFast, A::AbstractArray, I::Int) = error("indexing not defined for ", typeof(A)) +function _getindex(::LinearFast, A::AbstractArray, I::Real...) + @_inline_meta + # We must check bounds for sub2ind; so we can then call unsafe_getindex + checkbounds(A, I...) + unsafe_getindex(A, sub2ind(size(A), to_index(I)...)) +end +_unsafe_getindex(::LinearFast, A::AbstractArray, I::Int) = (@_inline_meta; getindex(A, I)) +function _unsafe_getindex(::LinearFast, A::AbstractArray, I::Real...) + @_inline_meta + unsafe_getindex(A, sub2ind(size(A), to_index(I)...)) +end + +# LinearSlow Scalar indexing +@generated function _getindex{T,AN}(::LinearSlow, A::AbstractArray{T,AN}, I::Real...) + N = length(I) + if N == AN + :(error("indexing not defined for ", typeof(A))) + elseif N > AN + # Drop trailing ones + Isplat = Expr[:(to_index(I[$d])) for d = 1:AN] + Osplat = Expr[:(to_index(I[$d]) == 1) for d = AN+1:N] + quote + $(Expr(:meta, :inline)) + (&)($(Osplat...)) || throw_boundserror(A, I) + getindex(A, $(Isplat...)) + end + else + # Expand the last index into the appropriate number of indices + Isplat = Expr[:(to_index(I[$d])) for d = 1:N-1] + i = 0 + for d=N:AN + push!(Isplat, :(s[$(i+=1)])) + end + sz = Expr(:tuple) + sz.args = Expr[:(size(A, $d)) for d=N:AN] + quote + $(Expr(:meta, :inline)) + # ind2sub requires all dimensions to be nonzero, so checkbounds first + checkbounds(A, I...) + s = ind2sub($sz, to_index(I[$N])) + unsafe_getindex(A, $(Isplat...)) + end end - A[i1] end -function getindex(A::AbstractMatrix, i1,i2,i3,i4...) - if i3*prod(i4) != 1 - throw(BoundsError()) +@generated function _unsafe_getindex{T,AN}(::LinearSlow, A::AbstractArray{T,AN}, I::Real...) + N = length(I) + if N == AN + Isplat = Expr[:(to_index(I[$d])) for d = 1:N] + :($(Expr(:meta, :inline)); getindex(A, $(Isplat...))) + elseif N > AN + # Drop trailing dimensions (unchecked) + Isplat = Expr[:(to_index(I[$d])) for d = 1:AN] + quote + $(Expr(:meta, :inline)) + unsafe_getindex(A, $(Isplat...)) + end + else + # Expand the last index into the appropriate number of indices + Isplat = Expr[:(to_index(I[$d])) for d = 1:N-1] + for d=N:AN + push!(Isplat, :(s[$(d-N+1)])) + end + sz = Expr(:tuple) + sz.args = Expr[:(size(A, $d)) for d=N:AN] + quote + $(Expr(:meta, :inline)) + s = ind2sub($sz, to_index(I[$N])) + unsafe_getindex(A, $(Isplat...)) + end end - A[i1,i2] end -function setindex!(A::AbstractVector, x, i1,i2,i3...) - if i2*prod(i3) != 1 - throw(BoundsError()) +## Setindex! is defined similarly. We first dispatch to an internal _setindex! +# function that allows dispatch on array storage +function setindex!(A::AbstractArray, v, I...) + @_inline_meta + _setindex!(linearindexing(A), A, v, I...) +end +function unsafe_setindex!(A::AbstractArray, v, I...) + @_inline_meta + _unsafe_setindex!(linearindexing(A), A, v, I...)
+end +## Internal definitions +_setindex!(::LinearFast, A::AbstractArray, v) = (@_inline_meta; setindex!(A, v, 1)) +_setindex!(::LinearSlow, A::AbstractArray, v) = (@_inline_meta; _setindex!(A, v, 1)) +_unsafe_setindex!(::LinearFast, A::AbstractArray, v) = (@_inline_meta; unsafe_setindex!(A, v, 1)) +_unsafe_setindex!(::LinearSlow, A::AbstractArray, v) = (@_inline_meta; _unsafe_setindex!(A, v, 1)) +_setindex!(::LinearIndexing, A::AbstractArray, v, I...) = error("indexing $(typeof(A)) with types $(typeof(I)) is not supported") +_unsafe_setindex!(::LinearIndexing, A::AbstractArray, v, I...) = error("indexing $(typeof(A)) with types $(typeof(I)) is not supported") + +## LinearFast Scalar indexing +_setindex!(::LinearFast, A::AbstractArray, v, I::Int) = error("indexed assignment not defined for ", typeof(A)) +function _setindex!(::LinearFast, A::AbstractArray, v, I::Real...) + @_inline_meta + # We must check bounds for sub2ind; so we can then call unsafe_setindex! + checkbounds(A, I...) + unsafe_setindex!(A, v, sub2ind(size(A), to_index(I)...)) +end +_unsafe_setindex!(::LinearFast, A::AbstractArray, v, I::Int) = (@_inline_meta; setindex!(A, v, I)) +function _unsafe_setindex!(::LinearFast, A::AbstractArray, v, I::Real...) + @_inline_meta + unsafe_setindex!(A, v, sub2ind(size(A), to_index(I)...)) +end + +# LinearSlow Scalar indexing +@generated function _setindex!{T,AN}(::LinearSlow, A::AbstractArray{T,AN}, v, I::Real...) + N = length(I) + if N == AN + :(error("indexed assignment not defined for ", typeof(A))) + elseif N > AN + # Drop trailing ones + Isplat = Expr[:(to_index(I[$d])) for d = 1:AN] + Osplat = Expr[:(to_index(I[$d]) == 1) for d = AN+1:N] + quote + $(Expr(:meta, :inline)) + (&)($(Osplat...)) || throw_boundserror(A, I) + setindex!(A, v, $(Isplat...)) + end + else + # Expand the last index into the appropriate number of indices + Isplat = Expr[:(to_index(I[$d])) for d = 1:N-1] + i = 0 + for d=N:AN + push!(Isplat, :(s[$(i+=1)])) + end + sz = Expr(:tuple) + sz.args = Expr[:(size(A, $d)) for d=N:AN] + quote + $(Expr(:meta, :inline)) + checkbounds(A, I...) + s = ind2sub($sz, to_index(I[$N])) + unsafe_setindex!(A, v, $(Isplat...)) + end end - A[i1] = x end -function setindex!(A::AbstractMatrix, x, i1,i2,i3,i4...) - if i3*prod(i4) != 1 - throw(BoundsError()) +@generated function _unsafe_setindex!{T,AN}(::LinearSlow, A::AbstractArray{T,AN}, v, I::Real...)
+ N = length(I) + if N == AN + Isplat = Expr[:(to_index(I[$d])) for d = 1:N] + :(setindex!(A, v, $(Isplat...))) + elseif N > AN + # Drop trailing dimensions (unchecked) + Isplat = Expr[:(to_index(I[$d])) for d = 1:AN] + quote + $(Expr(:meta, :inline)) + unsafe_setindex!(A, v, $(Isplat...)) + end + else + # Expand the last index into the appropriate number of indices + Isplat = Expr[:(to_index(I[$d])) for d = 1:N-1] + for d=N:AN + push!(Isplat, :(s[$(d-N+1)])) + end + sz = Expr(:tuple) + sz.args = Expr[:(size(A, $d)) for d=N:AN] + quote + $(Expr(:meta, :inline)) + s = ind2sub($sz, to_index(I[$N])) + unsafe_setindex!(A, v, $(Isplat...)) + end end - A[i1,i2] = x end ## get (getindex with a default value) ## diff --git a/base/array.jl b/base/array.jl index 3341ef11b057a..c7fbb35f46150 100644 --- a/base/array.jl +++ b/base/array.jl @@ -293,108 +293,47 @@ done(a::Array,i) = (i > length(a)) ## Indexing: getindex ## -getindex(a::Array) = arrayref(a,1) - -getindex(A::Array, i0::Real) = arrayref(A,to_index(i0)) -getindex(A::Array, i0::Real, i1::Real) = arrayref(A,to_index(i0),to_index(i1)) -getindex(A::Array, i0::Real, i1::Real, i2::Real) = - arrayref(A,to_index(i0),to_index(i1),to_index(i2)) -getindex(A::Array, i0::Real, i1::Real, i2::Real, i3::Real) = - arrayref(A,to_index(i0),to_index(i1),to_index(i2),to_index(i3)) -getindex(A::Array, i0::Real, i1::Real, i2::Real, i3::Real, i4::Real) = - arrayref(A,to_index(i0),to_index(i1),to_index(i2),to_index(i3),to_index(i4)) -getindex(A::Array, i0::Real, i1::Real, i2::Real, i3::Real, i4::Real, i5::Real) = - arrayref(A,to_index(i0),to_index(i1),to_index(i2),to_index(i3),to_index(i4),to_index(i5)) - -getindex(A::Array, i0::Real, i1::Real, i2::Real, i3::Real, i4::Real, i5::Real, I::Real...) = - arrayref(A,to_index(i0),to_index(i1),to_index(i2),to_index(i3),to_index(i4),to_index(i5),to_index(I)...) - -# Fast copy using copy! for UnitRange -function getindex(A::Array, I::UnitRange{Int}) +getindex(A::Array, i1::Int) = arrayref(A, i1) +unsafe_getindex(A::Array, i1::Int) = @inbounds return arrayref(A, i1) + +# Faster contiguous indexing using copy! for UnitRange and Colon +getindex(A::Array, I::UnitRange{Int}) = (checkbounds(A, I); unsafe_getindex(A, I)) +function unsafe_getindex(A::Array, I::UnitRange{Int}) lI = length(I) X = similar(A, lI) if lI > 0 - copy!(X, 1, A, first(I), lI) + unsafe_copy!(X, 1, A, first(I), lI) end return X end - -function getindex{T<:Real}(A::Array, I::AbstractVector{T}) - return [ A[i] for i in to_index(I) ] -end -function getindex{T<:Real}(A::Range, I::AbstractVector{T}) - return [ A[i] for i in to_index(I) ] -end -function getindex(A::Range, I::AbstractVector{Bool}) - checkbounds(A, I) - return [ A[i] for i in to_index(I) ] -end - - -# logical indexing -# (when the indexing is provided as an Array{Bool} or a BitArray we can be -# sure about the behaviour and use unsafe_getindex; in the general case -# we can't and must use getindex, otherwise silent corruption can happen) - -@generated function getindex_bool_1d(A::Array, I::AbstractArray{Bool}) - idxop = I <: Union(Array{Bool}, BitArray) ? 
:unsafe_getindex : :getindex - quote - checkbounds(A, I) - n = sum(I) - out = similar(A, n) - c = 1 - for i = 1:length(I) - if $idxop(I, i) - @inbounds out[c] = A[i] - c += 1 - end - end - out +getindex(A::Array, c::Colon) = unsafe_getindex(A, c) +function unsafe_getindex(A::Array, ::Colon) + lI = length(A) + X = similar(A, lI) + if lI > 0 + unsafe_copy!(X, 1, A, 1, lI) end + return X end -getindex(A::Vector, I::AbstractVector{Bool}) = getindex_bool_1d(A, I) -getindex(A::Vector, I::AbstractArray{Bool}) = getindex_bool_1d(A, I) -getindex(A::Array, I::AbstractVector{Bool}) = getindex_bool_1d(A, I) -getindex(A::Array, I::AbstractArray{Bool}) = getindex_bool_1d(A, I) - +# This is redundant with the abstract fallbacks, but needed for bootstrap +function getindex{T<:Real}(A::Array, I::Range{T}) + return [ A[to_index(i)] for i in I ] +end ## Indexing: setindex! ## -setindex!{T}(A::Array{T}, x) = arrayset(A, convert(T,x), 1) - setindex!{T}(A::Array{T}, x, i0::Real) = arrayset(A, convert(T,x), to_index(i0)) -setindex!{T}(A::Array{T}, x, i0::Real, i1::Real) = - arrayset(A, convert(T,x), to_index(i0), to_index(i1)) -setindex!{T}(A::Array{T}, x, i0::Real, i1::Real, i2::Real) = - arrayset(A, convert(T,x), to_index(i0), to_index(i1), to_index(i2)) -setindex!{T}(A::Array{T}, x, i0::Real, i1::Real, i2::Real, i3::Real) = - arrayset(A, convert(T,x), to_index(i0), to_index(i1), to_index(i2), to_index(i3)) -setindex!{T}(A::Array{T}, x, i0::Real, i1::Real, i2::Real, i3::Real, i4::Real) = - arrayset(A, convert(T,x), to_index(i0), to_index(i1), to_index(i2), to_index(i3), to_index(i4)) -setindex!{T}(A::Array{T}, x, i0::Real, i1::Real, i2::Real, i3::Real, i4::Real, i5::Real) = - arrayset(A, convert(T,x), to_index(i0), to_index(i1), to_index(i2), to_index(i3), to_index(i4), to_index(i5)) -setindex!{T}(A::Array{T}, x, i0::Real, i1::Real, i2::Real, i3::Real, i4::Real, i5::Real, I::Real...) = - arrayset(A, convert(T,x), to_index(i0), to_index(i1), to_index(i2), to_index(i3), to_index(i4), to_index(i5), to_index(I)...) - -function setindex!{T<:Real}(A::Array, x, I::AbstractVector{T}) + +# These are redundant with the abstract fallbacks but needed for bootstrap +function setindex!(A::Array, x, I::AbstractVector{Int}) + is(A, I) && (I = copy(I)) for i in I A[i] = x end return A end - -function setindex!{T}(A::Array{T}, X::Array{T}, I::UnitRange{Int}) - if length(X) != length(I) - throw_setindex_mismatch(X, (I,)) - end - copy!(A, first(I), X, 1, length(I)) - return A -end - -function setindex!{T<:Real}(A::Array, X::AbstractArray, I::AbstractVector{T}) - if length(X) != length(I) - throw_setindex_mismatch(X, (I,)) - end +function setindex!(A::Array, X::AbstractArray, I::AbstractVector{Int}) + setindex_shape_check(X, length(I)) count = 1 if is(X,A) X = copy(X) @@ -409,48 +348,26 @@ function setindex!{T<:Real}(A::Array, X::AbstractArray, I::AbstractVector{T}) return A end - -# logical indexing -# (when the indexing is provided as an Array{Bool} or a BitArray we can be -# sure about the behaviour and use unsafe_getindex; in the general case -# we can't and must use getindex, otherwise silent corruption can happen) - -@generated function assign_bool_scalar_1d!(A::Array, x, I::AbstractArray{Bool}) - idxop = I <: Union(Array{Bool}, BitArray) ? :unsafe_getindex : :getindex - quote - checkbounds(A, I) - for i = 1:length(I) - if $idxop(I, i) - @inbounds A[i] = x - end - end - A +# Faster contiguous setindex! with copy! 
+setindex!{T}(A::Array{T}, X::Array{T}, I::UnitRange{Int}) = (checkbounds(A, I); unsafe_setindex!(A, X, I)) +function unsafe_setindex!{T}(A::Array{T}, X::Array{T}, I::UnitRange{Int}) + lI = length(I) + setindex_shape_check(X, lI) + if lI > 0 + unsafe_copy!(A, first(I), X, 1, lI) end + return A end - -@generated function assign_bool_vector_1d!(A::Array, X::AbstractArray, I::AbstractArray{Bool}) - idxop = I <: Union(Array{Bool}, BitArray) ? :unsafe_getindex : :getindex - quote - checkbounds(A, I) - c = 1 - for i = 1:length(I) - if $idxop(I, i) - x = X[c] - @inbounds A[i] = x - c += 1 - end - end - if length(X) != c-1 - throw(DimensionMismatch("assigned $(length(X)) elements to length $(c-1) destination")) - end - A +setindex!{T}(A::Array{T}, X::Array{T}, c::Colon) = unsafe_setindex!(A, X, c) +function unsafe_setindex!{T}(A::Array{T}, X::Array{T}, ::Colon) + lI = length(A) + setindex_shape_check(X, lI) + if lI > 0 + unsafe_copy!(A, 1, X, 1, lI) end + return A end -setindex!(A::Array, X::AbstractArray, I::AbstractVector{Bool}) = assign_bool_vector_1d!(A, X, I) -setindex!(A::Array, X::AbstractArray, I::AbstractArray{Bool}) = assign_bool_vector_1d!(A, X, I) -setindex!(A::Array, x, I::AbstractVector{Bool}) = assign_bool_scalar_1d!(A, x, I) -setindex!(A::Array, x, I::AbstractArray{Bool}) = assign_bool_scalar_1d!(A, x, I) # efficiently grow an array diff --git a/base/bitarray.jl b/base/bitarray.jl index 961bb998f5c12..231a78e26ea9f 100644 --- a/base/bitarray.jl +++ b/base/bitarray.jl @@ -348,60 +348,8 @@ bitpack{T,N}(A::AbstractArray{T,N}) = convert(BitArray{N}, A) return r end -@inline function getindex(B::BitArray, i::Int) - 1 <= i <= length(B) || throw(BoundsError(B, i)) - return unsafe_bitgetindex(B.chunks, i) -end - -getindex(B::BitArray, i::Real) = getindex(B, to_index(i)) - -getindex(B::BitArray) = getindex(B, 1) - -# 0d bitarray -getindex(B::BitArray{0}) = unsafe_bitgetindex(B.chunks, 1) - -function getindex{T<:Real}(B::BitArray, I::AbstractVector{T}) - X = BitArray(length(I)) - lB = length(B) - Xc = X.chunks - Bc = B.chunks - ind = 1 - for i in I - # faster X[ind] = B[i] - j = to_index(i) - 1 <= j <= lB || throw(BoundsError(B, j)) - unsafe_bitsetindex!(Xc, unsafe_bitgetindex(Bc, j), ind) - ind += 1 - end - return X -end - -# logical indexing -# (when the indexing is provided as an Array{Bool} or a BitArray we can be -# sure about the behaviour and use unsafe_getindex; in the general case -# we can't and must use getindex, otherwise silent corruption can happen) -# (multiple signatures for disambiguation) -for IT in [AbstractVector{Bool}, AbstractArray{Bool}] - @eval @generated function getindex(B::BitArray, I::$IT) - idxop = I <: Union(Array{Bool}, BitArray) ? :unsafe_getindex : :getindex - quote - checkbounds(B, I) - n = sum(I) - X = BitArray(n) - Xc = X.chunks - Bc = B.chunks - ind = 1 - for i = 1:length(I) - if $idxop(I, i) - # faster X[ind] = B[i] - unsafe_bitsetindex!(Xc, unsafe_bitgetindex(Bc, i), ind) - ind += 1 - end - end - return X - end - end -end +@inline getindex(B::BitArray, i::Int) = (checkbounds(B, i); unsafe_getindex(B, i)) +@inline unsafe_getindex(B::BitArray, i::Int) = unsafe_bitgetindex(B.chunks, i) ## Indexing: setindex! 
## @@ -417,11 +365,9 @@ end end end -setindex!(B::BitArray, x) = setindex!(B, convert(Bool,x), 1) - -function setindex!(B::BitArray, x::Bool, i::Int) - 1 <= i <= length(B) || throw(BoundsError(B, i)) - unsafe_bitsetindex!(B.chunks, x, i) +setindex!(B::BitArray, x, i::Int) = (checkbounds(B, i); unsafe_setindex!(B, x, i)) +@inline function unsafe_setindex!(B::BitArray, x, i::Int) + unsafe_bitsetindex!(B.chunks, convert(Bool, x), i) return B end @@ -430,12 +376,13 @@ end # sure about the behaviour and use unsafe_getindex; in the general case # we can't and must use getindex, otherwise silent corruption can happen) -function setindex!(B::BitArray, x, I::BitArray) - checkbounds(B, I) +# When indexing with a BitArray, we can operate whole chunks at a time for a ~100x gain +setindex!(B::BitArray, x, I::BitArray) = (checkbounds(B, I); unsafe_setindex!(B, x, I)) +function unsafe_setindex!(B::BitArray, x, I::BitArray) y = convert(Bool, x) Bc = B.chunks Ic = I.chunks - @assert length(Bc) == length(Ic) + length(Bc) == length(Ic) || throw_boundserror(B, I) @inbounds if y for i = 1:length(Bc) Bc[i] |= Ic[i] @@ -448,26 +395,15 @@ function setindex!(B::BitArray, x, I::BitArray) return B end -@generated function setindex!(B::BitArray, x, I::AbstractArray{Bool}) - idxop = I <: Array{Bool} ? :unsafe_getindex : :getindex - quote - checkbounds(B, I) - y = convert(Bool, x) - Bc = B.chunks - for i = 1:length(I) - # faster I[i] && B[i] = y - $idxop(I, i) && unsafe_bitsetindex!(Bc, y, i) - end - return B - end -end - -function setindex!(B::BitArray, X::AbstractArray, I::BitArray) - checkbounds(B, I) +# Assigning an array of bools is more complicated, but we can still do some +# work on chunks by combining X and I 64 bits at a time to improve perf by ~40% +setindex!(B::BitArray, X::AbstractArray, I::BitArray) = (checkbounds(B, I); unsafe_setindex!(B, X, I)) +function unsafe_setindex!(B::BitArray, X::AbstractArray, I::BitArray) Bc = B.chunks Ic = I.chunks - @assert length(Bc) == length(Ic) + length(Bc) == length(Ic) || throw_boundserror(B, I) lc = length(Bc) + lx = length(X) last_chunk_len = Base._mod64(length(B)-1)+1 c = 1 @@ -477,7 +413,8 @@ function setindex!(B::BitArray, X::AbstractArray, I::BitArray) u = UInt64(1) for j = 1:(i < lc ? 64 : last_chunk_len) if Imsk & u != 0 - x = convert(Bool, X[c]) + lx < c && throw_setindex_mismatch(X, c) + x = convert(Bool, unsafe_getindex(X, c)) if x C |= u else @@ -490,31 +427,11 @@ function setindex!(B::BitArray, X::AbstractArray, I::BitArray) @inbounds Bc[i] = C end if length(X) != c-1 - throw(DimensionMismatch("assigned $(length(X)) elements to length $(c-1) destination")) + throw_setindex_mismatch(X, c-1) end return B end -@generated function setindex!(B::BitArray, X::AbstractArray, I::AbstractArray{Bool}) - idxop = I <: Array{Bool} ? 
:unsafe_getindex : :getindex - quote - checkbounds(B, I) - Bc = B.chunks - c = 1 - for i = 1:length(I) - if $idxop(I, i) - # faster B[i] = X[c] - unsafe_bitsetindex!(Bc, convert(Bool, X[c]), i) - c += 1 - end - end - if length(X) != c-1 - throw(DimensionMismatch("assigned $(length(X)) elements to length $(c-1) destination")) - end - return B - end -end - ## Dequeue functionality ## function push!(B::BitVector, item) diff --git a/base/broadcast.jl b/base/broadcast.jl index 740b506be5301..5c6e3bec4bb45 100644 --- a/base/broadcast.jl +++ b/base/broadcast.jl @@ -273,7 +273,7 @@ end X = x # To call setindex_shape_check, we need to create fake 1-d indexes of the proper size @nexprs $N d->(fakeI_d = 1:shape_d) - Base.setindex_shape_check(X, (@ntuple $N fakeI)...) + @ncall $N Base.setindex_shape_check X shape k = 1 @nloops $N i d->(1:shape_d) d->(@nexprs $N k->(j_d_k = size(I_k, d) == 1 ? 1 : i_d)) begin @nexprs $N k->(@inbounds J_k = @nref $N I_k d->j_d_k) diff --git a/base/deprecated.jl b/base/deprecated.jl index 3552ee9fc1ea0..d926e93d1f46a 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -489,3 +489,12 @@ function chol(A::AbstractMatrix, uplo::Symbol) "use chol(a::AbstractMatrix, uplo::Union(Val{:L},Val{:U})) instead"), :chol) chol(A, Val{uplo}) end + +_ensure_vector(A::AbstractArray) = vec(A) +_ensure_vector(A) = A +_ensure_vectors() = () +_ensure_vectors(A, As...) = (_ensure_vector(A), _ensure_vectors(As...)...) +function _unsafe_setindex!(l::LinearIndexing, A::AbstractArray, x, J::Union(Real,AbstractArray,Colon)...) + depwarn("multidimensional indexed assignment with multidimensional arrays is deprecated, use vec to convert indices to vectors", :_unsafe_setindex!) + _unsafe_setindex!(l, A, x, _ensure_vectors(J...)...) +end diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index e8e88b94a96f2..131865ba31690 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -34,14 +34,8 @@ end fill!(D::Diagonal, x) = (fill!(D.diag, x); D) full(D::Diagonal) = diagm(D.diag) -getindex(D::Diagonal, i::Integer, j::Integer) = i == j ? D.diag[i] : zero(eltype(D.diag)) - -function getindex(D::Diagonal, i::Integer) - n = length(D.diag) - id = div(i-1, n) - id + id * n == i-1 && return D.diag[id+1] - zero(eltype(D.diag)) -end +getindex(D::Diagonal, i::Int, j::Int) = i == j ? D.diag[i] : zero(eltype(D.diag)) +unsafe_getindex(D::Diagonal, i::Int, j::Int) = i == j ? unsafe_getindex(D.diag, i) : zero(eltype(D.diag)) ishermitian{T<:Real}(D::Diagonal{T}) = true ishermitian(D::Diagonal) = all(D.diag .== real(D.diag)) diff --git a/base/linalg/symmetric.jl b/base/linalg/symmetric.jl index 133ace895bba1..2fa2772356735 100644 --- a/base/linalg/symmetric.jl +++ b/base/linalg/symmetric.jl @@ -23,9 +23,10 @@ typealias HermOrSym{T,S} Union(Hermitian{T,S}, Symmetric{T,S}) typealias RealHermSymComplexHerm{T<:Real,S} Union(Hermitian{T,S}, Symmetric{T,S}, Hermitian{Complex{T},S}) size(A::HermOrSym, args...) = size(A.data, args...) -getindex(A::HermOrSym, i::Integer) = ((q, r) = divrem(i - 1, size(A, 1)); A[r + 1, q + 1]) getindex(A::Symmetric, i::Integer, j::Integer) = (A.uplo == 'U') == (i < j) ? getindex(A.data, i, j) : getindex(A.data, j, i) getindex(A::Hermitian, i::Integer, j::Integer) = (A.uplo == 'U') == (i < j) ? getindex(A.data, i, j) : conj(getindex(A.data, j, i)) +unsafe_getindex(A::Symmetric, i::Integer, j::Integer) = (A.uplo == 'U') == (i < j) ? 
unsafe_getindex(A.data, i, j) : unsafe_getindex(A.data, j, i) +unsafe_getindex(A::Hermitian, i::Integer, j::Integer) = (A.uplo == 'U') == (i < j) ? unsafe_getindex(A.data, i, j) : conj(unsafe_getindex(A.data, j, i)) full(A::Symmetric) = copytri!(copy(A.data), A.uplo) full(A::Hermitian) = copytri!(copy(A.data), A.uplo, true) convert{T,S<:AbstractMatrix}(::Type{Symmetric{T,S}},A::Symmetric{T,S}) = A diff --git a/base/linalg/triangular.jl b/base/linalg/triangular.jl index ea984dd5717cc..d6dee55628f6b 100644 --- a/base/linalg/triangular.jl +++ b/base/linalg/triangular.jl @@ -107,7 +107,6 @@ function full!{T,S}(A::UnitUpperTriangular{T,S}) B end -getindex(A::AbstractTriangular, i::Integer) = ((m, n) = divrem(i - 1, size(A, 1)); A[n + 1, m + 1]) getindex{T,S}(A::UnitLowerTriangular{T,S}, i::Integer, j::Integer) = i == j ? one(T) : (i > j ? A.data[i,j] : zero(A.data[i,j])) getindex{T,S}(A::LowerTriangular{T,S}, i::Integer, j::Integer) = i >= j ? A.data[i,j] : zero(A.data[i,j]) getindex{T,S}(A::UnitUpperTriangular{T,S}, i::Integer, j::Integer) = i == j ? one(T) : (i < j ? A.data[i,j] : zero(A.data[i,j])) diff --git a/base/multidimensional.jl b/base/multidimensional.jl index cb93c225865a9..87096095155b7 100644 --- a/base/multidimensional.jl +++ b/base/multidimensional.jl @@ -55,65 +55,6 @@ length{I<:CartesianIndex}(::Type{I})=length(super(I)) # indexing getindex(index::CartesianIndex, i::Integer) = getfield(index, i)::Int -@generated function getindex{N}(A::Array, index::CartesianIndex{N}) - :(Base.arrayref(A, $(cartindex_exprs((index,), (:index,))...))) -end -@generated function getindex{N}(A::Array, i::Integer, index::CartesianIndex{N}) - :(Base.arrayref(A, $(cartindex_exprs((i, index), (:i, :index))...))) -end -@generated function getindex{M,N}(A::Array, index1::CartesianIndex{M}, i::Integer, index2::CartesianIndex{N}) - :(Base.arrayref(A, $(cartindex_exprs((index1, i, index2), (:index1, :i, :index2))...))) -end -@generated function setindex!{T,N}(A::Array{T}, v, index::CartesianIndex{N}) - :(Base.arrayset(A, convert($T,v), $(cartindex_exprs((index,), (:index,))...))) -end -@generated function setindex!{T,N}(A::Array{T}, v, i::Integer, index::CartesianIndex{N}) - :(Base.arrayset(A, convert($T,v), $(cartindex_exprs((i, index), (:i, :index))...))) -end -@generated function setindex!{T,M,N}(A::Array{T}, v, index1::CartesianIndex{M}, i::Integer, index2::CartesianIndex{N}) - :(Base.arrayset(A, convert($T,v), $(cartindex_exprs((index1, i, index2), (:index1, :i, :index2))...))) -end - -@generated function getindex{N}(A::AbstractArray, index::CartesianIndex{N}) - :(getindex(A, $(cartindex_exprs((index,), (:index,))...))) -end -@generated function getindex{N}(A::AbstractArray, i::Integer, index::CartesianIndex{N}) - :(getindex(A, $(cartindex_exprs((i, index), (:i, :index))...))) -end -@generated function setindex!{T,N}(A::AbstractArray{T}, v, index::CartesianIndex{N}) - :(setindex!(A, v, $(cartindex_exprs((index,), (:index,))...))) -end -@generated function setindex!{T,N}(A::AbstractArray{T}, v, i::Integer, index::CartesianIndex{N}) - :(setindex!(A, v, $(cartindex_exprs((i, index), (:i, :index))...))) -end -for AT in (AbstractVector, AbstractMatrix, AbstractArray) # nix ambiguity warning - @eval begin - @generated function getindex{M,N}(A::$AT, index1::CartesianIndex{M}, i::Integer, index2::CartesianIndex{N}) - :(getindex(A, $(cartindex_exprs((index1, i, index2), (:index1, :i, :index2))...))) - end - @generated function setindex!{M,N}(A::$AT, v, index1::CartesianIndex{M}, i::Integer, 
index2::CartesianIndex{N}) - :(setindex!(A, v, $(cartindex_exprs((index1, i, index2), (:index1, :i, :index2))...))) - end - end -end - -function cartindex_exprs(indexes, syms) - exprs = Any[] - for (i,ind) in enumerate(indexes) - if ind <: Number - push!(exprs, :($(syms[i]))) - else - for j = 1:length(ind) - push!(exprs, :($(syms[i])[$j])) - end - end - end - if isempty(exprs) - push!(exprs, 1) # Handle the zero-dimensional case - end - exprs -end - # arithmetic, min/max for op in (:+, :-, :min, :max) @eval begin @@ -226,116 +167,237 @@ end # IteratorsMD using .IteratorsMD - -### From array.jl - -@generated function checksize(A::AbstractArray, I...) +# Recursively compute the lengths of a list of indices, without dropping scalars +# These need to be inlined for more than 3 indexes +index_lengths(A::AbstractArray, I::Colon) = (length(A),) +index_lengths(A::AbstractArray, I::AbstractArray{Bool}) = (sum(I),) +index_lengths(A::AbstractArray, I::AbstractArray) = (length(I),) +@inline index_lengths(A::AbstractArray, I...) = index_lengths_dim(A, 1, I...) +index_lengths_dim(A, dim) = () +index_lengths_dim(A, dim, ::Colon) = (trailingsize(A, dim),) +@inline index_lengths_dim(A, dim, ::Colon, i, I...) = (size(A, dim), index_lengths_dim(A, dim+1, i, I...)...) +@inline index_lengths_dim(A, dim, ::Real, I...) = (1, index_lengths_dim(A, dim+1, I...)...) +@inline index_lengths_dim(A, dim, i::AbstractVector{Bool}, I...) = (sum(i), index_lengths_dim(A, dim+1, I...)...) +@inline index_lengths_dim(A, dim, i::AbstractVector, I...) = (length(i), index_lengths_dim(A, dim+1, I...)...) + +# shape of array to create for getindex() with indexes I, dropping trailing scalars +index_shape(A::AbstractArray, I::AbstractArray) = size(I) # Linear index reshape +index_shape(A::AbstractArray, I::AbstractArray{Bool}) = (sum(I),) # Logical index +index_shape(A::AbstractArray, I::Colon) = (length(A),) +@inline index_shape(A::AbstractArray, I...) = index_shape_dim(A, 1, I...) +index_shape_dim(A, dim, I::Real...) = () +index_shape_dim(A, dim, ::Colon) = (trailingsize(A, dim),) +@inline index_shape_dim(A, dim, ::Colon, i, I...) = (size(A, dim), index_shape_dim(A, dim+1, i, I...)...) +@inline index_shape_dim(A, dim, ::Real, I...) = (1, index_shape_dim(A, dim+1, I...)...) +@inline index_shape_dim(A, dim, i::AbstractVector{Bool}, I...) = (sum(i), index_shape_dim(A, dim+1, I...)...) +@inline index_shape_dim(A, dim, i::AbstractVector, I...) = (length(i), index_shape_dim(A, dim+1, I...)...) + +### From abstractarray.jl: Internal multidimensional indexing definitions ### +# These are not defined directly on getindex and unsafe_getindex to avoid +# ambiguities for AbstractArray subtypes. See the note in abstractarray.jl + +# Note that it's most efficient to call checkbounds first, and then to_index +@inline function _getindex(l::LinearIndexing, A::AbstractArray, I::Union(Real, AbstractArray, Colon)...) + checkbounds(A, I...) + _unsafe_getindex(l, A, I...) +end +@generated function _unsafe_getindex(l::LinearIndexing, A::AbstractArray, I::Union(Real, AbstractArray, Colon)...) N = length(I) quote - @nexprs $N d->(size(A, d) == length(I[d]) || throw(DimensionMismatch("index $d has length $(length(I[d])), but size(A, $d) = $(size(A,d))"))) - nothing + # This is specifically *not* inlined. + @nexprs $N d->(I_d = to_index(I[d])) + dest = similar(A, @ncall $N index_shape A I) + @ncall $N checksize dest I + @ncall $N _unsafe_getindex!
dest l A I + end +end + +# logical indexing optimization - don't use find (within to_index) +# This is inherently a linear operation in the source, but we could potentially +# use fast dividing integers to speed it up. +function _unsafe_getindex(::LinearIndexing, src::AbstractArray, I::AbstractArray{Bool}) + # Both index_shape and checksize compute sum(I); manually hoist it out + N = sum(I) + dest = similar(src, (N,)) + size(dest) == (N,) || throw(DimensionMismatch()) + D = eachindex(dest) + Ds = start(D) + s = 0 + for b in eachindex(I) + s+=1 + if unsafe_getindex(I, b) + d, Ds = next(D, Ds) + unsafe_setindex!(dest, unsafe_getindex(src, s), d) + end end + dest end -@inline unsafe_getindex(v::BitArray, ind::Int) = Base.unsafe_bitgetindex(v.chunks, ind) - -@inline unsafe_setindex!{T}(v::Array{T}, x::T, ind::Int) = (@inbounds v[ind] = x; v) -@inline unsafe_setindex!{T}(v::AbstractArray{T}, x::T, ind::Int) = (v[ind] = x; v) -@inline unsafe_setindex!(v::BitArray, x::Bool, ind::Int) = (Base.unsafe_bitsetindex!(v.chunks, x, ind); v) -@inline unsafe_setindex!{T}(v::AbstractArray{T}, x::T, ind::Real) = unsafe_setindex!(v, x, to_index(ind)) +# Indexing with an array of indices is inherently linear in the source, but +# might be able to be optimized with fast dividing integers +@inline function _unsafe_getindex!(dest::AbstractArray, ::LinearIndexing, src::AbstractArray, I::AbstractArray) + D = eachindex(dest) + Ds = start(D) + for idx in I + d, Ds = next(D, Ds) + unsafe_setindex!(dest, unsafe_getindex(src, idx), d) + end + dest +end -# Version that uses cartesian indexing for src -@generated function _getindex!(dest::Array, src::AbstractArray, I::Union(Int,AbstractVector)...) +# Fast source - compute the linear index +@generated function _unsafe_getindex!(dest::AbstractArray, ::LinearFast, src::AbstractArray, I::Union(Real, AbstractVector, Colon)...) N = length(I) - Isplat = Expr[:(I[$d]) for d = 1:N] quote - checksize(dest, $(Isplat...)) - k = 1 - @nloops $N i dest d->(@inbounds j_d = unsafe_getindex(I[d], i_d)) begin - @inbounds dest[k] = (@nref $N src j) - k += 1 + $(Expr(:meta, :inline)) + stride_1 = 1 + @nexprs $N d->(stride_{d+1} = stride_d*size(src, d)) + $(symbol(:offset_, N)) = 1 + D = eachindex(dest) + Ds = start(D) + @nloops $N i dest d->(offset_{d-1} = offset_d + (unsafe_getindex(I[d], i_d)-1)*stride_d) begin + d, Ds = next(D, Ds) + unsafe_setindex!(dest, unsafe_getindex(src, offset_0), d) end dest end end - -# Version that uses linear indexing for src -@generated function _getindex!(dest::Array, src::Array, I::Union(Int,AbstractVector)...) +# Slow source - index with the indices provided. +# TODO: this may not be the full dimensionality; that case could be optimized +@generated function _unsafe_getindex!(dest::AbstractArray, ::LinearSlow, src::AbstractArray, I::Union(Real, AbstractVector, Colon)...) 
N = length(I) - Isplat = Expr[:(I[$d]) for d = 1:N] quote - checksize(dest, $(Isplat...)) - stride_1 = 1 - @nexprs $N d->(stride_{d+1} = stride_d*size(src,d)) - @nexprs $N d->(offset_d = 1) # only really need offset_$N = 1 - k = 1 - @nloops $N i dest d->(@inbounds offset_{d-1} = offset_d + (unsafe_getindex(I[d], i_d)-1)*stride_d) begin - @inbounds dest[k] = src[offset_0] - k += 1 + $(Expr(:meta, :inline)) + D = eachindex(dest) + Ds = start(D) + @nloops $N i dest d->(j_d = unsafe_getindex(I[d], i_d)) begin + d, Ds = next(D, Ds) + v = @ncall $N unsafe_getindex src j + unsafe_setindex!(dest, v, d) end dest end end -# It's most efficient to call checkbounds first, then to_index, and finally -# allocate the output. Hence the different variants. -_getindex(A, I::Tuple{Vararg{Union(Int,AbstractVector),}}) = - _getindex!(similar(A, index_shape(I...)), A, I...) - -# The @generated function here is just to work around the performance hit -# of splatting -@generated function getindex(A::Array, I::Union(Real,AbstractVector)...) +# checksize ensures the output array A is the correct size for the given indices +checksize(A::AbstractArray, I::AbstractArray) = size(A) == size(I) || throw(DimensionMismatch("index 1 has size $(size(I)), but size(A) = $(size(A))")) +checksize(A::AbstractArray, I::AbstractArray{Bool}) = length(A) == sum(I) || throw(DimensionMismatch("index 1 selects $(sum(I)) elements, but length(A) = $(length(A))")) +@generated function checksize(A::AbstractArray, I...) N = length(I) - Isplat = Expr[:(I[$d]) for d = 1:N] quote - checkbounds(A, $(Isplat...)) - _getindex(A, to_index($(Isplat...))) + @nexprs $N d->(_checksize(A, d, I[d]) || throw(DimensionMismatch("index $d selects $(length(I[d])) elements, but size(A, $d) = $(size(A,d))"))) end end +_checksize(A::AbstractArray, dim, I) = size(A, dim) == length(I) +_checksize(A::AbstractArray, dim, I::AbstractVector{Bool}) = size(A, dim) == sum(I) +_checksize(A::AbstractArray, dim, ::Colon) = true +_checksize(A::AbstractArray, dim, ::Real) = size(A, dim) == 1 + +@inline unsafe_setindex!{T}(v::Array{T}, x::T, ind::Int) = (@inbounds v[ind] = x; v) +@inline unsafe_setindex!(v::BitArray, x::Bool, ind::Int) = (Base.unsafe_bitsetindex!(v.chunks, x, ind); v) +@inline unsafe_setindex!(v::BitArray, x, ind::Real) = (Base.unsafe_bitsetindex!(v.chunks, convert(Bool, x), to_index(ind)); v) + +## setindex! ## +# For multi-element setindex!, we check bounds, convert the indices (to_index), +# and ensure the value to set is either an AbstractArray or a Repeated scalar +# before redispatching to the _unsafe_batchsetindex! +_iterable(v::AbstractArray) = v +_iterable(v) = repeated(v) +@inline function _setindex!(l::LinearIndexing, A::AbstractArray, x, J::Union(Real,AbstractArray,Colon)...) + checkbounds(A, J...) + _unsafe_setindex!(l, A, x, J...) +end +@inline function _unsafe_setindex!(l::LinearIndexing, A::AbstractArray, x, J::Union(Real,AbstractVector,Colon)...) + _unsafe_batchsetindex!(l, A, _iterable(x), to_index(J)...) +end + +# While setindex! with one array argument doesn't mean anything special, it is +# still supported for symmetry with getindex. 
+_unsafe_setindex!(l::LinearIndexing, A::AbstractArray, x, I::AbstractArray) = _unsafe_setindex!(l, A, x, vec(I)) +# 1-d logical indexing: override the above to avoid calling find (in to_index) +function _unsafe_setindex!(::LinearIndexing, A::AbstractArray, x, I::AbstractVector{Bool}) + X = _iterable(x) + Xs = start(X) + i = 0 + c = 0 + for b in eachindex(I) + i+=1 + if unsafe_getindex(I, b) + done(X, Xs) && throw_setindex_mismatch(x, c+1) + (v, Xs) = next(X, Xs) + unsafe_setindex!(A, v, i) + c += 1 + end + end + setindex_shape_check(X, c) + A +end -# Also a safe version of getindex! -@generated function getindex!(dest, src, I::Union(Real,AbstractVector)...) +# Use iteration over X so we don't need to worry about its storage +@generated function _unsafe_batchsetindex!(::LinearFast, A::AbstractArray, X, I::Union(Real,AbstractVector,Colon)...) N = length(I) - Isplat = Expr[:(I[$d]) for d = 1:N] - Jsplat = Expr[:(to_index(I[$d])) for d = 1:N] quote - checkbounds(src, $(Isplat...)) - _getindex!(dest, src, $(Jsplat...)) + @nexprs $N d->(I_d = I[d]) + idxlens = @ncall $N index_lengths A I + @ncall $N setindex_shape_check X (d->idxlens[d]) + Xs = start(X) + stride_1 = 1 + @nexprs $N d->(stride_{d+1} = stride_d*size(A,d)) + $(symbol(:offset_, N)) = 1 + @nloops $N i d->(1:idxlens[d]) d->(offset_{d-1} = offset_d + (unsafe_getindex(I_d, i_d)-1)*stride_d) begin + v, Xs = next(X, Xs) + unsafe_setindex!(A, v, offset_0) + end + A end end - - -@generated function setindex!(A::Array, x, J::Union(Real,AbstractArray)...) - N = length(J) - if x<:AbstractArray - ex=quote - X = x - @ncall $N setindex_shape_check X I - Xs = start(X) - @nloops $N i d->(1:length(I_d)) d->(@inbounds offset_{d-1} = offset_d + (unsafe_getindex(I_d, i_d)-1)*stride_d) begin - v, Xs = next(X, Xs) - @inbounds A[offset_0] = v - end +@generated function _unsafe_batchsetindex!(::LinearSlow, A::AbstractArray, X, I::Union(Real,AbstractVector,Colon)...) + N = length(I) + quote + @nexprs $N d->(I_d = I[d]) + idxlens = @ncall $N index_lengths A I + @ncall $N setindex_shape_check X (d->idxlens[d]) + Xs = start(X) + @nloops $N i d->(1:idxlens[d]) d->(j_d = unsafe_getindex(I_d, i_d)) begin + v, Xs = next(X, Xs) + @ncall $N unsafe_setindex! A v j end - else - ex=quote - @nloops $N i d->(1:length(I_d)) d->(@inbounds offset_{d-1} = offset_d + (unsafe_getindex(I_d, i_d)-1)*stride_d) begin - @inbounds A[offset_0] = x + A + end +end + +# Cartesian indexing +function cartindex_exprs(indexes, syms) + exprs = Any[] + for (i,ind) in enumerate(indexes) + if ind <: CartesianIndex + for j = 1:length(ind) + push!(exprs, :($syms[$i][$j])) end + else + push!(exprs, :($syms[$i])) end end - quote - @nexprs $N d->(J_d = J[d]) - @ncall $N checkbounds A J - @nexprs $N d->(I_d = to_index(J_d)) - stride_1 = 1 - @nexprs $N d->(stride_{d+1} = stride_d*size(A,d)) - @nexprs $N d->(offset_d = 1) # really only need offset_$N = 1 - $ex - A + if isempty(exprs) + push!(exprs, 1) # Handle the zero-dimensional case end + exprs +end +@generated function _getindex{T,N}(l::LinearIndexing, A::AbstractArray{T,N}, I::Union(Real,AbstractArray,Colon,CartesianIndex)...) + :($(Expr(:meta, :inline)); getindex(A, $(cartindex_exprs(I, :I)...))) +end +@generated function _unsafe_getindex{T,N}(l::LinearIndexing, A::AbstractArray{T,N}, I::Union(Real,AbstractArray,Colon,CartesianIndex)...) 
+ :($(Expr(:meta, :inline)); unsafe_getindex(A, $(cartindex_exprs(I, :I)...))) end +@generated function _setindex!{T,N}(l::LinearIndexing, A::AbstractArray{T,N}, v, I::Union(Real,AbstractArray,Colon,CartesianIndex)...) + :($(Expr(:meta, :inline)); setindex!(A, v, $(cartindex_exprs(I, :I)...))) +end +@generated function _unsafe_setindex!{T,N}(l::LinearIndexing, A::AbstractArray{T,N}, v, I::Union(Real,AbstractArray,Colon,CartesianIndex)...) + :($(Expr(:meta, :inline)); unsafe_setindex!(A, v, $(cartindex_exprs(I, :I)...))) +end + +## @generated function findn{T,N}(A::AbstractArray{T,N}) quote @@ -352,31 +414,6 @@ end end end - -### subarray.jl - -function gen_setindex_body(N::Int) - quote - Base.Cartesian.@nexprs $N d->(J_d = J[d]) - Base.Cartesian.@ncall $N checkbounds V J - Base.Cartesian.@nexprs $N d->(I_d = Base.to_index(J_d)) - if !isa(x, AbstractArray) - Base.Cartesian.@nloops $N i d->(1:length(I_d)) d->(@inbounds j_d = Base.unsafe_getindex(I_d, i_d)) begin - @inbounds (Base.Cartesian.@nref $N V j) = x - end - else - X = x - Base.Cartesian.@ncall $N Base.setindex_shape_check X I - k = 1 - Base.Cartesian.@nloops $N i d->(1:length(I_d)) d->(@inbounds j_d = Base.unsafe_getindex(I_d, i_d)) begin - @inbounds (Base.Cartesian.@nref $N V j) = X[k] - k += 1 - end - end - V - end -end - ## SubArray index merging # A view created like V = A[2:3:8, 5:2:17] can later be indexed as V[2:7], # creating a new 1d view. @@ -438,6 +475,26 @@ function merge_indexes(V, parentindexes::NTuple, parentdims::Dims, linindex, lin merge_indexes_div(V, parentindexes, parentdims, linindex, lindim) end +# Even simpler is the case where the linear index is ::Colon: return all indexes +@generated function merge_indexes(V, indexes::NTuple, dims::Dims, ::Colon) + N = length(indexes) + N > 0 || throw(ArgumentError("cannot merge empty indexes")) + quote + Base.Cartesian.@nexprs $N d->(I_d = indexes[d]) + dimoffset = ndims(V.parent) - length(dims) + n = prod(map(length, indexes)) + Pstride_1 = 1 # parent strides + Base.Cartesian.@nexprs $(N-1) d->(Pstride_{d+1} = Pstride_d*dims[d]) + Base.Cartesian.@nexprs $N d->(offset_d = 1) # offset_0 is a linear index into parent + k = 0 + index = Array(Int, n) + Base.Cartesian.@nloops $N i d->(1:dimsize(V, d+dimoffset, I_d)) d->(offset_{d-1} = offset_d + (I_d[i_d]-1)*Pstride_d) begin + index[k+=1] = offset_0 + end + index + end +end + # This could be written as a regular function, but performance # will be better using Cartesian macros to avoid the heap and # an extra loop. @@ -552,67 +609,25 @@ end ## getindex -# general scalar indexing with two or more indices -# (uses linear indexing, which is defined in bitarray.jl) -# (code is duplicated for safe and unsafe versions for performance reasons) - -@generated function unsafe_getindex(B::BitArray, I_0::Int, I::Int...) - N = length(I) - quote - stride = 1 - index = I_0 - @nexprs $N d->begin - stride *= size(B,d) - index += (I[d] - 1) * stride - end - return unsafe_getindex(B, index) - end -end - -@generated function getindex(B::BitArray, I_0::Int, I::Int...) - N = length(I) - quote - stride = 1 - index = I_0 - @nexprs $N d->(I_d = I[d]) - @nexprs $N d->begin - l = size(B,d) - stride *= l - 1 <= I_{d-1} <= l || throw(BoundsError()) - index += (I_d - 1) * stride - end - return B[index] - end -end - # contiguous multidimensional indexing: if the first dimension is a range, # we can get some performance from using copy_chunks! 
- -function unsafe_getindex(B::BitArray, I0::UnitRange{Int}) - X = BitArray(length(I0)) - copy_chunks!(X.chunks, 1, B.chunks, first(I0), length(I0)) +@inline function _unsafe_getindex!(X::BitArray, ::LinearFast, B::BitArray, I0::Union(UnitRange{Int}, Colon)) + copy_chunks!(X.chunks, 1, B.chunks, first(I0), index_lengths(B, I0)[1]) return X end -function getindex(B::BitArray, I0::UnitRange{Int}) - checkbounds(B, I0) - return unsafe_getindex(B, I0) -end - -getindex{T<:Real}(B::BitArray, I0::UnitRange{T}) = getindex(B, to_index(I0)) - -@generated function unsafe_getindex(B::BitArray, I0::UnitRange{Int}, I::Union(Int,UnitRange{Int})...) +# Optimization where the inner dimension is contiguous improves perf dramatically +@generated function _unsafe_getindex!(X::BitArray, ::LinearFast, B::BitArray, I0::Union(Colon,UnitRange{Int}), I::Union(Int,UnitRange{Int},Colon)...) N = length(I) - Isplat = Expr[:(I[$d]) for d = 1:N] quote + $(Expr(:meta, :inline)) @nexprs $N d->(I_d = I[d]) - X = BitArray(index_shape(I0, $(Isplat...))) f0 = first(I0) - l0 = length(I0) + l0 = size(X, 1) gap_lst_1 = 0 - @nexprs $N d->(gap_lst_{d+1} = length(I_d)) + @nexprs $N d->(gap_lst_{d+1} = size(X, d+1)) stride = 1 ind = f0 @nexprs $N d->begin @@ -623,91 +638,40 @@ getindex{T<:Real}(B::BitArray, I0::UnitRange{T}) = getindex(B, to_index(I0)) end storeind = 1 + Xc, Bc = X.chunks, B.chunks @nloops($N, i, d->I_d, d->nothing, # PRE d->(ind += stride_lst_d - gap_lst_d), # POST begin # BODY - copy_chunks!(X.chunks, storeind, B.chunks, ind, l0) + copy_chunks!(Xc, storeind, Bc, ind, l0) storeind += l0 end) return X end end -# general multidimensional non-scalar indexing - -@generated function unsafe_getindex(B::BitArray, I::Union(Int,AbstractVector{Int})...) +# in the general multidimensional non-scalar case, we can do about 10% better +# in most cases by manually hoisting the bitarray chunks access out of the loop +# (This should really be handled by the compiler or with an immutable BitArray) +@generated function _unsafe_getindex!(X::BitArray, ::LinearFast, B::BitArray, I::Union(Int,AbstractVector{Int},Colon)...) N = length(I) - Isplat = Expr[:(I[$d]) for d = 1:N] quote - @nexprs $N d->(I_d = I[d]) - X = BitArray(index_shape($(Isplat...))) - Xc = X.chunks - + $(Expr(:meta, :inline)) stride_1 = 1 - @nexprs $N d->(stride_{d+1} = stride_d * size(B, d)) - @nexprs 1 d->(offset_{$N} = 1) - ind = 1 - @nloops($N, i, d->I_d, - d->(offset_{d-1} = offset_d + (i_d-1)*stride_d), # PRE - begin - unsafe_bitsetindex!(Xc, B[offset_0], ind) - ind += 1 - end) + @nexprs $N d->(stride_{d+1} = stride_d*size(B, d)) + $(symbol(:offset_, N)) = 1 + ind = 0 + Xc, Bc = X.chunks, B.chunks + @nloops $N i X d->(offset_{d-1} = offset_d + (unsafe_getindex(I[d], i_d)-1)*stride_d) begin + ind += 1 + unsafe_bitsetindex!(Xc, unsafe_bitgetindex(Bc, offset_0), ind) + end return X end end -# general version with Real (or logical) indexing which dispatches on the appropriate method - -@generated function getindex(B::BitArray, I::Union(Real,AbstractVector)...) - N = length(I) - Isplat = Expr[:(I[$d]) for d = 1:N] - Jsplat = Expr[:(to_index(I[$d])) for d = 1:N] - quote - checkbounds(B, $(Isplat...)) - return unsafe_getindex(B, $(Jsplat...)) - end -end - ## setindex!
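The stride/offset bookkeeping in the generated getindex and setindex! loops in this file is a manually unrolled sub2ind: offset_0 accumulates the linear position as each loop dimension advances. A rough worked example of what offset_0 ends up holding (illustrative only, not part of the patch):

# For an array of size (3, 4, 5) and scalar indices (2, 3, 4), the recurrence
#   offset_{d-1} = offset_d + (i_d - 1) * stride_d    with strides (1, 3, 12)
# gives offset_0 = 1 + (2-1)*1 + (3-1)*3 + (4-1)*12 = 44, the same linear index
# that sub2ind produces:
B = falses(3, 4, 5)
sub2ind(size(B), 2, 3, 4) == 2 + (3-1)*3 + (4-1)*12   # true; both are 44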
-# general scalar indexing with two or more indices -# (uses linear indexing, which - in the safe version - performs the final -# bounds check and is defined in bitarray.jl) -# (code is duplicated for safe and unsafe versions for performance reasons) - -@generated function unsafe_setindex!(B::BitArray, x::Bool, I_0::Int, I::Int...) - N = length(I) - quote - stride = 1 - index = I_0 - @nexprs $N d->begin - stride *= size(B,d) - index += (I[d] - 1) * stride - end - unsafe_setindex!(B, x, index) - return B - end -end - -@generated function setindex!(B::BitArray, x::Bool, I_0::Int, I::Int...) - N = length(I) - quote - stride = 1 - index = I_0 - @nexprs $N d->(I_d = I[d]) - @nexprs $N d->begin - l = size(B,d) - stride *= l - 1 <= I_{d-1} <= l || throw(BoundsError()) - index += (I_d - 1) * stride - end - B[index] = x - return B - end -end - # contiguous multidimensional indexing: if the first dimension is a range, # we can get some performance from using copy_chunks! @@ -787,72 +751,6 @@ end end end - -# general multidimensional non-scalar indexing - -@generated function unsafe_setindex!(B::BitArray, X::AbstractArray, I::Union(Int,AbstractArray{Int})...) - N = length(I) - quote - refind = 1 - @nexprs $N d->(I_d = I[d]) - @nloops $N i d->I_d @inbounds begin - @ncall $N unsafe_setindex! B convert(Bool,X[refind]) i - refind += 1 - end - return B - end -end - -@generated function unsafe_setindex!(B::BitArray, x::Bool, I::Union(Int,AbstractArray{Int})...) - N = length(I) - quote - @nexprs $N d->(I_d = I[d]) - @nloops $N i d->I_d begin - @ncall $N unsafe_setindex! B x i - end - return B - end -end - -# general versions with Real (or logical) indexing which dispatch on the appropriate method - -# this one is for disambiguation only -function setindex!(B::BitArray, x, i::Real) - checkbounds(B, i) - return unsafe_setindex!(B, convert(Bool,x), to_index(i)) -end - -@generated function setindex!(B::BitArray, x, I::Union(Real,AbstractArray)...) - N = length(I) - quote - checkbounds(B, I...) - #return unsafe_setindex!(B, convert(Bool,x), to_index(I...)...) # segfaults! (???) - @nexprs $N d->(J_d = to_index(I[d])) - return @ncall $N unsafe_setindex! B convert(Bool,x) J - end -end - - -# this one is for disambiguation only -function setindex!(B::BitArray, X::AbstractArray, i::Real) - checkbounds(B, i) - j = to_index(i) - setindex_shape_check(X, j) - return unsafe_setindex!(B, X, j) -end - -@generated function setindex!(B::BitArray, X::AbstractArray, I::Union(Real,AbstractArray)...) - N = length(I) - quote - checkbounds(B, I...) - @nexprs $N d->(J_d = to_index(I[d])) - @ncall $N setindex_shape_check X J - return @ncall $N unsafe_setindex! B X J - end -end - - - ## findn @generated function findn{N}(B::BitArray{N}) diff --git a/base/number.jl b/base/number.jl index 1c0029ce60419..fcd6a02d5f5d0 100644 --- a/base/number.jl +++ b/base/number.jl @@ -15,6 +15,7 @@ getindex(x::Number) = x getindex(x::Number, i::Integer) = i == 1 ? x : throw(BoundsError()) getindex(x::Number, I::Integer...) = all([i == 1 for i in I]) ? x : throw(BoundsError()) getindex(x::Number, I::Real...) = getindex(x, to_index(I)...) 
+unsafe_getindex(x::Real, i::Real) = x first(x::Number) = x last(x::Number) = x diff --git a/base/operators.jl b/base/operators.jl index 0f20a088a467c..3db8f3f244864 100644 --- a/base/operators.jl +++ b/base/operators.jl @@ -221,16 +221,11 @@ function promote_shape(a::Dims, b::Dims) return a end -# shape of array to create for getindex() with indexes I -# drop dimensions indexed with trailing scalars -index_shape(I::Real...) = () -index_shape(i, I...) = tuple(length(i), index_shape(I...)...) - function throw_setindex_mismatch(X, I) if length(I) == 1 - throw(DimensionMismatch("tried to assign $(length(X)) elements to $(length(I[1])) destinations")) + throw(DimensionMismatch("tried to assign $(length(X)) elements to $(I[1]) destinations")) else - throw(DimensionMismatch("tried to assign $(dims2string(size(X))) array to $(dims2string(map(length,I))) destination")) + throw(DimensionMismatch("tried to assign $(dims2string(size(X))) array to $(dims2string(I)) destination")) end end @@ -239,13 +234,13 @@ end # for permutations that leave array elements in the same linear order. # those are the permutations that preserve the order of the non-singleton # dimensions. -function setindex_shape_check(X::AbstractArray, I...) +function setindex_shape_check(X::AbstractArray, I::Int...) li = ndims(X) lj = length(I) i = j = 1 while true ii = size(X,i) - jj = length(I[j])::Int + jj = I[j] if i == li || j == lj while i < li i += 1 @@ -253,7 +248,7 @@ function setindex_shape_check(X::AbstractArray, I...) end while j < lj j += 1 - jj *= length(I[j])::Int + jj *= I[j] end if ii != jj throw_setindex_mismatch(X, I) @@ -276,25 +271,25 @@ end setindex_shape_check(X::AbstractArray) = (length(X)==1 || throw_setindex_mismatch(X,())) -setindex_shape_check(X::AbstractArray, i) = - (length(X)==length(i) || throw_setindex_mismatch(X, (i,))) +setindex_shape_check(X::AbstractArray, i::Int) = + (length(X)==i || throw_setindex_mismatch(X, (i,))) -setindex_shape_check{T}(X::AbstractArray{T,1}, i) = - (length(X)==length(i) || throw_setindex_mismatch(X, (i,))) +setindex_shape_check{T}(X::AbstractArray{T,1}, i::Int) = + (length(X)==i || throw_setindex_mismatch(X, (i,))) -setindex_shape_check{T}(X::AbstractArray{T,1}, i, j) = - (length(X)==length(i)*length(j) || throw_setindex_mismatch(X, (i,j))) +setindex_shape_check{T}(X::AbstractArray{T,1}, i::Int, j::Int) = + (length(X)==i*j || throw_setindex_mismatch(X, (i,j))) -function setindex_shape_check{T}(X::AbstractArray{T,2}, i, j) - li, lj = length(i), length(j) - if length(X) != li*lj +function setindex_shape_check{T}(X::AbstractArray{T,2}, i::Int, j::Int) + if length(X) != i*j throw_setindex_mismatch(X, (i,j)) end sx1 = size(X,1) - if !(li == 1 || li == sx1 || sx1 == 1) + if !(i == 1 || i == sx1 || sx1 == 1) throw_setindex_mismatch(X, (i,j)) end end +setindex_shape_check(X, I::Int...) 
= nothing # Non-arrays broadcast to all idxs # convert to integer index to_index(i::Int) = i @@ -305,6 +300,7 @@ to_index(I::UnitRange{Bool}) = find(I) to_index(I::Range{Bool}) = find(I) to_index{T<:Integer}(r::UnitRange{T}) = to_index(first(r)):to_index(last(r)) to_index{T<:Integer}(r::StepRange{T}) = to_index(first(r)):to_index(step(r)):to_index(last(r)) +to_index(c::Colon) = c to_index(I::AbstractArray{Bool}) = find(I) to_index(A::AbstractArray{Int}) = A to_index{T<:Integer}(A::AbstractArray{T}) = [to_index(x) for x in A] diff --git a/base/range.jl b/base/range.jl index 8f40abf8d5876..7e58a677eb3d1 100644 --- a/base/range.jl +++ b/base/range.jl @@ -347,54 +347,44 @@ done(r::UnitRange, i) = i==oftype(i,r.stop)+1 ## indexing -getindex(r::Range, i::Real) = getindex(r, to_index(i)) +getindex(r::Range, i::Integer) = (checkbounds(r, i); unsafe_getindex(r, i)) +unsafe_getindex{T}(v::Range{T}, i::Integer) = convert(T, first(v) + (i-1)*step(v)) -function getindex{T}(r::Range{T}, i::Integer) - 1 <= i <= length(r) || throw(BoundsError()) - convert(T, first(r) + (i-1)*step(r)) -end -function getindex{T}(r::FloatRange{T}, i::Integer) - 1 <= i <= length(r) || throw(BoundsError()) - convert(T, (r.start + (i-1)*r.step)/r.divisor) -end -function getindex{T}(r::LinSpace{T}, i::Integer) - 1 <= i <= length(r) || throw(BoundsError()) - convert(T, ((r.len-i)*r.start + (i-1)*r.stop)/r.divisor) -end +getindex{T}(r::FloatRange{T}, i::Integer) = (checkbounds(r, i); unsafe_getindex(r, i)) +unsafe_getindex{T}(r::FloatRange{T}, i::Integer) = convert(T, (r.start + (i-1)*r.step)/r.divisor) -function check_indexingrange(s, r) - sl = length(s) - rl = length(r) - sl == 0 || 1 <= first(s) <= rl && - 1 <= last(s) <= rl || throw(BoundsError()) - sl -end +getindex{T}(r::LinSpace{T}, i::Integer) = (checkbounds(r, i); unsafe_getindex(r, i)) +unsafe_getindex{T}(r::LinSpace{T}, i::Integer) = convert(T, ((r.len-i)*r.start + (i-1)*r.stop)/r.divisor) + +getindex(r::Range, ::Colon) = copy(r) +unsafe_getindex(r::Range, ::Colon) = copy(r) -function getindex(r::UnitRange, s::UnitRange{Int}) - sl = check_indexingrange(s, r) +getindex(r::UnitRange, s::UnitRange{Int}) = (checkbounds(r, s); unsafe_getindex(r, s)) +function unsafe_getindex(r::UnitRange, s::UnitRange{Int}) st = oftype(r.start, r.start + s.start-1) - range(st, sl) + range(st, length(s)) end -function getindex(r::UnitRange, s::StepRange{Int}) - sl = check_indexingrange(s, r) +getindex(r::UnitRange, s::StepRange{Int}) = (checkbounds(r, s); unsafe_getindex(r, s)) +function unsafe_getindex(r::UnitRange, s::StepRange{Int}) st = oftype(r.start, r.start + s.start-1) - range(st, step(s), sl) + range(st, step(s), length(s)) end -function getindex(r::StepRange, s::Range{Int}) - sl = check_indexingrange(s, r) +getindex(r::StepRange, s::Range{Int}) = (checkbounds(r, s); unsafe_getindex(r, s)) +function unsafe_getindex(r::StepRange, s::Range{Int}) st = oftype(r.start, r.start + (first(s)-1)*step(r)) - range(st, step(r)*step(s), sl) + range(st, step(r)*step(s), length(s)) end -function getindex(r::FloatRange, s::OrdinalRange) - sl = check_indexingrange(s, r) - FloatRange(r.start + (first(s)-1)*r.step, step(s)*r.step, sl, r.divisor) +getindex(r::FloatRange, s::OrdinalRange) = (checkbounds(r, s); unsafe_getindex(r, s)) +function unsafe_getindex(r::FloatRange, s::OrdinalRange) + FloatRange(r.start + (first(s)-1)*r.step, step(s)*r.step, length(s), r.divisor) end -function getindex{T}(r::LinSpace{T}, s::OrdinalRange) - sl::T = check_indexingrange(s, r) +getindex(r::LinSpace, s::OrdinalRange) = 
(checkbounds(r, s); unsafe_getindex(r, s)) +function unsafe_getindex{T}(r::LinSpace{T}, s::OrdinalRange) + sl::T = length(s) ifirst = first(s) ilast = last(s) vfirst::T = ((r.len - ifirst) * r.start + (ifirst - 1) * r.stop) / r.divisor diff --git a/base/sharedarray.jl b/base/sharedarray.jl index 595470de3f530..8ef176423ef47 100644 --- a/base/sharedarray.jl +++ b/base/sharedarray.jl @@ -213,7 +213,8 @@ convert(::Type{Array}, S::SharedArray) = S.s getindex(S::SharedArray) = getindex(S.s) getindex(S::SharedArray, I::Real) = getindex(S.s, I) getindex(S::SharedArray, I::AbstractArray) = getindex(S.s, I) -@generated function getindex(S::SharedArray, I::Union(Real,AbstractVector)...) +getindex(S::SharedArray, I::Colon) = getindex(S.s, I) +@generated function getindex(S::SharedArray, I::Union(Real,AbstractVector,Colon)...) N = length(I) Isplat = Expr[:(I[$d]) for d = 1:N] quote @@ -224,7 +225,8 @@ end setindex!(S::SharedArray, x) = setindex!(S.s, x) setindex!(S::SharedArray, x, I::Real) = setindex!(S.s, x, I) setindex!(S::SharedArray, x, I::AbstractArray) = setindex!(S.s, x, I) -@generated function setindex!(S::SharedArray, x, I::Union(Real,AbstractVector)...) +setindex!(S::SharedArray, x, I::Colon) = setindex!(S.s, x, I) +@generated function setindex!(S::SharedArray, x, I::Union(Real,AbstractVector,Colon)...) N = length(I) Isplat = Expr[:(I[$d]) for d = 1:N] quote diff --git a/base/sparse/cholmod.jl b/base/sparse/cholmod.jl index 5382777a85e3b..c2e1a1aead03e 100644 --- a/base/sparse/cholmod.jl +++ b/base/sparse/cholmod.jl @@ -2,7 +2,8 @@ module CHOLMOD -import Base: (*), convert, copy, eltype, getindex, show, size +import Base: (*), convert, copy, eltype, getindex, show, size, + linearindexing, LinearFast, LinearSlow import Base.LinAlg: (\), A_mul_Bc, A_mul_Bt, Ac_ldiv_B, Ac_mul_B, At_ldiv_B, At_mul_B, cholfact, cholfact!, det, diag, ishermitian, isposdef, @@ -935,19 +936,14 @@ function size(F::Factor, i::Integer) return 1 end +linearindexing(::Dense) = LinearFast() function getindex(A::Dense, i::Integer) s = unsafe_load(A.p) 0 < i <= s.nrow*s.ncol || throw(BoundsError()) unsafe_load(s.x, i) end -function getindex(A::Dense, i::Integer, j::Integer) - s = unsafe_load(A.p) - 0 < i <= s.nrow || throw(BoundsError()) - 0 < j <= s.ncol || throw(BoundsError()) - unsafe_load(s.x, i + (j - 1)*s.d) -end -getindex(A::Sparse, i::Integer) = getindex(A, ind2sub(size(A),i)...) +linearindexing(::Sparse) = LinearSlow() function getindex{T}(A::Sparse{T}, i0::Integer, i1::Integer) s = unsafe_load(A.p) !(1 <= i0 <= s.nrow && 1 <= i1 <= s.ncol) && throw(BoundsError()) diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index 95bb014144726..a15c12bebc2cf 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -146,7 +146,6 @@ copy(S::SparseMatrixCSC) = SparseMatrixCSC(S.m, S.n, copy(S.colptr), copy(S.rowval), copy(S.nzval)) similar(S::SparseMatrixCSC, Tv::Type=eltype(S)) = SparseMatrixCSC(S.m, S.n, copy(S.colptr), copy(S.rowval), Array(Tv, length(S.nzval))) -similar{Tv,Ti,TvNew}(S::SparseMatrixCSC{Tv,Ti}, ::Type{TvNew}, ::Type{Ti}) = similar(S, TvNew) similar{Tv,Ti,TvNew,TiNew}(S::SparseMatrixCSC{Tv,Ti}, ::Type{TvNew}, ::Type{TiNew}) = SparseMatrixCSC(S.m, S.n, convert(Array{TiNew},S.colptr), convert(Array{TiNew}, S.rowval), Array(TvNew, length(S.nzval))) similar{Tv}(S::SparseMatrixCSC, ::Type{Tv}, d::NTuple{Integer}) = spzeros(Tv, d...) @@ -1225,7 +1224,6 @@ function rangesearch(haystack::Range, needle) (rem==0 && 1<=i+1<=length(haystack)) ? 
i+1 : 0 end -getindex(A::SparseMatrixCSC, i::Integer) = isempty(A) ? throw(BoundsError()) : getindex(A, ind2sub(size(A),i)) getindex(A::SparseMatrixCSC, I::Tuple{Integer,Integer}) = getindex(A, I[1], I[2]) function getindex{T}(A::SparseMatrixCSC{T}, i0::Integer, i1::Integer) @@ -1240,6 +1238,12 @@ end getindex{T<:Integer}(A::SparseMatrixCSC, I::AbstractVector{T}, j::Integer) = getindex(A,I,[j]) getindex{T<:Integer}(A::SparseMatrixCSC, i::Integer, J::AbstractVector{T}) = getindex(A,[i],J) +# Colon translation (this could be done more efficiently) +getindex(A::SparseMatrixCSC, ::Colon) = getindex(A, 1:length(A)) +getindex(A::SparseMatrixCSC, ::Colon, ::Colon) = getindex(A, 1:size(A, 1), 1:size(A, 2)) +getindex(A::SparseMatrixCSC, ::Colon, j) = getindex(A, 1:size(A, 1), j) +getindex(A::SparseMatrixCSC, i, ::Colon) = getindex(A, i, 1:size(A, 2)) + function getindex_cols{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}, J::AbstractVector) # for indexing whole columns (m, n) = size(A) @@ -1681,8 +1685,6 @@ end ## setindex! -setindex!(A::SparseMatrixCSC, v, i::Integer) = setindex!(A, v, ind2sub(size(A),i)...) - function setindex!{T,Ti}(A::SparseMatrixCSC{T,Ti}, v, i0::Integer, i1::Integer) i0 = convert(Ti, i0) i1 = convert(Ti, i1) @@ -1723,6 +1725,12 @@ setindex!{T<:Integer}(A::SparseMatrixCSC, v::AbstractMatrix, I::AbstractVector{T setindex!{T<:Integer}(A::SparseMatrixCSC, x::Number, i::Integer, J::AbstractVector{T}) = setindex!(A, x, [i], J) setindex!{T<:Integer}(A::SparseMatrixCSC, x::Number, I::AbstractVector{T}, j::Integer) = setindex!(A, x, I, [j]) +# Colon translation +setindex!(A::SparseMatrixCSC, x, ::Colon) = setindex!(A, x, 1:length(A)) +setindex!(A::SparseMatrixCSC, x, ::Colon, ::Colon) = setindex!(A, x, 1:size(A, 1), 1:size(A,2)) +setindex!(A::SparseMatrixCSC, x, ::Colon, j::Union(Integer, AbstractVector)) = setindex!(A, x, 1:size(A, 1), j) +setindex!(A::SparseMatrixCSC, x, i::Union(Integer, AbstractVector), ::Colon) = setindex!(A, x, i, 1:size(A, 2)) + setindex!{Tv,T<:Integer}(A::SparseMatrixCSC{Tv}, x::Number, I::AbstractVector{T}, J::AbstractVector{T}) = (0 == x) ? spdelete!(A, I, J) : spset!(A, convert(Tv,x), I, J) diff --git a/base/subarray.jl b/base/subarray.jl index d8bb2870d3296..03221e36151c9 100644 --- a/base/subarray.jl +++ b/base/subarray.jl @@ -342,9 +342,8 @@ end length(I.parameters) == LD ? (:(LinearFast())) : (:(LinearSlow())) end -getindex(::Colon, ::Colon) = Colon() -getindex{T}(v::AbstractArray{T,1}, ::Colon) = v -getindex(::Colon, i) = i +getindex(::Colon, i) = to_index(i) +unsafe_getindex(v::Colon, i) = to_index(i) step(::Colon) = 1 first(::Colon) = 1 diff --git a/base/subarray2.jl b/base/subarray2.jl index 5528b6a91589d..59532baeacc85 100644 --- a/base/subarray2.jl +++ b/base/subarray2.jl @@ -1,109 +1,49 @@ # This file is a part of Julia. License is MIT: http://julialang.org/license ## Scalar indexing -# Low dimensions: avoid splatting -newsym = (:i_1, :i_2, :i_3, :i_4) -vars = Array(Expr, 0) -varsInt = Array(Expr, 0) -varsOther = Array(Expr, 0) -vars_toindex = Array(Expr, 0) -for i = 1:4 - sym = newsym[i] - push!(vars, Expr(:quote, sym)) - push!(varsInt, :($sym::Int)) - push!(varsOther, :($sym::Union(Real, AbstractVector))) - push!(vars_toindex, :(to_index($sym))) - ex = i == 1 ? 
quote - getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, $sym::Real) = getindex(V, to_index($sym)) - setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, v, $sym::Real) = setindex!(V, v, to_index($sym)) - getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, $sym::AbstractVector{Bool}) = getindex(V, to_index($sym)) - setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, v, $sym::AbstractVector{Bool}) = setindex!(V, v, to_index($sym)) - end : quote - getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, $(varsOther...)) = getindex(V, $(vars_toindex...)) - setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, v, $(varsOther...)) = setindex!(V, v, $(vars_toindex...)) - end - @eval begin - @generated function getindex{T,N,P,IV,LD}(V::SubArray{T,N,P,IV,LD}, $(varsInt...)) - if $i == 1 && length(IV.parameters) == LD # linear indexing - meta = Expr(:meta, :inline) - if iscontiguous(V) - return :($meta; V.parent[V.first_index + i_1 - 1]) - end - return :($meta; V.parent[V.first_index + V.stride1*(i_1-1)]) - end - exhead, ex = index_generate(ndims(P), IV, :V, [$(vars...)]) - quote - $exhead - $ex - end +@inline getindex(V::SubArray, I::Int...) = (checkbounds(V, I...); unsafe_getindex(V, I...)) +@generated function unsafe_getindex{T,N,P,IV,LD}(V::SubArray{T,N,P,IV,LD}, I::Int...) + ni = length(I) + if ni == 1 && length(IV.parameters) == LD # linear indexing + meta = Expr(:meta, :inline) + if iscontiguous(V) + return :($meta; V.parent[V.first_index + I[1] - 1]) end - @generated function setindex!{T,N,P,IV,LD}(V::SubArray{T,N,P,IV,LD}, v, $(varsInt...)) - if $i == 1 && length(IV.parameters) == LD # linear indexing - meta = Expr(:meta, :inline) - if iscontiguous(V) - return :($meta; V.parent[V.first_index + i_1 - 1] = v) - end - return :($meta; V.parent[V.first_index + V.stride1*(i_1-1)] = v) - end - exhead, ex = index_generate(ndims(P), IV, :V, [$(vars...)]) - quote - $exhead - $ex = v - end - end - $ex + return :($meta; V.parent[V.first_index + V.stride1*(I[1]-1)]) end -end -# V[] notation (extracts the first element) -@generated function getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}) - Isyms = ones(Int, N) - exhead, ex = index_generate(ndims(P), IV, :V, Isyms) + Isyms = [:(I[$d]) for d = 1:ni] + exhead, idxs = index_generate(ndims(P), IV, :V, Isyms) quote $exhead - $ex + unsafe_getindex(V.parent, $(idxs...)) end end -# Splatting variants -@generated function getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, I::Int...) - Isyms = [:(I[$d]) for d = 1:length(I)] - exhead, ex = index_generate(ndims(P), IV, :V, Isyms) - quote - $exhead - $ex +@inline setindex!(V::SubArray, v, I::Int...) = (checkbounds(V, I...); unsafe_setindex!(V, v, I...)) +@generated function unsafe_setindex!{T,N,P,IV,LD}(V::SubArray{T,N,P,IV,LD}, v, I::Int...) + ni = length(I) + if ni == 1 && length(IV.parameters) == LD # linear indexing + meta = Expr(:meta, :inline) + if iscontiguous(V) + return :($meta; V.parent[V.first_index + I[1] - 1] = v) + end + return :($meta; V.parent[V.first_index + V.stride1*(I[1]-1)] = v) end -end -@generated function setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, v, I::Int...) - Isyms = [:(I[$d]) for d = 1:length(I)] - exhead, ex = index_generate(ndims(P), IV, :V, Isyms) + Isyms = [:(I[$d]) for d = 1:ni] + exhead, idxs = index_generate(ndims(P), IV, :V, Isyms) quote $exhead - $ex = v + unsafe_setindex!(V.parent, v, $(idxs...)) end end # Indexing with non-scalars. For now, this returns a copy, but changing that # is just a matter of deleting the explicit call to copy. getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, I::ViewIndex...) 
= copy(sub(V, I...)) -getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, I::AbstractArray{Bool,N}) = copy(sub(V, find(I))) # this could be much better optimized -getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, I::Union(Real, AbstractVector)...) = getindex(V, to_index(I)...) +getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, I::Union(Real, AbstractArray, Colon)...) = getindex(V, to_index(I)...) +unsafe_getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, I::ViewIndex...) = copy(sub_unsafe(V, I)) +unsafe_getindex{T,N,P,IV}(V::SubArray{T,N,P,IV}, I::Union(Real, AbstractArray, Colon)...) = unsafe_getindex(V, to_index(I)...) -function setindex!{T,P,IV}(V::SubArray{T,1,P,IV}, v, I::AbstractArray{Bool,1}) - length(I) == length(V) || throw(DimensionMismatch("logical vector must match array length")) - setindex!(V, v, to_index(I)) -end -function setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, v, I::AbstractArray{Bool,1}) - length(I) == length(V) || throw(DimensionMismatch("logical vector must match array length")) - setindex!(V, v, to_index(I)) -end -function setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, v, I::AbstractArray{Bool,N}) - size(I) == size(V) || throw(DimensionMismatch("size of Boolean mask must match array size")) - _setindex!(V, v, find(I)) # this could be better optimized -end -setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, v, I::Union(Real,AbstractVector)...) = setindex!(V, v, to_index(I)...) -setindex!{T,N,P,IV}(V::SubArray{T,N,P,IV}, x, J::Union(Int,AbstractVector)...) = _setindex!(V, x, J...) -@generated function _setindex!(V::SubArray, x, J::Union(Real,AbstractVector)...) - gen_setindex_body(length(J)) -end +# Nonscalar setindex! falls back to the AbstractArray versions # NP is parent dimensionality, Itypes is the tuple typeof(V.indexes) # NP may not be equal to length(Itypes), because a view of a 2d matrix A @@ -153,27 +93,13 @@ function index_generate(NP, Itypes, Vsym, Isyms) indexexprs[i] = :($Vsym.indexes[$i]) else j += 1 - indexexprs[i] = :(unsafe_getindex($Vsym.indexes[$i], $(Isyms[j]))) # TODO: make Range bounds-checking respect @inbounds + indexexprs[i] = :(unsafe_getindex($Vsym.indexes[$i], $(Isyms[j]))) end end - # Append any extra indexes. Must be trailing 1s or it will cause a BoundsError. - if L < NP && j < length(Isyms) - # This view was created as V = A[5:13], so appending them would generate interpretive confusion. - # Instead, use double-indexing, i.e., A[indexes1...][indexes2...], where indexes2 contains the leftovers. - return exhead, :($Vsym.parent[$(indexexprs...)][$(Isyms[j+1:end]...)]) - end - for k = j+1:length(Isyms) - push!(indexexprs, Isyms[k]) - end + # Note that we drop any extra indices. We're trusting that the indices are + # already checked to be in-bounds, so any extra indices must be 1 (and no-op) if exhead == :nothing exhead = Expr(:meta, :inline) end - exhead, :($Vsym.parent[$(indexexprs...)]) + exhead, indexexprs end - -unsafe_getindex(v::Real, ind::Int) = v -unsafe_getindex(v::Range, ind::Int) = first(v) + (ind-1)*step(v) -@inline unsafe_getindex(v::Array, ind::Int) = (@inbounds x = v[ind]; x) -unsafe_getindex(v::AbstractArray, ind::Int) = v[ind] -unsafe_getindex(v::Colon, ind::Int) = ind -unsafe_getindex(v, ind::Real) = unsafe_getindex(v, to_index(ind)) diff --git a/test/parallel.jl b/test/parallel.jl index 3b18cdf02f959..6cb2714c8f0b6 100644 --- a/test/parallel.jl +++ b/test/parallel.jl @@ -124,6 +124,9 @@ map!(x->1, d) @test fill!(d, 1) == ones(10, 10) @test fill!(d, 2.) 
== fill(2, 10, 10) +@test d[:] == fill(2, 100) +@test d[:,1] == fill(2, 10) +@test d[1,:] == fill(2, 1, 10) # Boundary cases where length(S) <= length(pids) @test 2.0 == remotecall_fetch(id_other, D->D[2], Base.shmem_fill(2.0, 2; pids=[id_me, id_other])) diff --git a/test/perf/array/indexing.jl b/test/perf/array/indexing.jl new file mode 100644 index 0000000000000..ae42d1c8689ed --- /dev/null +++ b/test/perf/array/indexing.jl @@ -0,0 +1,172 @@ +# Performance testing + +import Base: unsafe_getindex +# @inline unsafe_getindex(xs...) = Base.getindex(xs...) + +function sumelt(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + for k = 1:n + for a in A + s += a + end + end + s +end + +function sumeach(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + for k = 1:n + for I in eachindex(A) + val = unsafe_getindex(A, I) + s += val + end + end + s +end + +function sumlinear(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + for k = 1:n + for I in 1:length(A) + val = unsafe_getindex(A, I) + s += val + end + end + s +end +function sumcartesian(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + for k = 1:n + for I in CartesianRange(size(A)) + val = unsafe_getindex(A, I) + s += val + end + end + s +end + +function sumcolon(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + nrows = size(A, 1) + ncols = size(A, 2) + c = Colon() + for k = 1:n + @simd for i = 1:ncols + val = unsafe_getindex(A, c, i) + s += first(val) + end + end + s +end + +function sumrange(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + nrows = size(A, 1) + ncols = size(A, 2) + r = 1:nrows + for k = 1:n + @simd for i = 1:ncols + val = unsafe_getindex(A, r, i) + s += first(val) + end + end + s +end + +function sumlogical(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + nrows = size(A, 1) + ncols = size(A, 2) + r = falses(nrows) + r[1:4:end] = true + for k = 1:n + @simd for i = 1:ncols + val = unsafe_getindex(A, r, i) + s += first(val) + end + end + s +end + +function sumvector(A, n) + s = zero(eltype(A)) + zero(eltype(A)) + nrows = size(A, 1) + ncols = size(A, 2) + r = rand(1:nrows, 5) + for k = 1:n + @simd for i = 1:ncols + val = unsafe_getindex(A, r, i) + s += first(val) + end + end + s +end + +abstract MyArray{T,N} <: AbstractArray{T,N} + +immutable ArrayLS{T,N} <: MyArray{T,N} # LinearSlow + data::Array{T,N} +end +immutable ArrayLSLS{T,N} <: MyArray{T,N} # LinearSlow with LinearSlow similar + data::Array{T,N} +end +Base.similar{T}(A::ArrayLSLS, ::Type{T}, dims::Tuple{Vararg{Int}}) = ArrayLSLS(similar(A.data, T, dims)) +@inline Base.setindex!(A::ArrayLSLS, v, I::Int...) = A.data[I...] = v +@inline Base.unsafe_setindex!(A::ArrayLSLS, v, I::Int...) = Base.unsafe_setindex!(A.data, v, I...) 
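# ArrayLSLS pairs a LinearSlow wrapper with a `similar` that also returns a
# LinearSlow wrapper, so nonscalar getindex on it allocates an ArrayLSLS
# destination and must fill it through the scalar setindex!/unsafe_setindex!
# methods above; `first` is simply forwarded to the wrapped Array below.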
+Base.first(A::ArrayLSLS) = first(A.data) + +immutable ArrayLF{T,N} <: MyArray{T,N} # LinearFast + data::Array{T,N} +end +immutable ArrayStrides{T,N} <: MyArray{T,N} + data::Array{T,N} + strides::NTuple{N,Int} +end +ArrayStrides(A::Array) = ArrayStrides(A, strides(A)) + +immutable ArrayStrides1{T} <: MyArray{T,2} + data::Matrix{T} + stride1::Int +end +ArrayStrides1(A::Array) = ArrayStrides1(A, size(A,1)) + +Base.size(A::MyArray) = size(A.data) + +@inline Base.getindex(A::ArrayLF, i::Int) = getindex(A.data, i) +@inline Base.getindex(A::ArrayLF, i::Int, i2::Int) = getindex(A.data, i, i2) +@inline Base.getindex(A::Union(ArrayLS, ArrayLSLS), i::Int, j::Int) = getindex(A.data, i, j) +@inline Base.unsafe_getindex(A::ArrayLF, indx::Int) = unsafe_getindex(A.data, indx) +@inline Base.unsafe_getindex(A::Union(ArrayLS, ArrayLSLS), i::Int, j::Int) = unsafe_getindex(A.data, i, j) + +@inline Base.getindex{T}(A::ArrayStrides{T,2}, i::Real, j::Real) = getindex(A.data, 1+A.strides[1]*(i-1)+A.strides[2]*(j-1)) +@inline Base.getindex(A::ArrayStrides1, i::Real, j::Real) = getindex(A.data, i + A.stride1*(j-1)) +@inline Base.unsafe_getindex{T}(A::ArrayStrides{T,2}, i::Real, j::Real) = unsafe_getindex(A.data, 1+A.strides[1]*(i-1)+A.strides[2]*(j-1)) +@inline Base.unsafe_getindex(A::ArrayStrides1, i::Real, j::Real) = unsafe_getindex(A.data, i + A.stride1*(j-1)) + +# Using the qualified Base.LinearFast() in the linearindexing definition +# requires looking up the symbol in the module on each call. +import Base: LinearFast +Base.linearindexing{T<:ArrayLF}(::Type{T}) = LinearFast() + +if !applicable(unsafe_getindex, [1 2], 1:1, 2) + @inline Base.unsafe_getindex(A::Array, I...) = @inbounds return A[I...] + @inline Base.unsafe_getindex(A::MyArray, I...) = @inbounds return A[I...] + @inline Base.unsafe_getindex(A::SubArray, I...) = @inbounds return A[I...] 
+ @inline Base.unsafe_getindex(A::BitArray, I1::BitArray, I2::Int) = unsafe_getindex(A, Base.to_index(I1), I2) +end + +function makearrays{T}(::Type{T}, sz) + L = prod(sz) + A = reshape(convert(Vector{T}, [1:L;]), sz) + AS = ArrayLS(A) + ASS = ArrayLSLS(A) + AF = ArrayLF(A) + Astrd = ArrayStrides(A) + Astrd1 = ArrayStrides1(A) + outersz = (sz[1]+1,sz[2]+2) + B = reshape(convert(Vector{T}, [1:prod(outersz);]), outersz) + Asub = sub(B, 1:sz[1], 2:sz[2]+1) + Bit = trues(sz) + (A, AS, AF, AS, ASS, AF, Asub, Bit,) +end + diff --git a/test/perf/array/perf.jl b/test/perf/array/perf.jl new file mode 100644 index 0000000000000..55fd7b865c00a --- /dev/null +++ b/test/perf/array/perf.jl @@ -0,0 +1,57 @@ +include("../perfutil.jl") + +include("indexing.jl") + +briefname(A) = typeof(A).name.name + +# Small array tests +sz = (3,5) +Alist = makearrays(Int, sz) +for Ar in Alist + @timeit sumelt(Ar, 10^5) string("sumeltIs ", briefname(Ar)) string("for a in A indexing, ", briefname(Ar)) sz + @timeit sumeach(Ar, 10^5) string("sumeachIs ", briefname(Ar)) string("for I in eachindex(A), ", briefname(Ar)) sz + @timeit sumlinear(Ar, 10^5) string("sumlinearIs ", briefname(Ar)) string("for I in 1:length(A), ", briefname(Ar)) sz + @timeit sumcartesian(Ar, 10^5) string("sumcartesianIs ", briefname(Ar)) string("for I in CartesianRange(size(A)), ", briefname(Ar)) sz + @timeit sumcolon(Ar, 10^5) string("sumcolonIs ", briefname(Ar)) string("colon indexing, ", briefname(Ar)) sz + @timeit sumrange(Ar, 10^5) string("sumrangeIs ", briefname(Ar)) string("range indexing, ", briefname(Ar)) sz + @timeit sumlogical(Ar, 10^5) string("sumlogicalIs ", briefname(Ar)) string("logical indexing, ", briefname(Ar)) sz + @timeit sumvector(Ar, 10^5) string("sumvectorIs ", briefname(Ar)) string("vector indexing, ", briefname(Ar)) sz +end + +Alist = makearrays(Float32, sz) # SIMD-able +for Ar in Alist + @timeit sumelt(Ar, 10^5) string("sumeltFs ", briefname(Ar)) string("for a in A indexing, ", briefname(Ar)) sz + @timeit sumeach(Ar, 10^5) string("sumeachFs ", briefname(Ar)) string("for I in eachindex(A), ", briefname(Ar)) sz + @timeit sumlinear(Ar, 10^5) string("sumlinearFs ", briefname(Ar)) string("for I in 1:length(A), ", briefname(Ar)) sz + @timeit sumcartesian(Ar, 10^5) string("sumcartesianFs ", briefname(Ar)) string("for I in CartesianRange(size(A)), ", briefname(Ar)) sz + @timeit sumcolon(Ar, 10^5) string("sumcolonFs ", briefname(Ar)) string("colon indexing, ", briefname(Ar)) sz + @timeit sumrange(Ar, 10^5) string("sumrangeFs ", briefname(Ar)) string("range indexing, ", briefname(Ar)) sz + @timeit sumlogical(Ar, 10^5) string("sumlogicalFs ", briefname(Ar)) string("logical indexing, ", briefname(Ar)) sz + @timeit sumvector(Ar, 10^5) string("sumvectorFs ", briefname(Ar)) string("vector indexing, ", briefname(Ar)) sz +end + +# Big array tests +sz = (300,500) +Alist = makearrays(Int, sz) +for Ar in Alist + @timeit sumelt(Ar, 100) string("sumeltIb ", briefname(Ar)) string("for a in A indexing, ", briefname(Ar)) sz + @timeit sumeach(Ar, 100) string("sumeachIb ", briefname(Ar)) string("for I in eachindex(A), ", briefname(Ar)) sz + @timeit sumlinear(Ar, 100) string("sumlinearIb ", briefname(Ar)) string("for I in 1:length(A), ", briefname(Ar)) sz + @timeit sumcartesian(Ar, 100) string("sumcartesianIb ", briefname(Ar)) string("for I in CartesianRange(size(A)), ", briefname(Ar)) sz + @timeit sumcolon(Ar, 100) string("sumcolonIb ", briefname(Ar)) string("colon indexing, ", briefname(Ar)) sz + @timeit sumrange(Ar, 100) string("sumrangeIb ", 
briefname(Ar)) string("range indexing, ", briefname(Ar)) sz + @timeit sumlogical(Ar, 100) string("sumlogicalIb ", briefname(Ar)) string("logical indexing, ", briefname(Ar)) sz + @timeit sumvector(Ar, 100) string("sumvectorIb ", briefname(Ar)) string("vector indexing, ", briefname(Ar)) sz +end + +Alist = makearrays(Float32, sz) # SIMD-able +for Ar in Alist + @timeit sumelt(Ar, 100) string("sumeltFb ", briefname(Ar)) string("for a in A indexing, ", briefname(Ar)) sz + @timeit sumeach(Ar, 100) string("sumeachFb ", briefname(Ar)) string("for I in eachindex(A), ", briefname(Ar)) sz + @timeit sumlinear(Ar, 100) string("sumlinearFb ", briefname(Ar)) string("for I in 1:length(A), ", briefname(Ar)) sz + @timeit sumcartesian(Ar, 100) string("sumcartesianFb ", briefname(Ar)) string("for I in CartesianRange(size(A)), ", briefname(Ar)) sz + @timeit sumcolon(Ar, 100) string("sumcolonFb ", briefname(Ar)) string("colon indexing, ", briefname(Ar)) sz + @timeit sumrange(Ar, 100) string("sumrangeFb ", briefname(Ar)) string("range indexing, ", briefname(Ar)) sz + @timeit sumlogical(Ar, 100) string("sumlogicalFb ", briefname(Ar)) string("logical indexing, ", briefname(Ar)) sz + @timeit sumvector(Ar, 100) string("sumvectorFb ", briefname(Ar)) string("vector indexing, ", briefname(Ar)) sz +end diff --git a/test/sparsedir/sparse.jl b/test/sparsedir/sparse.jl index 4a23c3df99ba7..1475ad14ee6df 100644 --- a/test/sparsedir/sparse.jl +++ b/test/sparsedir/sparse.jl @@ -742,15 +742,11 @@ a = SparseMatrixCSC(2, 2, [1, 3, 5], [1, 2, 1, 2], [1.0, 0.0, 0.0, 1.0]) @test_approx_eq cholfact(a)\[2.0, 3.0] [2.0, 3.0] end -# issue #10113 -let S = spzeros(5,1), I = [false,true,false,true,false] - @test_throws BoundsError S[I] -end - # issue #9917 @test sparse([]') == reshape(sparse([]), 1, 0) @test full(sparse([])) == zeros(0, 1) @test_throws BoundsError sparse([])[1] +@test_throws BoundsError sparse([])[1] = 1 x = speye(100) @test_throws BoundsError x[-10:10] diff --git a/test/subarray.jl b/test/subarray.jl index 84558dadd1b76..078d1eed08eea 100644 --- a/test/subarray.jl +++ b/test/subarray.jl @@ -395,6 +395,8 @@ sA = sub(A, 1:2:3, 1:3:5, 1:2:8) @test sA[:] == A[1:2:3, 1:3:5, 1:2:8][:] # issue #8807 @test sub(sub([1:5;], 1:5), 1:5) == [1:5;] +# Test with mixed types +@test sA[:, Int16[1,2], big(2)] == [31 40; 33 42] # sub logical indexing #4763 A = sub([1:10;], 5:8)
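A small illustration (not part of the test suite) of how the mixed-type indexing test above works: before any scalar method runs, `to_index` normalizes the index arguments, converting integer arrays to `Int` vectors, turning logical masks into positions, and passing `Colon` through unchanged. The scalar `big(2)` case assumes the usual `to_index(i::Integer)` conversion to `Int` that the rest of the patch relies on.

import Base: to_index

@assert to_index(Int16[1, 2]) == [1, 2]      # integer vectors -> Vector{Int}
@assert to_index(big(2)) === 2               # integer scalars -> Int
@assert to_index(Colon()) === Colon()        # Colon passes straight through
@assert to_index(trues(3)) == [1, 2, 3]      # logical masks -> positions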