diff --git a/NEWS.md b/NEWS.md index db83648157df93..df0ceb91ba24b3 100644 --- a/NEWS.md +++ b/NEWS.md @@ -859,6 +859,8 @@ Deprecated or removed * The functions `eigs` and `svds` have been moved to the `IterativeEigensolvers` standard library module ([#24714]). + * Sparse array functionality has moved to the `SparseArrays` standard library ([#TBD]). + * `@printf` and `@sprintf` have been moved to the `Printf` standard library ([#23929],[#25056]). * `isnumber` has been deprecated in favor of `isnumeric`, `is_assigned_char` diff --git a/base/asyncmap.jl b/base/asyncmap.jl index ff8853bfd1cc30..34f46de0dfc72d 100644 --- a/base/asyncmap.jl +++ b/base/asyncmap.jl @@ -262,10 +262,10 @@ end # TODO: Optimize for sparse arrays # For now process as regular arrays and convert back -function asyncmap(f, s::AbstractSparseArray...; kwargs...) - sa = map(Array, s) - return sparse(asyncmap(f, sa...; kwargs...)) -end +# function asyncmap(f, s::AbstractSparseArray...; kwargs...) +# sa = map(Array, s) +# return sparse(asyncmap(f, sa...; kwargs...)) +# end mutable struct AsyncCollector f diff --git a/base/deprecated.jl b/base/deprecated.jl index 729a8bd45dc25d..f18fca98e5dad1 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -226,13 +226,13 @@ end # Deprecate three-arg SubArray since the constructor doesn't need the dims tuple @deprecate SubArray(parent::AbstractArray, indices::Tuple, dims::Tuple) SubArray(parent, indices) -# Deprecate vectorized unary functions over sparse matrices in favor of compact broadcast syntax (#17265). -for f in (:sind, :asind, :tand, :atand, :sinpi, :cosc, :ceil, :floor, :trunc, - :round, :log1p, :expm1, :abs, :abs2, :log2, :log10, :exp2, :exp10, - :sinc, :cospi, :cosd, :acosd, :cotd, :acotd, :secd, :cscd) - @eval import .Math: $f - @eval @deprecate $f(A::SparseMatrixCSC) $f.(A) -end +# # Deprecate vectorized unary functions over sparse matrices in favor of compact broadcast syntax (#17265). +# for f in (:sind, :asind, :tand, :atand, :sinpi, :cosc, :ceil, :floor, :trunc, +# :round, :log1p, :expm1, :abs, :abs2, :log2, :log10, :exp2, :exp10, +# :sinc, :cospi, :cosd, :acosd, :cotd, :acotd, :secd, :cscd) +# @eval import .Math: $f +# @eval @deprecate $f(A::SparseMatrixCSC) $f.(A) +# end # For deprecating vectorized functions in favor of compact broadcast syntax macro dep_vectorize_1arg(S, f) @@ -349,7 +349,7 @@ end @deprecate abs(D::Diagonal) abs.(D) @deprecate abs(M::Tridiagonal) abs.(M) @deprecate abs(M::SymTridiagonal) abs.(M) -@deprecate abs(x::AbstractSparseVector) abs.(x) +# @deprecate abs(x::AbstractSparseVector) abs.(x) # Deprecate @textmime into the Multimedia module, #18441 @eval Multimedia macro textmime(mime) @@ -523,132 +523,132 @@ for (dep, f, op) in [(:sumabs!, :sum!, :abs), end end -## Deprecate broadcast_zpreserving[!] (wasn't exported, but might as well be friendly) -function gen_broadcast_function_sparse(genbody::Function, f::Function, is_first_sparse::Bool) - body = genbody(f, is_first_sparse) - @eval let - local _F_ - function _F_(B::SparseMatrixCSC{Tv,Ti}, A_1, A_2) where {Tv,Ti} - $body - end - _F_ - end -end -function gen_broadcast_body_zpreserving(f::Function, is_first_sparse::Bool) - F = Expr(:quote, f) - if is_first_sparse - A1 = :(A_1) - A2 = :(A_2) - op1 = :(val1) - op2 = :(val2) - else - A1 = :(A_2) - A2 = :(A_1) - op1 = :(val2) - op2 = :(val1) - end - quote - Base.Broadcast.check_broadcast_indices(axes(B), $A1) - Base.Broadcast.check_broadcast_indices(axes(B), $A2) - - nnzB = isempty(B) ? 
0 : - nnz($A1) * div(B.n, ($A1).n) * div(B.m, ($A1).m) - if length(B.rowval) < nnzB - resize!(B.rowval, nnzB) - end - if length(B.nzval) < nnzB - resize!(B.nzval, nnzB) - end - z = zero(Tv) - - ptrB = 1 - B.colptr[1] = 1 - - @inbounds for col = 1:B.n - ptr1::Int = ($A1).n == 1 ? ($A1).colptr[1] : ($A1).colptr[col] - stop1::Int = ($A1).n == 1 ? ($A1).colptr[2] : ($A1).colptr[col+1] - col2 = size($A2, 2) == 1 ? 1 : col - row = 1 - while ptr1 < stop1 && row <= B.m - if ($A1).m != 1 - row = ($A1).rowval[ptr1] - end - row2 = size($A2, 1) == 1 ? 1 : row - val1 = ($A1).nzval[ptr1] - val2 = ($A2)[row2,col2] - res = ($F)($op1, $op2) - if res != z - B.rowval[ptrB] = row - B.nzval[ptrB] = res - ptrB += 1 - end - if ($A1).m != 1 - ptr1 += 1 - else - row += 1 - end - end - B.colptr[col+1] = ptrB - end - deleteat!(B.rowval, B.colptr[end]:length(B.rowval)) - deleteat!(B.nzval, B.colptr[end]:length(B.nzval)) - nothing - end -end -for (Bsig, A1sig, A2sig, gbb, funcname) in - ( - (SparseMatrixCSC , SparseMatrixCSC , Array, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), - (SparseMatrixCSC , Array , SparseMatrixCSC, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), - (SparseMatrixCSC , Number , SparseMatrixCSC, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), - (SparseMatrixCSC , SparseMatrixCSC , Number, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), - (SparseMatrixCSC , BitArray , SparseMatrixCSC, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), - (SparseMatrixCSC , SparseMatrixCSC , BitArray, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), - ) - @eval let cache = Dict{Function,Function}() - global $funcname - function $funcname(f::Function, B::$Bsig, A1::$A1sig, A2::$A2sig) - func = @get! cache f gen_broadcast_function_sparse($gbb, f, ($A1sig) <: SparseMatrixCSC) - # need eval because func was just created by gen_broadcast_function_sparse - # TODO: convert this to a generated function - eval(_current_module(), Expr(:body, Expr(:return, Expr(:call, QuoteNode(func), QuoteNode(B), QuoteNode(A1), QuoteNode(A2))))) - return B - end - end # let broadcast_cache -end -_broadcast_zpreserving!(args...) = broadcast!(args...) -# note: promote_eltype_op also deprecated, defined later in this file -_broadcast_zpreserving(f, As...) = - broadcast!(f, similar(Array{_promote_eltype_op(f, As...)}, Base.Broadcast.broadcast_indices(As...)), As...) -_broadcast_zpreserving(f::Function, A_1::SparseMatrixCSC{Tv1,Ti1}, A_2::SparseMatrixCSC{Tv2,Ti2}) where {Tv1,Ti1,Tv2,Ti2} = - _broadcast_zpreserving!(f, spzeros(promote_type(Tv1, Tv2), promote_type(Ti1, Ti2), Base.to_shape(Base.Broadcast.broadcast_indices(A_1, A_2))), A_1, A_2) -_broadcast_zpreserving(f::Function, A_1::SparseMatrixCSC{<:Any,Ti}, A_2::Union{Array,BitArray,Number}) where {Ti} = - _broadcast_zpreserving!(f, spzeros(promote_eltype(A_1, A_2), Ti, Base.to_shape(Base.Broadcast.broadcast_indices(A_1, A_2))), A_1, A_2) -_broadcast_zpreserving(f::Function, A_1::Union{Array,BitArray,Number}, A_2::SparseMatrixCSC{<:Any,Ti}) where {Ti} = - _broadcast_zpreserving!(f, spzeros(promote_eltype(A_1, A_2), Ti, Base.to_shape(Base.Broadcast.broadcast_indices(A_1, A_2))), A_1, A_2) - -function _depstring_bczpres() - return string("broadcast_zpreserving[!] is deprecated. Generic sparse broadcast[!] ", - "provides most of broadcast_zpreserving[!]'s functionality. If you have a use case ", - "that generic sparse broadcast[!] 
does not cover, please describe your use case in ", - " issue #19533 (https://github.com/JuliaLang/julia/issues/19533).") -end -function _depwarn_bczpres(f, args...) - depwarn(_depstring_bczpres(), :broadcast_zpreserving) - return _broadcast_zpreserving(f, args...) -end -function _depwarn_bczpres!(f, args...) - depwarn(_depstring_bczpres(), :broadcast_zpreserving!) - return _broadcast_zpreserving!(f, args...) -end -@eval SparseArrays begin - broadcast_zpreserving(f, args...) = Base._depwarn_bczpres(f, args...) - broadcast_zpreserving(f, A::SparseMatrixCSC, B::SparseMatrixCSC) = Base._depwarn_bczpres(f, A, B) - broadcast_zpreserving(f, A::SparseMatrixCSC, B::Union{Array,BitArray,Number}) = Base._depwarn_bczpres(f, A, B) - broadcast_zpreserving(f, A::Union{Array,BitArray,Number}, B::SparseMatrixCSC) = Base._depwarn_bczpres(f, A, B) - broadcast_zpreserving!(f, args...) = Base._depwarn_bczpres!(f, args...) - broadcast_zpreserving!(f, C::SparseMatrixCSC, A::SparseMatrixCSC, B::Union{Array,BitArray,Number}) = Base._depwarn_bczpres!(f, C, A, B) - broadcast_zpreserving!(f, C::SparseMatrixCSC, A::Union{Array,BitArray,Number}, B::SparseMatrixCSC) = Base._depwarn_bczpres!(f, C, A, B) -end +# ## Deprecate broadcast_zpreserving[!] (wasn't exported, but might as well be friendly) +# function gen_broadcast_function_sparse(genbody::Function, f::Function, is_first_sparse::Bool) +# body = genbody(f, is_first_sparse) +# @eval let +# local _F_ +# function _F_(B::SparseMatrixCSC{Tv,Ti}, A_1, A_2) where {Tv,Ti} +# $body +# end +# _F_ +# end +# end +# function gen_broadcast_body_zpreserving(f::Function, is_first_sparse::Bool) +# F = Expr(:quote, f) +# if is_first_sparse +# A1 = :(A_1) +# A2 = :(A_2) +# op1 = :(val1) +# op2 = :(val2) +# else +# A1 = :(A_2) +# A2 = :(A_1) +# op1 = :(val2) +# op2 = :(val1) +# end +# quote +# Base.Broadcast.check_broadcast_indices(axes(B), $A1) +# Base.Broadcast.check_broadcast_indices(axes(B), $A2) + +# nnzB = isempty(B) ? 0 : +# nnz($A1) * div(B.n, ($A1).n) * div(B.m, ($A1).m) +# if length(B.rowval) < nnzB +# resize!(B.rowval, nnzB) +# end +# if length(B.nzval) < nnzB +# resize!(B.nzval, nnzB) +# end +# z = zero(Tv) + +# ptrB = 1 +# B.colptr[1] = 1 + +# @inbounds for col = 1:B.n +# ptr1::Int = ($A1).n == 1 ? ($A1).colptr[1] : ($A1).colptr[col] +# stop1::Int = ($A1).n == 1 ? ($A1).colptr[2] : ($A1).colptr[col+1] +# col2 = size($A2, 2) == 1 ? 1 : col +# row = 1 +# while ptr1 < stop1 && row <= B.m +# if ($A1).m != 1 +# row = ($A1).rowval[ptr1] +# end +# row2 = size($A2, 1) == 1 ? 
1 : row +# val1 = ($A1).nzval[ptr1] +# val2 = ($A2)[row2,col2] +# res = ($F)($op1, $op2) +# if res != z +# B.rowval[ptrB] = row +# B.nzval[ptrB] = res +# ptrB += 1 +# end +# if ($A1).m != 1 +# ptr1 += 1 +# else +# row += 1 +# end +# end +# B.colptr[col+1] = ptrB +# end +# deleteat!(B.rowval, B.colptr[end]:length(B.rowval)) +# deleteat!(B.nzval, B.colptr[end]:length(B.nzval)) +# nothing +# end +# end +# for (Bsig, A1sig, A2sig, gbb, funcname) in +# ( +# (SparseMatrixCSC , SparseMatrixCSC , Array, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), +# (SparseMatrixCSC , Array , SparseMatrixCSC, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), +# (SparseMatrixCSC , Number , SparseMatrixCSC, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), +# (SparseMatrixCSC , SparseMatrixCSC , Number, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), +# (SparseMatrixCSC , BitArray , SparseMatrixCSC, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), +# (SparseMatrixCSC , SparseMatrixCSC , BitArray, :gen_broadcast_body_zpreserving, :_broadcast_zpreserving!), +# ) +# @eval let cache = Dict{Function,Function}() +# global $funcname +# function $funcname(f::Function, B::$Bsig, A1::$A1sig, A2::$A2sig) +# func = @get! cache f gen_broadcast_function_sparse($gbb, f, ($A1sig) <: SparseMatrixCSC) +# # need eval because func was just created by gen_broadcast_function_sparse +# # TODO: convert this to a generated function +# eval(_current_module(), Expr(:body, Expr(:return, Expr(:call, QuoteNode(func), QuoteNode(B), QuoteNode(A1), QuoteNode(A2))))) +# return B +# end +# end # let broadcast_cache +# end +# _broadcast_zpreserving!(args...) = broadcast!(args...) +# # note: promote_eltype_op also deprecated, defined later in this file +# _broadcast_zpreserving(f, As...) = +# broadcast!(f, similar(Array{_promote_eltype_op(f, As...)}, Base.Broadcast.broadcast_indices(As...)), As...) +# _broadcast_zpreserving(f::Function, A_1::SparseMatrixCSC{Tv1,Ti1}, A_2::SparseMatrixCSC{Tv2,Ti2}) where {Tv1,Ti1,Tv2,Ti2} = +# _broadcast_zpreserving!(f, spzeros(promote_type(Tv1, Tv2), promote_type(Ti1, Ti2), Base.to_shape(Base.Broadcast.broadcast_indices(A_1, A_2))), A_1, A_2) +# _broadcast_zpreserving(f::Function, A_1::SparseMatrixCSC{<:Any,Ti}, A_2::Union{Array,BitArray,Number}) where {Ti} = +# _broadcast_zpreserving!(f, spzeros(promote_eltype(A_1, A_2), Ti, Base.to_shape(Base.Broadcast.broadcast_indices(A_1, A_2))), A_1, A_2) +# _broadcast_zpreserving(f::Function, A_1::Union{Array,BitArray,Number}, A_2::SparseMatrixCSC{<:Any,Ti}) where {Ti} = +# _broadcast_zpreserving!(f, spzeros(promote_eltype(A_1, A_2), Ti, Base.to_shape(Base.Broadcast.broadcast_indices(A_1, A_2))), A_1, A_2) + +# function _depstring_bczpres() +# return string("broadcast_zpreserving[!] is deprecated. Generic sparse broadcast[!] ", +# "provides most of broadcast_zpreserving[!]'s functionality. If you have a use case ", +# "that generic sparse broadcast[!] does not cover, please describe your use case in ", +# " issue #19533 (https://github.com/JuliaLang/julia/issues/19533).") +# end +# function _depwarn_bczpres(f, args...) +# depwarn(_depstring_bczpres(), :broadcast_zpreserving) +# return _broadcast_zpreserving(f, args...) +# end +# function _depwarn_bczpres!(f, args...) +# depwarn(_depstring_bczpres(), :broadcast_zpreserving!) +# return _broadcast_zpreserving!(f, args...) +# end +# @eval SparseArrays begin +# broadcast_zpreserving(f, args...) = Base._depwarn_bczpres(f, args...) 
+# broadcast_zpreserving(f, A::SparseMatrixCSC, B::SparseMatrixCSC) = Base._depwarn_bczpres(f, A, B) +# broadcast_zpreserving(f, A::SparseMatrixCSC, B::Union{Array,BitArray,Number}) = Base._depwarn_bczpres(f, A, B) +# broadcast_zpreserving(f, A::Union{Array,BitArray,Number}, B::SparseMatrixCSC) = Base._depwarn_bczpres(f, A, B) +# broadcast_zpreserving!(f, args...) = Base._depwarn_bczpres!(f, args...) +# broadcast_zpreserving!(f, C::SparseMatrixCSC, A::SparseMatrixCSC, B::Union{Array,BitArray,Number}) = Base._depwarn_bczpres!(f, C, A, B) +# broadcast_zpreserving!(f, C::SparseMatrixCSC, A::Union{Array,BitArray,Number}, B::SparseMatrixCSC) = Base._depwarn_bczpres!(f, C, A, B) +# end # #19719 @deprecate getindex(t::Tuple, r::AbstractArray) getindex(t, vec(r)) @@ -688,8 +688,8 @@ function promote_array_type(F, R, S, T) _promote_array_type(F, R, S, T) end -# Deprecate manually vectorized abs2 methods in favor of compact broadcast syntax -@deprecate abs2(x::AbstractSparseVector) abs2.(x) +# # Deprecate manually vectorized abs2 methods in favor of compact broadcast syntax +# @deprecate abs2(x::AbstractSparseVector) abs2.(x) # Deprecate manually vectorized sign methods in favor of compact broadcast syntax @deprecate sign(A::AbstractArray) sign.(A) @@ -1389,8 +1389,8 @@ import .LinAlg: lufact, lufact!, qrfact, qrfact!, cholfact, cholfact! @deprecate cholfact!(A::AbstractMatrix, ::Type{Val{false}}) cholfact!(A, Val(false)) @deprecate cholfact!(A::AbstractMatrix, ::Type{Val{true}}; tol = 0.0) cholfact!(A, Val(true); tol = tol) @deprecate cat(::Type{Val{N}}, A::AbstractArray...) where {N} cat(Val(N), A...) -@deprecate cat(::Type{Val{N}}, A::SparseArrays._SparseConcatGroup...) where {N} cat(Val(N), A...) -@deprecate cat(::Type{Val{N}}, A::SparseArrays._DenseConcatGroup...) where {N} cat(Val(N), A...) +# @deprecate cat(::Type{Val{N}}, A::SparseArrays._SparseConcatGroup...) where {N} cat(Val(N), A...) +# @deprecate cat(::Type{Val{N}}, A::SparseArrays._DenseConcatGroup...) where {N} cat(Val(N), A...) 
@deprecate cat_t(::Type{Val{N}}, ::Type{T}, A, B) where {N,T} cat_t(Val(N), T, A, B) false @deprecate reshape(A::AbstractArray, ::Type{Val{N}}) where {N} reshape(A, Val(N)) @@ -1442,23 +1442,23 @@ end # remove parse-with-chains-warn and bitshift-warn # update precedence table in doc/src/manual/mathematical-operations.md -# deprecate remaining vectorized methods over SparseVectors (zero-preserving) -for op in (:floor, :ceil, :trunc, :round, - :log1p, :expm1, :sinpi, - :sin, :tan, :sind, :tand, - :asin, :atan, :asind, :atand, - :sinh, :tanh, :asinh, :atanh) - @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) -end -# deprecate remaining vectorized methods over SparseVectors (not-zero-preserving) -for op in (:exp, :exp2, :exp10, :log, :log2, :log10, - :cos, :cosd, :acos, :cosh, :cospi, - :csc, :cscd, :acot, :csch, :acsch, - :cot, :cotd, :acosd, :coth, - :sec, :secd, :acotd, :sech, :asech) - @eval import .Math: $op - @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) -end +# # deprecate remaining vectorized methods over SparseVectors (zero-preserving) +# for op in (:floor, :ceil, :trunc, :round, +# :log1p, :expm1, :sinpi, +# :sin, :tan, :sind, :tand, +# :asin, :atan, :asind, :atand, +# :sinh, :tanh, :asinh, :atanh) +# @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) +# end +# # deprecate remaining vectorized methods over SparseVectors (not-zero-preserving) +# for op in (:exp, :exp2, :exp10, :log, :log2, :log10, +# :cos, :cosd, :acos, :cosh, :cospi, +# :csc, :cscd, :acot, :csch, :acsch, +# :cot, :cotd, :acosd, :coth, +# :sec, :secd, :acotd, :sech, :asech) +# @eval import .Math: $op +# @eval @deprecate ($op)(x::AbstractSparseVector{<:Number,<:Integer}) ($op).(x) +# end # PR #22182 @deprecate is_apple Sys.isapple @@ -1629,11 +1629,12 @@ end # PR #23066 @deprecate cfunction(f, r, a::Tuple) cfunction(f, r, Tuple{a...}) -# PR 23341 -import .LinAlg: diagm -@deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A))) +# # PR 23341 +# import .LinAlg: diagm +# @deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A))) # PR #23373 +import .LinAlg: diagm @deprecate diagm(A::BitMatrix) BitMatrix(Diagonal(vec(A))) # PR 23341 @@ -1748,31 +1749,31 @@ end @deprecate contains(eq::Function, itr, x) any(y->eq(y,x), itr) # PR #23757 -import .SparseArrays.spdiagm -@deprecate spdiagm(x::AbstractVector) sparse(Diagonal(x)) -function spdiagm(x::AbstractVector, d::Number) - depwarn(string("`spdiagm(x::AbstractVector, d::Number)` is deprecated, use ", - "`spdiagm(d => x)` instead, which now returns a square matrix. To preserve the old ", - "behaviour, use `sparse(SparseArrays.spdiagm_internal(d => x)...)`"), :spdiagm) - I, J, V = SparseArrays.spdiagm_internal(d => x) - return sparse(I, J, V) -end -function spdiagm(x, d) - depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...))` is deprecated, use ", - "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. ", - "To preserve the old behaviour, use ", - "`sparse(SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...)...)`"), :spdiagm) - I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) - return sparse(I, J, V) -end -function spdiagm(x, d, m::Integer, n::Integer) - depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...), m, n)` is deprecated, use ", - "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. 
", - "To specify a non-square matrix and preserve the old behaviour, use ", - "`I, J, V = SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...); sparse(I, J, V, m, n)`"), :spdiagm) - I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) - return sparse(I, J, V, m, n) -end +# import .SparseArrays.spdiagm +# @deprecate spdiagm(x::AbstractVector) sparse(Diagonal(x)) +# function spdiagm(x::AbstractVector, d::Number) +# depwarn(string("`spdiagm(x::AbstractVector, d::Number)` is deprecated, use ", +# "`spdiagm(d => x)` instead, which now returns a square matrix. To preserve the old ", +# "behaviour, use `sparse(SparseArrays.spdiagm_internal(d => x)...)`"), :spdiagm) +# I, J, V = SparseArrays.spdiagm_internal(d => x) +# return sparse(I, J, V) +# end +# function spdiagm(x, d) +# depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...))` is deprecated, use ", +# "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. ", +# "To preserve the old behaviour, use ", +# "`sparse(SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...)...)`"), :spdiagm) +# I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) +# return sparse(I, J, V) +# end +# function spdiagm(x, d, m::Integer, n::Integer) +# depwarn(string("`spdiagm((x1, x2, ...), (d1, d2, ...), m, n)` is deprecated, use ", +# "`spdiagm(d1 => x1, d2 => x2, ...)` instead, which now returns a square matrix. ", +# "To specify a non-square matrix and preserve the old behaviour, use ", +# "`I, J, V = SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...); sparse(I, J, V, m, n)`"), :spdiagm) +# I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) +# return sparse(I, J, V, m, n) +# end # deprecate zeros(D::Diagonal[, opts...]) function zeros(D::Diagonal) @@ -1940,7 +1941,7 @@ function toc() return t end -@eval Base.SparseArrays @deprecate sparse(s::UniformScaling, m::Integer) sparse(s, m, m) +# @eval Base.SparseArrays @deprecate sparse(s::UniformScaling, m::Integer) sparse(s, m, m) # A[I...] .= with scalar indices should modify the element at A[I...] function Broadcast.dotview(A::AbstractArray, args::Number...) @@ -2019,11 +2020,11 @@ end # PR #25030 @eval LinAlg @deprecate fillslots! fillstored! false -# PR #25037 -@eval SparseArrays @deprecate spones(A::SparseMatrixCSC) LinAlg.fillstored!(copy(A), 1) -@eval SparseArrays @deprecate spones(A::SparseVector) LinAlg.fillstored!(copy(A), 1) -using .SparseArrays.spones -export spones +# # PR #25037 +# @eval SparseArrays @deprecate spones(A::SparseMatrixCSC) LinAlg.fillstored!(copy(A), 1) +# @eval SparseArrays @deprecate spones(A::SparseVector) LinAlg.fillstored!(copy(A), 1) +# using .SparseArrays.spones +# export spones function diagm(v::BitVector) depwarn(string("`diagm(v::BitVector)` is deprecated, use `diagm(0 => v)` or ", @@ -2073,18 +2074,18 @@ function full(A::Union{Diagonal,Bidiagonal,Tridiagonal,SymTridiagonal}) return Matrix(A) end -# full for sparse arrays -function full(S::Union{SparseVector,SparseMatrixCSC}) - (arrtypestr, desttypestr) = - isa(S, SparseVector) ? ("SparseVector", "Vector") : - isa(S, SparseMatrixCSC) ? ("SparseMatrixCSC", "Matrix") : - error("should not be reachable!") - depwarn(string( - "`full(S::$(arrtypestr))` (and `full` in general) has been deprecated. 
", - "To replace `full(S::$(arrtypestr))`, consider `$(desttypestr)(S)` or, ", - "if that option is too narrow, `Array(S)`."), :full) - return Array(S) -end +# # full for sparse arrays +# function full(S::Union{SparseVector,SparseMatrixCSC}) +# (arrtypestr, desttypestr) = +# isa(S, SparseVector) ? ("SparseVector", "Vector") : +# isa(S, SparseMatrixCSC) ? ("SparseMatrixCSC", "Matrix") : +# error("should not be reachable!") +# depwarn(string( +# "`full(S::$(arrtypestr))` (and `full` in general) has been deprecated. ", +# "To replace `full(S::$(arrtypestr))`, consider `$(desttypestr)(S)` or, ", +# "if that option is too narrow, `Array(S)`."), :full) +# return Array(S) +# end # full for factorizations function full(F::Union{LinAlg.LU,LinAlg.LQ,LinAlg.QR,LinAlg.QRPivoted,LinAlg.QRCompactWY, @@ -2198,7 +2199,7 @@ end # issue #22849 @deprecate reinterpret(::Type{T}, a::Array{S}, dims::NTuple{N,Int}) where {T, S, N} reshape(reinterpret(T, vec(a)), dims) -@deprecate reinterpret(::Type{T}, a::SparseMatrixCSC{S}, dims::NTuple{N,Int}) where {T, S, N} reinterpret(T, reshape(a, dims)) +# @deprecate reinterpret(::Type{T}, a::SparseMatrixCSC{S}, dims::NTuple{N,Int}) where {T, S, N} reinterpret(T, reshape(a, dims)) @deprecate reinterpret(::Type{T}, a::ReinterpretArray{S}, dims::NTuple{N,Int}) where {T, S, N} reshape(reinterpret(T, vec(a)), dims) # issue #24006 @@ -2236,53 +2237,53 @@ end @deprecate bits bitstring # deprecate speye -export speye -function speye(n::Integer) - depwarn(string("`speye(n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", - "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", - "`sparse(1.0I, n, n)`, `SparseMatrixCSC(1.0I, n, n)`, or `SparseMatrixCSC{Float64}(I, n, n)`. ", - "If `Float64` element type is not necessary, consider the shorter `sparse(I, n, n)` ", - "or `SparseMatrixCSC(I, n, n)` (with default `eltype(I)` of `Bool`)."), :speye) - return sparse(1.0I, n, n) -end -function speye(m::Integer, n::Integer) - depwarn(string("`speye(m::Integer, n::Integer)` has been deprecated in favor of `I`, ", - "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", - "replacement, consider `sparse(1.0I, m, n)`, `SparseMatrixCSC(1.0I, m, n)`, ", - "or `SparseMatrixCSC{Float64}(I, m, n)`. If `Float64` element type is not ", - " necessary, consider the shorter `sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` ", - "(with default `eltype(I)` of `Bool`)."), :speye) - return sparse(1.0I, m, n) -end -function speye(::Type{T}, n::Integer) where T - depwarn(string("`speye(T, n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", - "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", - "`sparse(T(1)I, n, n)` if `T` is concrete or `SparseMatrixCSC{T}(I, n, n)` ", - "if `T` is either concrete or abstract. If element type `T` is not necessary, ", - "consider the shorter `sparse(I, n, n)` or `SparseMatrixCSC(I, n, n)` ", - "(with default `eltype(I)` of `Bool`)."), :speye) - return SparseMatrixCSC{T}(I, n, n) -end -function speye(::Type{T}, m::Integer, n::Integer) where T - depwarn(string("`speye(T, m::Integer, n::Integer)` has been deprecated in favor of `I`, ", - "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", - "replacement, consider `sparse(T(1)I, m, n)` if `T` is concrete or ", - "`SparseMatrixCSC{T}(I, m, n)` if `T` is either concrete or abstract. 
", - "If element type `T` is not necessary, consider the shorter ", - "`sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` (with default `eltype(I)` ", - "of `Bool`)."), :speye) - return SparseMatrixCSC{T}(I, m, n) -end -function speye(S::SparseMatrixCSC{T}) where T - depwarn(string("`speye(S::SparseMatrixCSC{T})` has been deprecated in favor of `I`, ", - "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", - "replacement, consider `sparse(T(1)I, size(S)...)` if `T` is concrete or ", - "`SparseMatrixCSC{eltype(S)}(I, size(S))` if `T` is either concrete or abstract. ", - "If preserving element type `T` is not necessary, consider the shorter ", - "`sparse(I, size(S)...)` or `SparseMatrixCSC(I, size(S))` (with default ", - "`eltype(I)` of `Bool`)."), :speye) - return SparseMatrixCSC{T}(I, m, n) -end +# export speye +# function speye(n::Integer) +# depwarn(string("`speye(n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", +# "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", +# "`sparse(1.0I, n, n)`, `SparseMatrixCSC(1.0I, n, n)`, or `SparseMatrixCSC{Float64}(I, n, n)`. ", +# "If `Float64` element type is not necessary, consider the shorter `sparse(I, n, n)` ", +# "or `SparseMatrixCSC(I, n, n)` (with default `eltype(I)` of `Bool`)."), :speye) +# return sparse(1.0I, n, n) +# end +# function speye(m::Integer, n::Integer) +# depwarn(string("`speye(m::Integer, n::Integer)` has been deprecated in favor of `I`, ", +# "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", +# "replacement, consider `sparse(1.0I, m, n)`, `SparseMatrixCSC(1.0I, m, n)`, ", +# "or `SparseMatrixCSC{Float64}(I, m, n)`. If `Float64` element type is not ", +# " necessary, consider the shorter `sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` ", +# "(with default `eltype(I)` of `Bool`)."), :speye) +# return sparse(1.0I, m, n) +# end +# function speye(::Type{T}, n::Integer) where T +# depwarn(string("`speye(T, n::Integer)` has been deprecated in favor of `I`, `sparse`, and ", +# "`SparseMatrixCSC` constructor methods. For a direct replacement, consider ", +# "`sparse(T(1)I, n, n)` if `T` is concrete or `SparseMatrixCSC{T}(I, n, n)` ", +# "if `T` is either concrete or abstract. If element type `T` is not necessary, ", +# "consider the shorter `sparse(I, n, n)` or `SparseMatrixCSC(I, n, n)` ", +# "(with default `eltype(I)` of `Bool`)."), :speye) +# return SparseMatrixCSC{T}(I, n, n) +# end +# function speye(::Type{T}, m::Integer, n::Integer) where T +# depwarn(string("`speye(T, m::Integer, n::Integer)` has been deprecated in favor of `I`, ", +# "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", +# "replacement, consider `sparse(T(1)I, m, n)` if `T` is concrete or ", +# "`SparseMatrixCSC{T}(I, m, n)` if `T` is either concrete or abstract. ", +# "If element type `T` is not necessary, consider the shorter ", +# "`sparse(I, m, n)` or `SparseMatrixCSC(I, m, n)` (with default `eltype(I)` ", +# "of `Bool`)."), :speye) +# return SparseMatrixCSC{T}(I, m, n) +# end +# function speye(S::SparseMatrixCSC{T}) where T +# depwarn(string("`speye(S::SparseMatrixCSC{T})` has been deprecated in favor of `I`, ", +# "`sparse`, and `SparseMatrixCSC` constructor methods. For a direct ", +# "replacement, consider `sparse(T(1)I, size(S)...)` if `T` is concrete or ", +# "`SparseMatrixCSC{eltype(S)}(I, size(S))` if `T` is either concrete or abstract. 
", +# "If preserving element type `T` is not necessary, consider the shorter ", +# "`sparse(I, size(S)...)` or `SparseMatrixCSC(I, size(S))` (with default ", +# "`eltype(I)` of `Bool`)."), :speye) +# return SparseMatrixCSC{T}(I, m, n) +# end # issue #24167 @deprecate EnvHash EnvDict @@ -3255,86 +3256,86 @@ end @deprecate A_mul_Bc(A::AbstractVecOrMat{T}, R::AbstractRotation{S}) where {T,S} (*)(A, Adjoint(R)) end -# former imports into SparseArrays -@eval Base.SparseArrays begin - import Base: A_mul_B!, Ac_mul_B, Ac_mul_B!, At_mul_B, At_mul_B! - import Base: A_mul_Bc, A_mul_Bt, Ac_mul_Bc, At_mul_Bt - import Base: At_ldiv_B, Ac_ldiv_B, A_ldiv_B! - import Base.LinAlg: At_ldiv_B!, Ac_ldiv_B!, A_rdiv_B!, A_rdiv_Bc!, mul!, ldiv!, rdiv! -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/sparse/linalg.jl, to deprecate -@eval Base.SparseArrays begin - using Base.LinAlg: Adjoint, Transpose - @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(Adjoint(A), B) - @deprecate At_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(Transpose(A), B) - @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(Adjoint(A), B) - @deprecate At_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(Transpose(A), B) - @deprecate A_rdiv_Bc!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, Adjoint(D)) - @deprecate A_rdiv_Bt!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, Transpose(D)) - @deprecate A_rdiv_B!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, D) - @deprecate A_ldiv_B!(L::LowerTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(L, B) - @deprecate A_ldiv_B!(U::UpperTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(U, B) - @deprecate A_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, Transpose(B)) - @deprecate A_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, Adjoint(B)) - @deprecate At_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Transpose(A), B) - @deprecate Ac_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Adjoint(A), B) - @deprecate At_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Transpose(A), Transpose(B)) - @deprecate Ac_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Adjoint(A), Adjoint(B)) - @deprecate A_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, A, B) - @deprecate Ac_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, Adjoint(A), B) - @deprecate At_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, Transpose(A), B) - @deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, A, B, β, C) - @deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(A, x) - @deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(A, B) - @deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, Adjoint(A), B, β, C) - @deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(Adjoint(A), x) - @deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(Adjoint(A), B) - @deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, Transpose(A), B, β, C) - @deprecate 
At_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(Transpose(A), x) - @deprecate At_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(Transpose(A), B) - @deprecate A_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, Transpose(B)) - @deprecate A_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, Adjoint(B)) - @deprecate At_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Transpose(A), B) - @deprecate Ac_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Adjoint(A),B) - @deprecate At_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Transpose(A), Transpose(B)) - @deprecate Ac_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Adjoint(A), Adjoint(B)) -end - -# A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/sparse/sparsevector.jl, to deprecate -for isunittri in (true, false), islowertri in (true, false) - unitstr = isunittri ? "Unit" : "" - halfstr = islowertri ? "Lower" : "Upper" - tritype = :(Base.LinAlg.$(Symbol(unitstr, halfstr, "Triangular"))) - @eval Base.SparseArrays begin - using Base.LinAlg: Adjoint, Transpose - @deprecate At_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Transpose(A), b) - @deprecate At_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Transpose(A), b) - @deprecate At_ldiv_B(A::$tritype, b::SparseVector) (\)(Transpose(A), b) - @deprecate Ac_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Adjoint(A), b) - @deprecate Ac_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Adjoint(A), b) - @deprecate Ac_ldiv_B(A::$tritype, b::SparseVector) (\)(Adjoint(A), b) - @deprecate A_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(A, b) - @deprecate At_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(Transpose(A), b) - @deprecate Ac_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(Adjoint(A), b) - end -end -@eval Base.SparseArrays begin - using Base.LinAlg: Adjoint, Transpose - @deprecate Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(Adjoint(A), x) - @deprecate At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(Transpose(A), x) - @deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, Adjoint(A), x, β, y) - @deprecate Ac_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, Adjoint(A), x) - @deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, Transpose(A), x, β, y) - @deprecate At_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, Transpose(A), x) - @deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) - @deprecate A_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) - @deprecate At_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, Transpose(A), x, β, y) - @deprecate At_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, 
x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, Transpose(A), x) - @deprecate At_mul_B(A::StridedMatrix{Ta}, x::AbstractSparseVector{Tx}) where {Ta,Tx} (*)(Transpose(A), x) - @deprecate A_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) - @deprecate A_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) -end +# # former imports into SparseArrays +# @eval Base.SparseArrays begin +# import Base: A_mul_B!, Ac_mul_B, Ac_mul_B!, At_mul_B, At_mul_B! +# import Base: A_mul_Bc, A_mul_Bt, Ac_mul_Bc, At_mul_Bt +# import Base: At_ldiv_B, Ac_ldiv_B, A_ldiv_B! +# import Base.LinAlg: At_ldiv_B!, Ac_ldiv_B!, A_rdiv_B!, A_rdiv_Bc!, mul!, ldiv!, rdiv! +# end + +# # A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/sparse/linalg.jl, to deprecate +# @eval Base.SparseArrays begin +# using Base.LinAlg: Adjoint, Transpose +# @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(Adjoint(A), B) +# @deprecate At_ldiv_B(A::SparseMatrixCSC, B::RowVector) (\)(Transpose(A), B) +# @deprecate Ac_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(Adjoint(A), B) +# @deprecate At_ldiv_B(A::SparseMatrixCSC, B::AbstractVecOrMat) (\)(Transpose(A), B) +# @deprecate A_rdiv_Bc!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, Adjoint(D)) +# @deprecate A_rdiv_Bt!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, Transpose(D)) +# @deprecate A_rdiv_B!(A::SparseMatrixCSC{T}, D::Diagonal{T}) where {T} rdiv!(A, D) +# @deprecate A_ldiv_B!(L::LowerTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(L, B) +# @deprecate A_ldiv_B!(U::UpperTriangular{T,<:SparseMatrixCSCUnion{T}}, B::StridedVecOrMat) where {T} ldiv!(U, B) +# @deprecate A_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, Transpose(B)) +# @deprecate A_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(A, Adjoint(B)) +# @deprecate At_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Transpose(A), B) +# @deprecate Ac_mul_B(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Adjoint(A), B) +# @deprecate At_mul_Bt(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Transpose(A), Transpose(B)) +# @deprecate Ac_mul_Bc(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} (*)(Adjoint(A), Adjoint(B)) +# @deprecate A_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, A, B) +# @deprecate Ac_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, Adjoint(A), B) +# @deprecate At_mul_B!(C::StridedVecOrMat, A::SparseMatrixCSC, B::StridedVecOrMat) mul!(C, Transpose(A), B) +# @deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, A, B, β, C) +# @deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(A, x) +# @deprecate A_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(A, B) +# @deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, Adjoint(A), B, β, C) +# @deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(Adjoint(A), x) +# @deprecate Ac_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(Adjoint(A), B) +# @deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, B::StridedVecOrMat, β::Number, C::StridedVecOrMat) mul!(α, Transpose(A), B, 
β, C) +# @deprecate At_mul_B(A::SparseMatrixCSC{TA,S}, x::StridedVector{Tx}) where {TA,S,Tx} (*)(Transpose(A), x) +# @deprecate At_mul_B(A::SparseMatrixCSC{TA,S}, B::StridedMatrix{Tx}) where {TA,S,Tx} (*)(Transpose(A), B) +# @deprecate A_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, Transpose(B)) +# @deprecate A_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(A, Adjoint(B)) +# @deprecate At_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Transpose(A), B) +# @deprecate Ac_mul_B(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Adjoint(A),B) +# @deprecate At_mul_Bt(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Transpose(A), Transpose(B)) +# @deprecate Ac_mul_Bc(A::SparseMatrixCSC{TvA,TiA}, B::SparseMatrixCSC{TvB,TiB}) where {TvA,TiA,TvB,TiB} (*)(Adjoint(A), Adjoint(B)) +# end + +# # A[ct]_(mul|ldiv|rdiv)_B[ct][!] methods from base/sparse/sparsevector.jl, to deprecate +# for isunittri in (true, false), islowertri in (true, false) +# unitstr = isunittri ? "Unit" : "" +# halfstr = islowertri ? "Lower" : "Upper" +# tritype = :(Base.LinAlg.$(Symbol(unitstr, halfstr, "Triangular"))) +# @eval Base.SparseArrays begin +# using Base.LinAlg: Adjoint, Transpose +# @deprecate At_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Transpose(A), b) +# @deprecate At_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Transpose(A), b) +# @deprecate At_ldiv_B(A::$tritype, b::SparseVector) (\)(Transpose(A), b) +# @deprecate Ac_ldiv_B(A::$tritype{TA,<:AbstractMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Adjoint(A), b) +# @deprecate Ac_ldiv_B(A::$tritype{TA,<:StridedMatrix}, b::SparseVector{Tb}) where {TA<:Number,Tb<:Number} (\)(Adjoint(A), b) +# @deprecate Ac_ldiv_B(A::$tritype, b::SparseVector) (\)(Adjoint(A), b) +# @deprecate A_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(A, b) +# @deprecate At_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(Transpose(A), b) +# @deprecate Ac_ldiv_B!(A::$tritype{<:Any,<:StridedMatrix}, b::SparseVector) ldiv!(Adjoint(A), b) +# end +# end +# @eval Base.SparseArrays begin +# using Base.LinAlg: Adjoint, Transpose +# @deprecate Ac_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(Adjoint(A), x) +# @deprecate At_mul_B(A::SparseMatrixCSC, x::AbstractSparseVector) (*)(Transpose(A), x) +# @deprecate Ac_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, Adjoint(A), x, β, y) +# @deprecate Ac_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, Adjoint(A), x) +# @deprecate At_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, Transpose(A), x, β, y) +# @deprecate At_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, Transpose(A), x) +# @deprecate A_mul_B!(α::Number, A::SparseMatrixCSC, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) +# @deprecate A_mul_B!(y::StridedVector{Ty}, A::SparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) +# @deprecate At_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, Transpose(A), x, β, y) +# @deprecate 
At_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, Transpose(A), x) +# @deprecate At_mul_B(A::StridedMatrix{Ta}, x::AbstractSparseVector{Tx}) where {Ta,Tx} (*)(Transpose(A), x) +# @deprecate A_mul_B!(α::Number, A::StridedMatrix, x::AbstractSparseVector, β::Number, y::StridedVector) mul!(α, A, x, β, y) +# @deprecate A_mul_B!(y::StridedVector{Ty}, A::StridedMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} mul!(y, A, x) +# end # methods involving RowVector from base/linalg/bidiag.jl, to deprecate @@ -3356,12 +3357,12 @@ end *(D::Diagonal, adjrowvec::Adjoint{<:Any,<:RowVector}) = (rowvec = adjrowvec.parent; D*rvadjoint(rowvec)) end -# methods involving RowVector from base/sparse/linalg.jl, to deprecate -@eval Base.SparseArrays begin - \(::SparseMatrixCSC, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Adjoint{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) - \(::Transpose{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) -end +# # methods involving RowVector from base/sparse/linalg.jl, to deprecate +# @eval Base.SparseArrays begin +# \(::SparseMatrixCSC, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +# \(::Adjoint{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +# \(::Transpose{<:Any,<:SparseMatrixCSC}, ::RowVector) = throw(DimensionMismatch("Cannot left-divide matrix by transposed vector")) +# end # methods involving RowVector from base/linalg/qr.jl, to deprecate @eval Base.LinAlg begin @@ -3421,10 +3422,10 @@ end \(A::Transpose{<:Any,<:Factorization{<:Real}}, B::RowVector) = transpose(A.parent) \ B end -# methods involving RowVector from base/sparse/higherorderfns.jl, to deprecate -@eval Base.SparseArrays.HigherOrderFns begin - BroadcastStyle(::Type{<:Base.RowVector{T,<:Vector}}) where T = Broadcast.MatrixStyle() -end +# # methods involving RowVector from base/sparse/higherorderfns.jl, to deprecate +# @eval Base.SparseArrays.HigherOrderFns begin +# BroadcastStyle(::Type{<:Base.RowVector{T,<:Vector}}) where T = Broadcast.MatrixStyle() +# end # methods involving RowVector from base/linalg/symmetric.jl, to deprecate @eval Base.LinAlg begin diff --git a/base/exports.jl b/base/exports.jl index bbc505393a62e8..740e544042b0eb 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -434,7 +434,6 @@ export minimum, minmax, ndims, - nonzeros, ones, parent, parentindices, @@ -442,7 +441,6 @@ export partialsort!, partialsortperm, partialsortperm!, - permute, permute!, permutedims, permutedims!, @@ -513,7 +511,6 @@ export # linear algebra bkfact!, bkfact, - blkdiag, chol, cholfact!, cholfact, @@ -594,10 +591,6 @@ export ⋅, ×, -# sparse - dropzeros, - dropzeros!, - # bitarrays falses, flipbits!, @@ -1183,22 +1176,4 @@ export @goto, @view, @views, - @static, - -# SparseArrays module re-exports - SparseArrays, - AbstractSparseArray, - AbstractSparseMatrix, - AbstractSparseVector, - SparseMatrixCSC, - SparseVector, - issparse, - sparse, - sparsevec, - spdiagm, - sprand, - sprandn, - spzeros, - rowvals, - nzrange, - nnz + @static diff --git a/base/linalg/bidiag.jl b/base/linalg/bidiag.jl index 7c6ad8b1bd2ebc..18d756d4b2fcbb 100644 --- a/base/linalg/bidiag.jl +++ b/base/linalg/bidiag.jl @@ -178,7 +178,8 @@ broadcast(::typeof(big), B::Bidiagonal) = Bidiagonal(big.(B.dv), 
big.(B.ev), B.u # On the other hand, similar(B, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(B::Bidiagonal, ::Type{T}) where {T} = Bidiagonal(similar(B.dv, T), similar(B.ev, T), B.uplo) -similar(B::Bidiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(B::Bidiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) ################### diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl index 246c3ec059a71a..d10fa4beb37ae2 100644 --- a/base/linalg/dense.jl +++ b/base/linalg/dense.jl @@ -303,8 +303,6 @@ Vector `kv.second` will be placed on the `kv.first` diagonal. versions with fast arithmetic, see [`Diagonal`](@ref), [`Bidiagonal`](@ref) [`Tridiagonal`](@ref) and [`SymTridiagonal`](@ref). -See also: [`spdiagm`](@ref) - # Examples ```jldoctest julia> diagm(1 => [1,2,3]) diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index 25600afa2293ff..35eee3a94fc163 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -59,7 +59,8 @@ Array(D::Diagonal) = Matrix(D) # On the other hand, similar(D, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(D::Diagonal, ::Type{T}) where {T} = Diagonal(similar(D.diag, T)) -similar(D::Diagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(D::Diagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) copyto!(D1::Diagonal, D2::Diagonal) = (copyto!(D1.diag, D2.diag); D1) diff --git a/base/linalg/tridiag.jl b/base/linalg/tridiag.jl index 0bc8448e50c0df..702af46f20ed76 100644 --- a/base/linalg/tridiag.jl +++ b/base/linalg/tridiag.jl @@ -111,7 +111,8 @@ end # On the other hand, similar(S, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(S::SymTridiagonal, ::Type{T}) where {T} = SymTridiagonal(similar(S.dv, T), similar(S.ev, T)) -similar(S::SymTridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(S::SymTridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) #Elementary operations broadcast(::typeof(abs), M::SymTridiagonal) = SymTridiagonal(abs.(M.dv), abs.(M.ev)) @@ -494,7 +495,8 @@ Array(M::Tridiagonal) = Matrix(M) # On the other hand, similar(M, [neweltype,] shape...) should yield a sparse matrix. # The first method below effects the former, and the second the latter. similar(M::Tridiagonal, ::Type{T}) where {T} = Tridiagonal(similar(M.dl, T), similar(M.d, T), similar(M.du, T)) -similar(M::Tridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +# The method below is moved to SparseArrays for now +# similar(M::Tridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) 
# Operations on Tridiagonal matrices copyto!(dest::Tridiagonal, src::Tridiagonal) = (copyto!(dest.dl, src.dl); copyto!(dest.d, src.d); copyto!(dest.du, src.du); dest) diff --git a/base/sysimg.jl b/base/sysimg.jl index ab6906a7bd9052..2f1a42d157f132 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -428,8 +428,8 @@ include("libgit2/libgit2.jl") include("pkg/pkg.jl") # sparse matrices, vectors, and sparse linear algebra -include("sparse/sparse.jl") -using .SparseArrays +# include("sparse/sparse.jl") +# using .SparseArrays include("asyncmap.jl") @@ -499,6 +499,7 @@ Base.require(:IterativeEigensolvers) Base.require(:Mmap) Base.require(:Profile) Base.require(:SharedArrays) +Base.require(:SparseArrays) Base.require(:SuiteSparse) Base.require(:Test) Base.require(:Unicode) diff --git a/doc/make.jl b/doc/make.jl index e67f63e768003a..fc1119d713083e 100644 --- a/doc/make.jl +++ b/doc/make.jl @@ -30,6 +30,7 @@ if Sys.iswindows() cp_q("../stdlib/FileWatching/docs/src/index.md", "src/stdlib/filewatching.md") cp_q("../stdlib/CRC32c/docs/src/index.md", "src/stdlib/crc32c.md") cp_q("../stdlib/Dates/docs/src/index.md", "src/stdlib/dates.md") + cp_q("../stdlib/SparseArrays/docs/src/index.md", "src/stdlib/sparsearrays.md") cp_q("../stdlib/IterativeEigensolvers/docs/src/index.md", "src/stdlib/iterativeeigensolvers.md") cp_q("../stdlib/Unicode/docs/src/index.md", "src/stdlib/unicode.md") cp_q("../stdlib/Distributed/docs/src/index.md", "src/stdlib/distributed.md") @@ -44,6 +45,7 @@ else symlink_q("../../../stdlib/FileWatching/docs/src/index.md", "src/stdlib/filewatching.md") symlink_q("../../../stdlib/CRC32c/docs/src/index.md", "src/stdlib/crc32c.md") symlink_q("../../../stdlib/Dates/docs/src/index.md", "src/stdlib/dates.md") + symlink_q("../../../stdlib/SparseArrays/docs/src/index.md", "src/stdlib/sparsearrays.md") symlink_q("../../../stdlib/IterativeEigensolvers/docs/src/index.md", "src/stdlib/iterativeeigensolvers.md") symlink_q("../../../stdlib/Unicode/docs/src/index.md", "src/stdlib/unicode.md") symlink_q("../../../stdlib/Distributed/docs/src/index.md", "src/stdlib/distributed.md") @@ -125,6 +127,7 @@ const PAGES = [ "stdlib/sharedarrays.md", "stdlib/filewatching.md", "stdlib/crc32c.md", + "stdlib/sparsearrays.md", "stdlib/iterativeeigensolvers.md", "stdlib/unicode.md", "stdlib/printf.md", @@ -163,12 +166,13 @@ const PAGES = [ ] using DelimitedFiles, Test, Mmap, SharedArrays, Profile, Base64, FileWatching, CRC32c, - Dates, IterativeEigensolvers, Unicode, Distributed, Printf + Dates, SparseArrays, IterativeEigensolvers, Unicode, Distributed, Printf makedocs( build = joinpath(pwd(), "_build/html/en"), modules = [Base, Core, BuildSysImg, DelimitedFiles, Test, Mmap, SharedArrays, Profile, - Base64, FileWatching, Dates, IterativeEigensolvers, Unicode, Distributed, Printf], + Base64, FileWatching, Dates, SparseArrays, IterativeEigensolvers, Unicode, + Distributed, Printf], clean = false, doctest = "doctest" in ARGS, linkcheck = "linkcheck" in ARGS, diff --git a/doc/src/index.md b/doc/src/index.md index ce23e41a10256c..8b4be1fbf62c82 100644 --- a/doc/src/index.md +++ b/doc/src/index.md @@ -74,6 +74,7 @@ * [Memory-mapped I/O](@ref) * [Base64](@ref) * [File Events](@ref lib-filewatching) + * [Sparse Arrays](@ref) * [Iterative Eigensolvers](@ref lib-itereigen) * [Unicode](@ref) * [Printf](@ref) diff --git a/doc/src/manual/arrays.md b/doc/src/manual/arrays.md index 9b4099cea73460..d9071cf0eb2745 100644 --- a/doc/src/manual/arrays.md +++ b/doc/src/manual/arrays.md @@ -762,195 +762,3 @@ julia> r -1.58553 -0.921517 
0.0 0.866567 ``` - -## Sparse Vectors and Matrices - -Julia has built-in support for sparse vectors and -[sparse matrices](https://en.wikipedia.org/wiki/Sparse_matrix). Sparse arrays are arrays -that contain enough zeros that storing them in a special data structure leads to savings -in space and execution time, compared to dense arrays. - -### [Compressed Sparse Column (CSC) Sparse Matrix Storage](@id man-csc) - -In Julia, sparse matrices are stored in the [Compressed Sparse Column (CSC) format](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_.28CSC_or_CCS.29). -Julia sparse matrices have the type [`SparseMatrixCSC{Tv,Ti}`](@ref), where `Tv` is the -type of the stored values, and `Ti` is the integer type for storing column pointers and -row indices. The internal representation of `SparseMatrixCSC` is as follows: - -```julia -struct SparseMatrixCSC{Tv,Ti<:Integer} <: AbstractSparseMatrix{Tv,Ti} - m::Int # Number of rows - n::Int # Number of columns - colptr::Vector{Ti} # Column i is in colptr[i]:(colptr[i+1]-1) - rowval::Vector{Ti} # Row indices of stored values - nzval::Vector{Tv} # Stored values, typically nonzeros -end -``` - -The compressed sparse column storage makes it easy and quick to access the elements in the column -of a sparse matrix, whereas accessing the sparse matrix by rows is considerably slower. Operations -such as insertion of previously unstored entries one at a time in the CSC structure tend to be slow. This is -because all elements of the sparse matrix that are beyond the point of insertion have to be moved -one place over. - -All operations on sparse matrices are carefully implemented to exploit the CSC data structure -for performance, and to avoid expensive operations. - -If you have data in CSC format from a different application or library, and wish to import it -in Julia, make sure that you use 1-based indexing. The row indices in every column need to be -sorted. If your `SparseMatrixCSC` object contains unsorted row indices, one quick way to sort -them is by doing a double transpose. - -In some applications, it is convenient to store explicit zero values in a `SparseMatrixCSC`. These -*are* accepted by functions in `Base` (but there is no guarantee that they will be preserved in -mutating operations). Such explicitly stored zeros are treated as structural nonzeros by many -routines. The [`nnz`](@ref) function returns the number of elements explicitly stored in the -sparse data structure, including structural nonzeros. In order to count the exact number of -numerical nonzeros, use [`count(!iszero, x)`](@ref), which inspects every stored element of a sparse -matrix. [`dropzeros`](@ref), and the in-place [`dropzeros!`](@ref), can be used to -remove stored zeros from the sparse matrix. - -```jldoctest -julia> A = sparse([1, 2, 3], [1, 2, 3], [0, 2, 0]) -3×3 SparseMatrixCSC{Int64,Int64} with 3 stored entries: - [1, 1] = 0 - [2, 2] = 2 - [3, 3] = 0 - -julia> dropzeros(A) -3×3 SparseMatrixCSC{Int64,Int64} with 1 stored entry: - [2, 2] = 2 -``` - -### Sparse Vector Storage - -Sparse vectors are stored in a close analog to compressed sparse column format for sparse -matrices. In Julia, sparse vectors have the type [`SparseVector{Tv,Ti}`](@ref) where `Tv` -is the type of the stored values and `Ti` the integer type for the indices. 
The internal -representation is as follows: - -```julia -struct SparseVector{Tv,Ti<:Integer} <: AbstractSparseVector{Tv,Ti} - n::Int # Length of the sparse vector - nzind::Vector{Ti} # Indices of stored values - nzval::Vector{Tv} # Stored values, typically nonzeros -end -``` - -As for [`SparseMatrixCSC`](@ref), the `SparseVector` type can also contain explicitly -stored zeros. (See [Sparse Matrix Storage](@ref man-csc).). - -### Sparse Vector and Matrix Constructors - -The simplest way to create a sparse array is to use a function equivalent to the [`zeros`](@ref) -function that Julia provides for working with dense arrays. To produce a -sparse array instead, you can use the same name with an `sp` prefix: - -```jldoctest -julia> spzeros(3) -3-element SparseVector{Float64,Int64} with 0 stored entries -``` - -The [`sparse`](@ref) function is often a handy way to construct sparse arrays. For -example, to construct a sparse matrix we can input a vector `I` of row indices, a vector -`J` of column indices, and a vector `V` of stored values (this is also known as the -[COO (coordinate) format](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29)). -`sparse(I,J,V)` then constructs a sparse matrix such that `S[I[k], J[k]] = V[k]`. The -equivalent sparse vector constructor is [`sparsevec`](@ref), which takes the (row) index -vector `I` and the vector `V` with the stored values and constructs a sparse vector `R` -such that `R[I[k]] = V[k]`. - -```jldoctest sparse_function -julia> I = [1, 4, 3, 5]; J = [4, 7, 18, 9]; V = [1, 2, -5, 3]; - -julia> S = sparse(I,J,V) -5×18 SparseMatrixCSC{Int64,Int64} with 4 stored entries: - [1 , 4] = 1 - [4 , 7] = 2 - [5 , 9] = 3 - [3 , 18] = -5 - -julia> R = sparsevec(I,V) -5-element SparseVector{Int64,Int64} with 4 stored entries: - [1] = 1 - [3] = -5 - [4] = 2 - [5] = 3 -``` - -The inverse of the [`sparse`](@ref) and [`sparsevec`](@ref) functions is -[`findnz`](@ref), which retrieves the inputs used to create the sparse array. -There is also a [`findn`](@ref) function which only returns the index vectors. - -```jldoctest sparse_function -julia> findnz(S) -([1, 4, 5, 3], [4, 7, 9, 18], [1, 2, 3, -5]) - -julia> findn(S) -([1, 4, 5, 3], [4, 7, 9, 18]) - -julia> findnz(R) -([1, 3, 4, 5], [1, -5, 2, 3]) - -julia> find(!iszero, R) -4-element Array{Int64,1}: - 1 - 3 - 4 - 5 -``` - -Another way to create a sparse array is to convert a dense array into a sparse array using -the [`sparse`](@ref) function: - -```jldoctest -julia> sparse(Matrix(1.0I, 5, 5)) -5×5 SparseMatrixCSC{Float64,Int64} with 5 stored entries: - [1, 1] = 1.0 - [2, 2] = 1.0 - [3, 3] = 1.0 - [4, 4] = 1.0 - [5, 5] = 1.0 - -julia> sparse([1.0, 0.0, 1.0]) -3-element SparseVector{Float64,Int64} with 2 stored entries: - [1] = 1.0 - [3] = 1.0 -``` - -You can go in the other direction using the [`Array`](@ref) constructor. The [`issparse`](@ref) -function can be used to query if a matrix is sparse. - -```jldoctest -julia> issparse(spzeros(5)) -true -``` - -### Sparse matrix operations - -Arithmetic operations on sparse matrices also work as they do on dense matrices. Indexing of, -assignment into, and concatenation of sparse matrices work in the same way as dense matrices. -Indexing operations, especially assignment, are expensive, when carried out one element at a time. -In many cases it may be better to convert the sparse matrix into `(I,J,V)` format using [`findnz`](@ref), -manipulate the values or the structure in the dense vectors `(I,J,V)`, and then reconstruct -the sparse matrix. 
- -### Correspondence of dense and sparse methods - -The following table gives a correspondence between built-in methods on sparse matrices and their -corresponding methods on dense matrix types. In general, methods that generate sparse matrices -differ from their dense counterparts in that the resulting matrix follows the same sparsity pattern -as a given sparse matrix `S`, or that the resulting sparse matrix has density `d`, i.e. each matrix -element has a probability `d` of being non-zero. - -Details can be found in the [Sparse Vectors and Matrices](@ref stdlib-sparse-arrays) -section of the standard library reference. - -| Sparse | Dense | Description | -|:-------------------------- |:---------------------- |:--------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`spzeros(m,n)`](@ref) | [`zeros(m,n)`](@ref) | Creates a *m*-by-*n* matrix of zeros. ([`spzeros(m,n)`](@ref) is empty.) | -| [`sparse(I, n, n)`](@ref) | [`Matrix(I,n,n)`](@ref)| Creates a *n*-by-*n* identity matrix. | -| [`Array(S)`](@ref) | [`sparse(A)`](@ref) | Interconverts between dense and sparse formats. | -| [`sprand(m,n,d)`](@ref) | [`rand(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed uniformly on the half-open interval ``[0, 1)``. | -| [`sprandn(m,n,d)`](@ref) | [`randn(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the standard normal (Gaussian) distribution. | -| [`sprandn(m,n,d,X)`](@ref) | [`randn(m,n,X)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the *X* distribution. (Requires the `Distributions` package.) | diff --git a/doc/src/stdlib/.gitignore b/doc/src/stdlib/.gitignore index e7a76fa8960d0e..39b8b4d335dc77 100644 --- a/doc/src/stdlib/.gitignore +++ b/doc/src/stdlib/.gitignore @@ -8,6 +8,7 @@ filewatching.md crc32c.md dates.md unicode.md +sparsearrays.md iterativeeigensolvers.md printf.md distributed.md diff --git a/doc/src/stdlib/arrays.md b/doc/src/stdlib/arrays.md index c3f1efca5d5ea6..b2a1074ab520d3 100644 --- a/doc/src/stdlib/arrays.md +++ b/doc/src/stdlib/arrays.md @@ -188,30 +188,3 @@ and can be converted to/from the latter via `Array(bitarray)` and `BitArray(arra ```@docs Base.flipbits! ``` - -## [Sparse Vectors and Matrices](@id stdlib-sparse-arrays) - -Sparse vectors and matrices largely support the same set of operations as their dense counterparts. -The following functions are specific to sparse arrays. 
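As a quick illustration of the random-constructor semantics in the correspondence table above, where density `d` means each entry is stored with probability roughly `d` (a minimal sketch, assuming `SparseArrays` is loaded):

```julia
using SparseArrays

A = sprand(1000, 1000, 0.01)    # ~1% of entries stored, values uniform on [0, 1)
nnz(A) / length(A)              # observed density, close to 0.01

B = sprandn(1000, 1000, 0.01)   # same density semantics, standard-normal values
Z = spzeros(1000, 1000)         # no stored entries at all
```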
- -```@docs -Base.SparseArrays.SparseVector -Base.SparseArrays.SparseMatrixCSC -Base.SparseArrays.sparse -Base.SparseArrays.sparsevec -Base.SparseArrays.issparse -Base.SparseArrays.nnz -Base.SparseArrays.spzeros -Base.SparseArrays.spdiagm -Base.SparseArrays.sprand -Base.SparseArrays.sprandn -Base.SparseArrays.nonzeros -Base.SparseArrays.rowvals -Base.SparseArrays.nzrange -Base.SparseArrays.dropzeros!(::SparseMatrixCSC, ::Bool) -Base.SparseArrays.dropzeros(::SparseMatrixCSC, ::Bool) -Base.SparseArrays.dropzeros!(::SparseVector, ::Bool) -Base.SparseArrays.dropzeros(::SparseVector, ::Bool) -Base.SparseArrays.permute -Base.permute!{Tv, Ti, Tp <: Integer, Tq <: Integer}(::SparseMatrixCSC{Tv,Ti}, ::SparseMatrixCSC{Tv,Ti}, ::AbstractArray{Tp,1}, ::AbstractArray{Tq,1}) -``` diff --git a/doc/src/stdlib/base.md b/doc/src/stdlib/base.md index d0ea63d72b38f9..f986d319ce3759 100644 --- a/doc/src/stdlib/base.md +++ b/doc/src/stdlib/base.md @@ -98,7 +98,6 @@ Base.Markdown Base.Meta Base.Pkg Base.Serializer -Base.SparseArrays Base.StackTraces Base.Sys Base.Threads diff --git a/doc/src/stdlib/index.md b/doc/src/stdlib/index.md index 5356aaf26ffa0f..7dc8a9fff2879e 100644 --- a/doc/src/stdlib/index.md +++ b/doc/src/stdlib/index.md @@ -31,5 +31,6 @@ * [Memory-mapped I/O](@ref) * [Base64](@ref) * [File Events](@ref lib-filewatching) + * [Sparse Arrays](@ref) * [Iterative Eigensolvers](@ref lib-itereigen) * [Printf](@ref) diff --git a/doc/src/stdlib/linalg.md b/doc/src/stdlib/linalg.md index a1b3ae0e292d5b..e6d500201aba2f 100644 --- a/doc/src/stdlib/linalg.md +++ b/doc/src/stdlib/linalg.md @@ -91,7 +91,6 @@ Base.LinAlg.pinv Base.LinAlg.nullspace Base.repmat Base.kron -Base.SparseArrays.blkdiag Base.LinAlg.linreg Base.LinAlg.exp(::StridedMatrix{<:Base.LinAlg.BlasFloat}) Base.LinAlg.log(::StridedMatrix) diff --git a/stdlib/Future/test/runtests.jl b/stdlib/Future/test/runtests.jl index 4233dc0fad0bdb..820f4bdd3cedd7 100644 --- a/stdlib/Future/test/runtests.jl +++ b/stdlib/Future/test/runtests.jl @@ -2,6 +2,7 @@ using Test using Future +using SparseArrays @testset "Future.copy! for AbstractSet" begin for S = (Set, BitSet) diff --git a/stdlib/IterativeEigensolvers/test/runtests.jl b/stdlib/IterativeEigensolvers/test/runtests.jl index a8782939da55cd..cb9ddc949b7871 100644 --- a/stdlib/IterativeEigensolvers/test/runtests.jl +++ b/stdlib/IterativeEigensolvers/test/runtests.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using IterativeEigensolvers -using Test +using Test, SparseArrays @testset "eigs" begin srand(1234) diff --git a/stdlib/SparseArrays/docs/src/index.md b/stdlib/SparseArrays/docs/src/index.md new file mode 100644 index 00000000000000..95e4b4f3bc4426 --- /dev/null +++ b/stdlib/SparseArrays/docs/src/index.md @@ -0,0 +1,216 @@ +# Sparse Arrays + +Julia has support for sparse vectors and [sparse matrices](https://en.wikipedia.org/wiki/Sparse_matrix) +in the `SparseArrays` stdlib module. Sparse arrays are arrays that contain enough zeros +that storing them in a special data structure leads to savings in space and execution time, +compared to dense arrays. + +## [Compressed Sparse Column (CSC) Sparse Matrix Storage](@id man-csc) + +In Julia, sparse matrices are stored in the [Compressed Sparse Column (CSC) format](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_.28CSC_or_CCS.29). 
+Julia sparse matrices have the type [`SparseMatrixCSC{Tv,Ti}`](@ref), where `Tv` is the +type of the stored values, and `Ti` is the integer type for storing column pointers and +row indices. The internal representation of `SparseMatrixCSC` is as follows: + +```julia +struct SparseMatrixCSC{Tv,Ti<:Integer} <: AbstractSparseMatrix{Tv,Ti} + m::Int # Number of rows + n::Int # Number of columns + colptr::Vector{Ti} # Column i is in colptr[i]:(colptr[i+1]-1) + rowval::Vector{Ti} # Row indices of stored values + nzval::Vector{Tv} # Stored values, typically nonzeros +end +``` + +The compressed sparse column storage makes it easy and quick to access the elements in the column +of a sparse matrix, whereas accessing the sparse matrix by rows is considerably slower. Operations +such as insertion of previously unstored entries one at a time in the CSC structure tend to be slow. This is +because all elements of the sparse matrix that are beyond the point of insertion have to be moved +one place over. + +All operations on sparse matrices are carefully implemented to exploit the CSC data structure +for performance, and to avoid expensive operations. + +If you have data in CSC format from a different application or library, and wish to import it +in Julia, make sure that you use 1-based indexing. The row indices in every column need to be +sorted. If your `SparseMatrixCSC` object contains unsorted row indices, one quick way to sort +them is by doing a double transpose. + +In some applications, it is convenient to store explicit zero values in a `SparseMatrixCSC`. These +*are* accepted by functions in `Base` (but there is no guarantee that they will be preserved in +mutating operations). Such explicitly stored zeros are treated as structural nonzeros by many +routines. The [`nnz`](@ref) function returns the number of elements explicitly stored in the +sparse data structure, including structural nonzeros. In order to count the exact number of +numerical nonzeros, use [`count(!iszero, x)`](@ref), which inspects every stored element of a sparse +matrix. [`dropzeros`](@ref), and the in-place [`dropzeros!`](@ref), can be used to +remove stored zeros from the sparse matrix. + +```jldoctest +julia> A = sparse([1, 2, 3], [1, 2, 3], [0, 2, 0]) +3×3 SparseMatrixCSC{Int64,Int64} with 3 stored entries: + [1, 1] = 0 + [2, 2] = 2 + [3, 3] = 0 + +julia> dropzeros(A) +3×3 SparseMatrixCSC{Int64,Int64} with 1 stored entry: + [2, 2] = 2 +``` + +## Sparse Vector Storage + +Sparse vectors are stored in a close analog to compressed sparse column format for sparse +matrices. In Julia, sparse vectors have the type [`SparseVector{Tv,Ti}`](@ref) where `Tv` +is the type of the stored values and `Ti` the integer type for the indices. The internal +representation is as follows: + +```julia +struct SparseVector{Tv,Ti<:Integer} <: AbstractSparseVector{Tv,Ti} + n::Int # Length of the sparse vector + nzind::Vector{Ti} # Indices of stored values + nzval::Vector{Tv} # Stored values, typically nonzeros +end +``` + +As for [`SparseMatrixCSC`](@ref), the `SparseVector` type can also contain explicitly +stored zeros. (See [Sparse Matrix Storage](@ref man-csc).). + +## Sparse Vector and Matrix Constructors + +The simplest way to create a sparse array is to use a function equivalent to the [`zeros`](@ref) +function that Julia provides for working with dense arrays. 
To produce a +sparse array instead, you can use the same name with an `sp` prefix: + +```jldoctest +julia> spzeros(3) +3-element SparseVector{Float64,Int64} with 0 stored entries +``` + +The [`sparse`](@ref) function is often a handy way to construct sparse arrays. For +example, to construct a sparse matrix we can input a vector `I` of row indices, a vector +`J` of column indices, and a vector `V` of stored values (this is also known as the +[COO (coordinate) format](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29)). +`sparse(I,J,V)` then constructs a sparse matrix such that `S[I[k], J[k]] = V[k]`. The +equivalent sparse vector constructor is [`sparsevec`](@ref), which takes the (row) index +vector `I` and the vector `V` with the stored values and constructs a sparse vector `R` +such that `R[I[k]] = V[k]`. + +```jldoctest sparse_function +julia> I = [1, 4, 3, 5]; J = [4, 7, 18, 9]; V = [1, 2, -5, 3]; + +julia> S = sparse(I,J,V) +5×18 SparseMatrixCSC{Int64,Int64} with 4 stored entries: + [1 , 4] = 1 + [4 , 7] = 2 + [5 , 9] = 3 + [3 , 18] = -5 + +julia> R = sparsevec(I,V) +5-element SparseVector{Int64,Int64} with 4 stored entries: + [1] = 1 + [3] = -5 + [4] = 2 + [5] = 3 +``` + +The inverse of the [`sparse`](@ref) and [`sparsevec`](@ref) functions is +[`findnz`](@ref), which retrieves the inputs used to create the sparse array. +There is also a [`findn`](@ref) function which only returns the index vectors. + +```jldoctest sparse_function +julia> findnz(S) +([1, 4, 5, 3], [4, 7, 9, 18], [1, 2, 3, -5]) + +julia> findn(S) +([1, 4, 5, 3], [4, 7, 9, 18]) + +julia> findnz(R) +([1, 3, 4, 5], [1, -5, 2, 3]) + +julia> find(!iszero, R) +4-element Array{Int64,1}: + 1 + 3 + 4 + 5 +``` + +Another way to create a sparse array is to convert a dense array into a sparse array using +the [`sparse`](@ref) function: + +```jldoctest +julia> sparse(Matrix(1.0I, 5, 5)) +5×5 SparseMatrixCSC{Float64,Int64} with 5 stored entries: + [1, 1] = 1.0 + [2, 2] = 1.0 + [3, 3] = 1.0 + [4, 4] = 1.0 + [5, 5] = 1.0 + +julia> sparse([1.0, 0.0, 1.0]) +3-element SparseVector{Float64,Int64} with 2 stored entries: + [1] = 1.0 + [3] = 1.0 +``` + +You can go in the other direction using the [`Array`](@ref) constructor. The [`issparse`](@ref) +function can be used to query if a matrix is sparse. + +```jldoctest +julia> issparse(spzeros(5)) +true +``` + +## Sparse matrix operations + +Arithmetic operations on sparse matrices also work as they do on dense matrices. Indexing of, +assignment into, and concatenation of sparse matrices work in the same way as dense matrices. +Indexing operations, especially assignment, are expensive, when carried out one element at a time. +In many cases it may be better to convert the sparse matrix into `(I,J,V)` format using [`findnz`](@ref), +manipulate the values or the structure in the dense vectors `(I,J,V)`, and then reconstruct +the sparse matrix. + +## Correspondence of dense and sparse methods + +The following table gives a correspondence between built-in methods on sparse matrices and their +corresponding methods on dense matrix types. In general, methods that generate sparse matrices +differ from their dense counterparts in that the resulting matrix follows the same sparsity pattern +as a given sparse matrix `S`, or that the resulting sparse matrix has density `d`, i.e. each matrix +element has a probability `d` of being non-zero. + +Details can be found in the [Sparse Vectors and Matrices](@ref stdlib-sparse-arrays) +section of the standard library reference. 
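The constructor, `findnz`, and conversion patterns above condense into a short runnable sketch (a minimal example; it assumes the `SparseArrays` module introduced at the top of this page is loaded):

```julia
using SparseArrays

# COO-style construction: S[I[k], J[k]] = V[k]
I = [1, 4, 3, 5]; J = [4, 7, 18, 9]; V = [1, 2, -5, 3]
S = sparse(I, J, V)

# findnz recovers the stored triplets, which can be edited as dense
# vectors and fed back to sparse() to rebuild the matrix.
rows, cols, vals = findnz(S)
S2 = sparse(rows, cols, 2 .* vals, size(S)...)

# Interconversion with dense arrays, as in the table that follows.
D = Array(S)
issparse(S), issparse(D)    # (true, false)
```

The correspondence table below summarizes these constructors against their dense counterparts.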
+ +| Sparse | Dense | Description | +|:-------------------------- |:---------------------- |:--------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [`spzeros(m,n)`](@ref) | [`zeros(m,n)`](@ref) | Creates a *m*-by-*n* matrix of zeros. ([`spzeros(m,n)`](@ref) is empty.) | +| [`sparse(I, n, n)`](@ref) | [`Matrix(I,n,n)`](@ref)| Creates a *n*-by-*n* identity matrix. | +| [`Array(S)`](@ref) | [`sparse(A)`](@ref) | Interconverts between dense and sparse formats. | +| [`sprand(m,n,d)`](@ref) | [`rand(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed uniformly on the half-open interval ``[0, 1)``. | +| [`sprandn(m,n,d)`](@ref) | [`randn(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the standard normal (Gaussian) distribution. | +| [`sprandn(m,n,d,X)`](@ref) | [`randn(m,n,X)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the *X* distribution. (Requires the `Distributions` package.) | + +# [Sparse Arrays](@id stdlib-sparse-arrays) + +```@docs +SparseArrays.SparseVector +SparseArrays.SparseMatrixCSC +SparseArrays.sparse +SparseArrays.sparsevec +SparseArrays.issparse +SparseArrays.nnz +SparseArrays.spzeros +SparseArrays.spdiagm +SparseArrays.blkdiag +SparseArrays.sprand +SparseArrays.sprandn +SparseArrays.nonzeros +SparseArrays.rowvals +SparseArrays.nzrange +SparseArrays.dropzeros!(::SparseMatrixCSC, ::Bool) +SparseArrays.dropzeros(::SparseMatrixCSC, ::Bool) +SparseArrays.dropzeros!(::SparseVector, ::Bool) +SparseArrays.dropzeros(::SparseVector, ::Bool) +SparseArrays.permute +permute!{Tv, Ti, Tp <: Integer, Tq <: Integer}(::SparseMatrixCSC{Tv,Ti}, ::SparseMatrixCSC{Tv,Ti}, ::AbstractArray{Tp,1}, ::AbstractArray{Tq,1}) +``` diff --git a/base/sparse/sparse.jl b/stdlib/SparseArrays/src/SparseArrays.jl similarity index 70% rename from base/sparse/sparse.jl rename to stdlib/SparseArrays/src/SparseArrays.jl index 8c9133aabe6aba..631bbb0a0599f8 100644 --- a/base/sparse/sparse.jl +++ b/stdlib/SparseArrays/src/SparseArrays.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +__precompile__(true) + """ Support for sparse arrays. Provides `AbstractSparseArray` and subtypes. """ @@ -31,10 +33,16 @@ export AbstractSparseArray, AbstractSparseMatrix, AbstractSparseVector, issparse, nonzeros, nzrange, rowvals, sparse, sparsevec, spdiagm, sprand, sprandn, spzeros, nnz, permute -include("abstractsparse.jl") -include("sparsematrix.jl") -include("sparsevector.jl") -include("higherorderfns.jl") -include("linalg.jl") +include(joinpath(@__DIR__, "abstractsparse.jl")) +include(joinpath(@__DIR__, "sparsematrix.jl")) +include(joinpath(@__DIR__, "sparsevector.jl")) +include(joinpath(@__DIR__, "higherorderfns.jl")) +include(joinpath(@__DIR__, "linalg.jl")) +include(joinpath(@__DIR__, "deprecated.jl")) + +similar(B::Bidiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +similar(D::Diagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +similar(S::SymTridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) +similar(M::Tridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = spzeros(T, dims...) 
end diff --git a/base/sparse/abstractsparse.jl b/stdlib/SparseArrays/src/abstractsparse.jl similarity index 100% rename from base/sparse/abstractsparse.jl rename to stdlib/SparseArrays/src/abstractsparse.jl diff --git a/stdlib/SparseArrays/src/deprecated.jl b/stdlib/SparseArrays/src/deprecated.jl new file mode 100644 index 00000000000000..ce74720e42d7b8 --- /dev/null +++ b/stdlib/SparseArrays/src/deprecated.jl @@ -0,0 +1,13 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +using Base: @deprecate, depwarn + +# BEGIN 0.7 deprecations + +# TODO: add all deprecations here + +# END 0.7 deprecations + +# BEGIN 1.0 deprecations + +# END 1.0 deprecations diff --git a/base/sparse/higherorderfns.jl b/stdlib/SparseArrays/src/higherorderfns.jl similarity index 100% rename from base/sparse/higherorderfns.jl rename to stdlib/SparseArrays/src/higherorderfns.jl diff --git a/base/sparse/linalg.jl b/stdlib/SparseArrays/src/linalg.jl similarity index 100% rename from base/sparse/linalg.jl rename to stdlib/SparseArrays/src/linalg.jl diff --git a/base/sparse/sparsematrix.jl b/stdlib/SparseArrays/src/sparsematrix.jl similarity index 100% rename from base/sparse/sparsematrix.jl rename to stdlib/SparseArrays/src/sparsematrix.jl diff --git a/base/sparse/sparsevector.jl b/stdlib/SparseArrays/src/sparsevector.jl similarity index 100% rename from base/sparse/sparsevector.jl rename to stdlib/SparseArrays/src/sparsevector.jl diff --git a/test/sparse/higherorderfns.jl b/stdlib/SparseArrays/test/higherorderfns.jl similarity index 100% rename from test/sparse/higherorderfns.jl rename to stdlib/SparseArrays/test/higherorderfns.jl diff --git a/stdlib/SparseArrays/test/runtests.jl b/stdlib/SparseArrays/test/runtests.jl new file mode 100644 index 00000000000000..e761a2fd34567c --- /dev/null +++ b/stdlib/SparseArrays/test/runtests.jl @@ -0,0 +1,7 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +using Test, SparseArrays + +include("higherorderfns.jl") +include("sparse.jl") +include("sparsevector.jl") diff --git a/test/sparse/sparse.jl b/stdlib/SparseArrays/test/sparse.jl similarity index 89% rename from test/sparse/sparse.jl rename to stdlib/SparseArrays/test/sparse.jl index 47279498187549..bc7cd93e0a4420 100644 --- a/test/sparse/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -using Base.LinAlg: mul!, ldiv!, rdiv!, Adjoint, Transpose +using Base.LinAlg: mul!, ldiv!, rdiv!, Adjoint, Transpose, diagm using Base.Printf: @printf @testset "issparse" begin @@ -23,7 +23,7 @@ end end @testset "indtype" begin - @test Base.SparseArrays.indtype(sparse(Int8[1,1],Int8[1,1],[1,1])) == Int8 + @test SparseArrays.indtype(sparse(Int8[1,1],Int8[1,1],[1,1])) == Int8 end @testset "sparse matrix construction" begin @@ -303,8 +303,8 @@ end a = sprand(10, 5, 0.7) b = sprand(5, 15, 0.3) @test maximum(abs.(a*b - Array(a)*Array(b))) < 100*eps() - @test maximum(abs.(Base.SparseArrays.spmatmul(a,b,sortindices=:sortcols) - Array(a)*Array(b))) < 100*eps() - @test maximum(abs.(Base.SparseArrays.spmatmul(a,b,sortindices=:doubletranspose) - Array(a)*Array(b))) < 100*eps() + @test maximum(abs.(SparseArrays.spmatmul(a,b,sortindices=:sortcols) - Array(a)*Array(b))) < 100*eps() + @test maximum(abs.(SparseArrays.spmatmul(a,b,sortindices=:doubletranspose) - Array(a)*Array(b))) < 100*eps() @test Array(kron(a,b)) == kron(Array(a), Array(b)) @test Array(kron(Array(a),b)) == kron(Array(a), Array(b)) @test Array(kron(a,Array(b))) == kron(Array(a), Array(b)) @@ -929,47 +929,47 @@ end @test nnz(A) == 19 # Test argument bounds checking for dropstored!(A, i, j) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 0, 1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1, 0) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1, 11) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 11, 1) + @test_throws BoundsError SparseArrays.dropstored!(A, 0, 1) + @test_throws BoundsError SparseArrays.dropstored!(A, 1, 0) + @test_throws BoundsError SparseArrays.dropstored!(A, 1, 11) + @test_throws BoundsError SparseArrays.dropstored!(A, 11, 1) # Test argument bounds checking for dropstored!(A, I, J) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 0:1, 1:1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1:1, 0:1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 10:11, 1:1) - @test_throws BoundsError Base.SparseArrays.dropstored!(A, 1:1, 10:11) + @test_throws BoundsError SparseArrays.dropstored!(A, 0:1, 1:1) + @test_throws BoundsError SparseArrays.dropstored!(A, 1:1, 0:1) + @test_throws BoundsError SparseArrays.dropstored!(A, 10:11, 1:1) + @test_throws BoundsError SparseArrays.dropstored!(A, 1:1, 10:11) # Test behavior of dropstored!(A, i, j) # --> Test dropping a single stored entry - Base.SparseArrays.dropstored!(A, 1, 2) + SparseArrays.dropstored!(A, 1, 2) @test nnz(A) == 18 # --> Test dropping a single nonstored entry - Base.SparseArrays.dropstored!(A, 2, 1) + SparseArrays.dropstored!(A, 2, 1) @test nnz(A) == 18 # Test behavior of dropstored!(A, I, J) and derivs. 
# --> Test dropping a single row including stored and nonstored entries - Base.SparseArrays.dropstored!(A, 1, :) + SparseArrays.dropstored!(A, 1, :) @test nnz(A) == 9 # --> Test dropping a single column including stored and nonstored entries - Base.SparseArrays.dropstored!(A, :, 2) + SparseArrays.dropstored!(A, :, 2) @test nnz(A) == 0 # --> Introduce nonzeros in rows one and two and columns two and three A[1:2,:] = 1 A[:,2:3] = 2 @test nnz(A) == 36 # --> Test dropping multiple rows containing stored and nonstored entries - Base.SparseArrays.dropstored!(A, 1:3, :) + SparseArrays.dropstored!(A, 1:3, :) @test nnz(A) == 14 # --> Test dropping multiple columns containing stored and nonstored entries - Base.SparseArrays.dropstored!(A, :, 2:4) + SparseArrays.dropstored!(A, :, 2:4) @test nnz(A) == 0 # --> Introduce nonzeros in every other row A[1:2:9, :] = 1 @test nnz(A) == 50 # --> Test dropping a block of the matrix towards the upper left - Base.SparseArrays.dropstored!(A, 2:5, 2:5) + SparseArrays.dropstored!(A, 2:5, 2:5) @test nnz(A) == 42 end @@ -1048,110 +1048,112 @@ end @test iA === iS === nothing end -# findmin/findmax/minumum/maximum +@testset "findmin/findmax/minumum/maximum" begin + A = sparse([1.0 5.0 6.0; + 5.0 2.0 4.0]) + for (tup, rval, rind) in [((1,), [1.0 2.0 4.0], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(2,3)]), + ((2,), reshape([1.0,2.0], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,2)], 2, 1)), + ((1,2), fill(1.0,1,1),fill(CartesianIndex(1,1),1,1))] + @test findmin(A, tup) == (rval, rind) + end -A = sparse([1.0 5.0 6.0; - 5.0 2.0 4.0]) -for (tup, rval, rind) in [((1,), [1.0 2.0 4.0], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(2,3)]), - ((2,), reshape([1.0,2.0], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,2)], 2, 1)), - ((1,2), fill(1.0,1,1),fill(CartesianIndex(1,1),1,1))] - @test findmin(A, tup) == (rval, rind) -end + for (tup, rval, rind) in [((1,), [5.0 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), + ((2,), reshape([6.0,5.0], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(6.0,1,1),fill(CartesianIndex(1,3),1,1))] + @test findmax(A, tup) == (rval, rind) + end -for (tup, rval, rind) in [((1,), [5.0 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), - ((2,), reshape([6.0,5.0], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(6.0,1,1),fill(CartesianIndex(1,3),1,1))] - @test findmax(A, tup) == (rval, rind) -end + #issue 23209 -#issue 23209 + A = sparse([1.0 5.0 6.0; + NaN 2.0 4.0]) + for (tup, rval, rind) in [((1,), [NaN 2.0 4.0], [CartesianIndex(2,1) CartesianIndex(2,2) CartesianIndex(2,3)]), + ((2,), reshape([1.0, NaN], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([1.0 5.0 6.0; - NaN 2.0 4.0]) -for (tup, rval, rind) in [((1,), [NaN 2.0 4.0], [CartesianIndex(2,1) CartesianIndex(2,2) CartesianIndex(2,3)]), - ((2,), reshape([1.0, NaN], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [NaN 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), + ((2,), reshape([6.0, NaN], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test 
isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [NaN 5.0 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), - ((2,), reshape([6.0, NaN], 2, 1), reshape([CartesianIndex(1,3),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([1.0 NaN 6.0; + NaN 2.0 4.0]) + for (tup, rval, rind) in [((1,), [NaN NaN 4.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(2,3)]), + ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([1.0 NaN 6.0; - NaN 2.0 4.0]) -for (tup, rval, rind) in [((1,), [NaN NaN 4.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(2,3)]), - ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [NaN NaN 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), + ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [NaN NaN 6.0], [CartesianIndex(2,1) CartesianIndex(1,2) CartesianIndex(1,3)]), - ((2,), reshape([NaN, NaN], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(NaN,1,1),fill(CartesianIndex(2,1),1,1))] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([Inf -Inf Inf -Inf; + Inf Inf -Inf -Inf]) + for (tup, rval, rind) in [((1,), [Inf -Inf -Inf -Inf], [CartesianIndex(1,1) CartesianIndex(1,2) CartesianIndex(2,3) CartesianIndex(1,4)]), + ((2,), reshape([-Inf -Inf], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,3)], 2, 1)), + ((1,2), fill(-Inf,1,1),fill(CartesianIndex(1,2),1,1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([Inf -Inf Inf -Inf; - Inf Inf -Inf -Inf]) -for (tup, rval, rind) in [((1,), [Inf -Inf -Inf -Inf], [CartesianIndex(1,1) CartesianIndex(1,2) CartesianIndex(2,3) CartesianIndex(1,4)]), - ((2,), reshape([-Inf -Inf], 2, 1), reshape([CartesianIndex(1,2),CartesianIndex(2,3)], 2, 1)), - ((1,2), fill(-Inf,1,1),fill(CartesianIndex(1,2),1,1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [Inf Inf Inf -Inf], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(1,3) CartesianIndex(1,4)]), + ((2,), reshape([Inf Inf], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), + ((1,2), fill(Inf,1,1),fill(CartesianIndex(1,1),1,1))] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [Inf Inf Inf -Inf], [CartesianIndex(1,1) CartesianIndex(2,2) CartesianIndex(1,3) CartesianIndex(1,4)]), - ((2,), reshape([Inf Inf], 2, 1), reshape([CartesianIndex(1,1),CartesianIndex(2,1)], 2, 1)), - ((1,2), fill(Inf,1,1),fill(CartesianIndex(1,1),1,1))] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([BigInt(10)]) + for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([BigInt(10)]) -for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] + @test isequal(findmax(A, tup), 
(rval, rind)) + end -for (tup, rval, rind) in [((2,), [BigInt(10)], [1])] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([BigInt(-10)]) + for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([BigInt(-10)]) -for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((2,), [BigInt(-10)], [1])] - @test isequal(findmax(A, tup), (rval, rind)) -end + A = sparse([BigInt(10) BigInt(-10)]) + for (tup, rval, rind) in [((2,), reshape([BigInt(-10)], 1, 1), reshape([CartesianIndex(1,2)], 1, 1))] + @test isequal(findmin(A, tup), (rval, rind)) + end -A = sparse([BigInt(10) BigInt(-10)]) -for (tup, rval, rind) in [((2,), reshape([BigInt(-10)], 1, 1), reshape([CartesianIndex(1,2)], 1, 1))] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((2,), reshape([BigInt(10)], 1, 1), reshape([CartesianIndex(1,1)], 1, 1))] + @test isequal(findmax(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((2,), reshape([BigInt(10)], 1, 1), reshape([CartesianIndex(1,1)], 1, 1))] - @test isequal(findmax(A, tup), (rval, rind)) + A = sparse(["a", "b"]) + @test_throws MethodError findmin(A, 1) end -A = sparse(["a", "b"]) -@test_throws MethodError findmin(A, 1) - # Support the case when user defined `zero` and `isless` for non-numerical type struct CustomType x::String end Base.zero(::Type{CustomType}) = CustomType("") Base.isless(x::CustomType, y::CustomType) = isless(x.x, y.x) -A = sparse([CustomType("a"), CustomType("b")]) +let + A = sparse([CustomType("a"), CustomType("b")]) -for (tup, rval, rind) in [((1,), [CustomType("a")], [1])] - @test isequal(findmin(A, tup), (rval, rind)) -end + for (tup, rval, rind) in [((1,), [CustomType("a")], [1])] + @test isequal(findmin(A, tup), (rval, rind)) + end -for (tup, rval, rind) in [((1,), [CustomType("b")], [2])] - @test isequal(findmax(A, tup), (rval, rind)) + for (tup, rval, rind) in [((1,), [CustomType("b")], [2])] + @test isequal(findmax(A, tup), (rval, rind)) + end end @testset "findn" begin @@ -1190,9 +1192,9 @@ function test_getindex_algs(A::SparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::Abs ((minj < 1) || (maxj > n)) && BoundsError() end - (alg == 0) ? Base.SparseArrays.getindex_I_sorted_bsearch_A(A, I, J) : - (alg == 1) ? Base.SparseArrays.getindex_I_sorted_bsearch_I(A, I, J) : - Base.SparseArrays.getindex_I_sorted_linear(A, I, J) + (alg == 0) ? SparseArrays.getindex_I_sorted_bsearch_A(A, I, J) : + (alg == 1) ? 
SparseArrays.getindex_I_sorted_bsearch_I(A, I, J) : + SparseArrays.getindex_I_sorted_linear(A, I, J) end @testset "test_getindex_algs" begin @@ -1396,8 +1398,8 @@ end local A = guardsrand(1234321) do triu(sprand(10, 10, 0.2)) end - @test Base.droptol!(A, 0.01).colptr == [1,1,1,2,2,3,4,6,6,7,9] - @test isequal(Base.droptol!(sparse([1], [1], [1]), 1), SparseMatrixCSC(1, 1, Int[1, 1], Int[], Int[])) + @test SparseArrays.droptol!(A, 0.01).colptr == [1,1,1,2,2,3,4,6,6,7,9] + @test isequal(SparseArrays.droptol!(sparse([1], [1], [1]), 1), SparseMatrixCSC(1, 1, Int[1, 1], Int[], Int[])) end @testset "dropzeros[!]" begin @@ -1487,10 +1489,10 @@ end @testset "expandptr" begin local A = sparse(1.0I, 5, 5) - @test Base.SparseArrays.expandptr(A.colptr) == 1:5 + @test SparseArrays.expandptr(A.colptr) == 1:5 A[1,2] = 1 - @test Base.SparseArrays.expandptr(A.colptr) == [1; 2; 2; 3; 4; 5] - @test_throws ArgumentError Base.SparseArrays.expandptr([2; 3]) + @test SparseArrays.expandptr(A.colptr) == [1; 2; 2; 3; 4; 5] + @test_throws ArgumentError SparseArrays.expandptr([2; 3]) end @testset "triu/tril" begin @@ -1697,13 +1699,13 @@ end Ari = ceil.(Int64, 100*Ar) if Base.USE_GPL_LIBS # NOTE: normestinv is probabilistic, so requires a fixed seed (set above in srand(1234)) - @test Base.SparseArrays.normestinv(Ac,3) ≈ norm(inv(Array(Ac)),1) atol=1e-4 - @test Base.SparseArrays.normestinv(Aci,3) ≈ norm(inv(Array(Aci)),1) atol=1e-4 - @test Base.SparseArrays.normestinv(Ar) ≈ norm(inv(Array(Ar)),1) atol=1e-4 - @test_throws ArgumentError Base.SparseArrays.normestinv(Ac,0) - @test_throws ArgumentError Base.SparseArrays.normestinv(Ac,21) + @test SparseArrays.normestinv(Ac,3) ≈ norm(inv(Array(Ac)),1) atol=1e-4 + @test SparseArrays.normestinv(Aci,3) ≈ norm(inv(Array(Aci)),1) atol=1e-4 + @test SparseArrays.normestinv(Ar) ≈ norm(inv(Array(Ar)),1) atol=1e-4 + @test_throws ArgumentError SparseArrays.normestinv(Ac,0) + @test_throws ArgumentError SparseArrays.normestinv(Ac,21) end - @test_throws DimensionMismatch Base.SparseArrays.normestinv(sprand(3,5,.9)) + @test_throws DimensionMismatch SparseArrays.normestinv(sprand(3,5,.9)) end @testset "issue #13008" begin @@ -1852,7 +1854,7 @@ end # Test temporary fix for issue #16548 in PR #16979. Somewhat brittle. Expect to remove with `\` revisions. 
@testset "issue #16548" begin ms = methods(\, (SparseMatrixCSC, AbstractVecOrMat)).ms - @test all(m -> m.module == Base.SparseArrays, ms) + @test all(m -> m.module == SparseArrays, ms) end @testset "row indexing a SparseMatrixCSC with non-Int integer type" begin @@ -1912,7 +1914,7 @@ end # Check calling of unary minus method specialized for SparseMatrixCSCs @testset "issue #19503" begin - @test which(-, (SparseMatrixCSC,)).module == Base.SparseArrays + @test which(-, (SparseMatrixCSC,)).module == SparseArrays end @testset "issue #14398" begin @@ -1921,16 +1923,16 @@ end @testset "dropstored issue #20513" begin x = sparse(rand(3,3)) - Base.SparseArrays.dropstored!(x, 1, 1) + SparseArrays.dropstored!(x, 1, 1) @test x[1, 1] == 0.0 @test x.colptr == [1, 3, 6, 9] - Base.SparseArrays.dropstored!(x, 2, 1) + SparseArrays.dropstored!(x, 2, 1) @test x.colptr == [1, 2, 5, 8] @test x[2, 1] == 0.0 - Base.SparseArrays.dropstored!(x, 2, 2) + SparseArrays.dropstored!(x, 2, 2) @test x.colptr == [1, 2, 4, 7] @test x[2, 2] == 0.0 - Base.SparseArrays.dropstored!(x, 2, 3) + SparseArrays.dropstored!(x, 2, 3) @test x.colptr == [1, 2, 4, 6] @test x[2, 3] == 0.0 end @@ -1949,47 +1951,47 @@ end @testset "show" begin io = IOBuffer() show(io, MIME"text/plain"(), sparse(Int64[1], Int64[1], [1.0])) - @test String(take!(io)) == "1×1 SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" + @test String(take!(io)) == "1×1 SparseArrays.SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" show(io, MIME"text/plain"(), spzeros(Float32, Int64, 2, 2)) - @test String(take!(io)) == "2×2 SparseMatrixCSC{Float32,Int64} with 0 stored entries" + @test String(take!(io)) == "2×2 SparseArrays.SparseMatrixCSC{Float32,Int64} with 0 stored entries" ioc = IOContext(io, :displaysize => (5, 80), :limit => true) show(ioc, MIME"text/plain"(), sparse(Int64[1], Int64[1], [1.0])) - @test String(take!(io)) == "1×1 SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" + @test String(take!(io)) == "1×1 SparseArrays.SparseMatrixCSC{Float64,Int64} with 1 stored entry:\n [1, 1] = 1.0" show(ioc, MIME"text/plain"(), sparse(Int64[1, 1], Int64[1, 2], [1.0, 2.0])) - @test String(take!(io)) == "1×2 SparseMatrixCSC{Float64,Int64} with 2 stored entries:\n ⋮" + @test String(take!(io)) == "1×2 SparseArrays.SparseMatrixCSC{Float64,Int64} with 2 stored entries:\n ⋮" # even number of rows ioc = IOContext(io, :displaysize => (8, 80), :limit => true) show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4], Int64[1,1,2,2], [1.0,2.0,3.0,4.0])) - @test String(take!(io)) == string("4×2 SparseMatrixCSC{Float64,Int64} with 4 stored entries:\n [1, 1]", + @test String(take!(io)) == string("4×2 SparseArrays.SparseMatrixCSC{Float64,Int64} with 4 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 2.0\n [3, 2] = 3.0\n [4, 2] = 4.0") show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5], Int64[1,1,2,2,3], [1.0,2.0,3.0,4.0,5.0])) - @test String(take!(io)) == string("5×3 SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", + @test String(take!(io)) == string("5×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", " = 1.0\n ⋮\n [5, 3] = 5.0") show(ioc, MIME"text/plain"(), sparse(fill(1.,5,3))) - @test String(take!(io)) == string("5×3 SparseMatrixCSC{Float64,$Int} with 15 stored entries:\n [1, 1]", + @test String(take!(io)) == string("5×3 SparseArrays.SparseMatrixCSC{Float64,$Int} with 15 stored entries:\n [1, 1]", " = 1.0\n ⋮\n [5, 3] = 1.0") # odd number of rows ioc = IOContext(io, :displaysize => (9, 80), 
:limit => true) show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5], Int64[1,1,2,2,3], [1.0,2.0,3.0,4.0,5.0])) - @test String(take!(io)) == string("5×3 SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", + @test String(take!(io)) == string("5×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 5 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 2.0\n [3, 2] = 3.0\n [4, 2] = 4.0\n [5, 3] = 5.0") show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5,6], Int64[1,1,2,2,3,3], [1.0,2.0,3.0,4.0,5.0,6.0])) - @test String(take!(io)) == string("6×3 SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1]", + @test String(take!(io)) == string("6×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 2.0\n ⋮\n [5, 3] = 5.0\n [6, 3] = 6.0") show(ioc, MIME"text/plain"(), sparse(fill(1.,6,3))) - @test String(take!(io)) == string("6×3 SparseMatrixCSC{Float64,$Int} with 18 stored entries:\n [1, 1]", + @test String(take!(io)) == string("6×3 SparseArrays.SparseMatrixCSC{Float64,$Int} with 18 stored entries:\n [1, 1]", " = 1.0\n [2, 1] = 1.0\n ⋮\n [5, 3] = 1.0\n [6, 3] = 1.0") ioc = IOContext(io, :displaysize => (9, 80)) show(ioc, MIME"text/plain"(), sparse(Int64[1,2,3,4,5,6], Int64[1,1,2,2,3,3], [1.0,2.0,3.0,4.0,5.0,6.0])) - @test String(take!(io)) == string("6×3 SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1] = 1.0\n", + @test String(take!(io)) == string("6×3 SparseArrays.SparseMatrixCSC{Float64,Int64} with 6 stored entries:\n [1, 1] = 1.0\n", " [2, 1] = 2.0\n [3, 2] = 3.0\n [4, 2] = 4.0\n [5, 3] = 5.0\n [6, 3] = 6.0") end @@ -2079,7 +2081,7 @@ end a = sparse(rand(3,3) .+ 0.1) b = similar(a, Float32, Int32) c = similar(b, Float32, Int32) - Base.SparseArrays.dropstored!(b, 1, 1) + SparseArrays.dropstored!(b, 1, 1) @test length(c.rowval) == 9 @test length(c.nzval) == 9 end diff --git a/test/sparse/sparsevector.jl b/stdlib/SparseArrays/test/sparsevector.jl similarity index 98% rename from test/sparse/sparsevector.jl rename to stdlib/SparseArrays/test/sparsevector.jl index d2a822cd032265..7fe0805ac5a270 100644 --- a/test/sparse/sparsevector.jl +++ b/stdlib/SparseArrays/test/sparsevector.jl @@ -260,13 +260,13 @@ end @testset "dropstored!" 
begin x = SparseVector(10, [2, 7, 9], [2.0, 7.0, 9.0]) # Test argument bounds checking for dropstored!(x, i) - @test_throws BoundsError Base.SparseArrays.dropstored!(x, 0) - @test_throws BoundsError Base.SparseArrays.dropstored!(x, 11) + @test_throws BoundsError SparseArrays.dropstored!(x, 0) + @test_throws BoundsError SparseArrays.dropstored!(x, 11) # Test behavior of dropstored!(x, i) # --> Test dropping a single stored entry - @test Base.SparseArrays.dropstored!(x, 2) == SparseVector(10, [7, 9], [7.0, 9.0]) + @test SparseArrays.dropstored!(x, 2) == SparseVector(10, [7, 9], [7.0, 9.0]) # --> Test dropping a single nonstored entry - @test Base.SparseArrays.dropstored!(x, 5) == SparseVector(10, [7, 9], [7.0, 9.0]) + @test SparseArrays.dropstored!(x, 5) == SparseVector(10, [7, 9], [7.0, 9.0]) end @testset "find and findnz" begin @@ -680,7 +680,7 @@ end @test spresvec == op.(densevec) @test all(!iszero, spresvec.nzval) resvaltype = typeof(op(zero(eltype(spvec)))) - resindtype = Base.SparseArrays.indtype(spvec) + resindtype = SparseArrays.indtype(spvec) @test isa(spresvec, SparseVector{resvaltype,resindtype}) end end @@ -696,7 +696,7 @@ end spresvec = op.(spvec) @test spresvec == op.(densevec) resvaltype = typeof(op(zero(eltype(spvec)))) - resindtype = Base.SparseArrays.indtype(spvec) + resindtype = SparseArrays.indtype(spvec) @test isa(spresvec, SparseVector{resvaltype,resindtype}) end end @@ -1020,17 +1020,17 @@ end @testset "fkeep!" begin x = sparsevec(1:7, [3., 2., -1., 1., -2., -3., 3.], 7) # droptol - xdrop = Base.droptol!(copy(x), 1.5) + xdrop = SparseArrays.droptol!(copy(x), 1.5) @test exact_equal(xdrop, SparseVector(7, [1, 2, 5, 6, 7], [3., 2., -2., -3., 3.])) - Base.droptol!(xdrop, 2.5) + SparseArrays.droptol!(xdrop, 2.5) @test exact_equal(xdrop, SparseVector(7, [1, 6, 7], [3., -3., 3.])) - Base.droptol!(xdrop, 3.) + SparseArrays.droptol!(xdrop, 3.) @test exact_equal(xdrop, SparseVector(7, Int[], Float64[])) xdrop = copy(x) # This will keep index 1, 3, 4, 7 in xdrop f_drop(i, x) = (abs(x) == 1.) || (i in [1, 7]) - Base.SparseArrays.fkeep!(xdrop, f_drop) + SparseArrays.fkeep!(xdrop, f_drop) @test exact_equal(xdrop, SparseVector(7, [1, 3, 4, 7], [3., -1., 1., 3.])) end @testset "dropzeros[!]" begin @@ -1063,7 +1063,7 @@ end # original dropzeros! 
test xdrop = sparsevec(1:7, [3., 2., -1., 1., -2., -3., 3.], 7) xdrop.nzval[[2, 4, 6]] = 0.0 - Base.SparseArrays.dropzeros!(xdrop) + SparseArrays.dropzeros!(xdrop) @test exact_equal(xdrop, SparseVector(7, [1, 3, 5, 7], [3, -1., -2., 3.])) end end @@ -1161,9 +1161,9 @@ mutable struct t20488 end @testset "show" begin io = IOBuffer() show(io, MIME"text/plain"(), sparsevec(Int64[1], [1.0])) - @test String(take!(io)) == "1-element SparseVector{Float64,Int64} with 1 stored entry:\n [1] = 1.0" + @test String(take!(io)) == "1-element SparseArrays.SparseVector{Float64,Int64} with 1 stored entry:\n [1] = 1.0" show(io, MIME"text/plain"(), spzeros(Float64, Int64, 2)) - @test String(take!(io)) == "2-element SparseVector{Float64,Int64} with 0 stored entries" + @test String(take!(io)) == "2-element SparseArrays.SparseVector{Float64,Int64} with 0 stored entries" show(io, similar(sparsevec(rand(3) .+ 0.1), t20488)) @test String(take!(io)) == " [1] = #undef\n [2] = #undef\n [3] = #undef" end diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index b6b47080c7891a..46f21bec9d9e21 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -10,7 +10,7 @@ import Base.LinAlg: (\), issuccess, issymmetric, ldltfact, ldltfact!, logdet, Adjoint, Transpose -using ..SparseArrays +using SparseArrays using Base.Printf.@printf export @@ -18,7 +18,7 @@ export Factor, Sparse -import ..SparseArrays: AbstractSparseMatrix, SparseMatrixCSC, indtype, sparse, spzeros, nnz +import SparseArrays: AbstractSparseMatrix, SparseMatrixCSC, indtype, sparse, spzeros, nnz import ..increment, ..increment!, ..decrement, ..decrement! diff --git a/stdlib/SuiteSparse/src/spqr.jl b/stdlib/SuiteSparse/src/spqr.jl index 20716c27805618..110ea9ee26c594 100644 --- a/stdlib/SuiteSparse/src/spqr.jl +++ b/stdlib/SuiteSparse/src/spqr.jl @@ -22,7 +22,7 @@ const ORDERING_BESTAMD = Int32(9) # try COLAMD and AMD; pick best# # tried. If there is a high fill-in with AMD then try METIS(A'A) and take # the best of AMD and METIS. METIS is not tried if it isn't installed. -using ..SparseArrays: SparseMatrixCSC +using SparseArrays: SparseMatrixCSC using ..SuiteSparse.CHOLMOD using ..SuiteSparse.CHOLMOD: change_stype!, free! diff --git a/stdlib/SuiteSparse/src/umfpack.jl b/stdlib/SuiteSparse/src/umfpack.jl index 5c6ef5b7d34942..d41df2fdf37e27 100644 --- a/stdlib/SuiteSparse/src/umfpack.jl +++ b/stdlib/SuiteSparse/src/umfpack.jl @@ -8,8 +8,8 @@ import Base: (\), findnz, getproperty, show, size import Base.LinAlg: Factorization, det, lufact, ldiv! using Base.LinAlg: Adjoint, Transpose -using ..SparseArrays -import ..SparseArrays: nnz +using SparseArrays +import SparseArrays: nnz import ..increment, ..increment!, ..decrement, ..decrement! diff --git a/stdlib/SuiteSparse/test/runtests.jl b/stdlib/SuiteSparse/test/runtests.jl index f27f76068991a7..4cf807452f474d 100644 --- a/stdlib/SuiteSparse/test/runtests.jl +++ b/stdlib/SuiteSparse/test/runtests.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test -using SuiteSparse +using SuiteSparse, SparseArrays if Base.USE_GPL_LIBS include("umfpack.jl") diff --git a/test/abstractarray.jl b/test/abstractarray.jl index 910372bf4a077d..220f08332d29ef 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license +using SparseArrays + A = rand(5,4,3) @testset "Bounds checking" begin @test checkbounds(Bool, A, 1, 1, 1) == true diff --git a/test/ambiguous.jl b/test/ambiguous.jl index b9532891ca5588..6364e7f69400a5 100644 --- a/test/ambiguous.jl +++ b/test/ambiguous.jl @@ -9,6 +9,8 @@ ambig(x::Int, y::Int) = 4 ambig(x::Number, y) = 5 # END OF LINE NUMBER SENSITIVITY +using SparseArrays + # For curmod_* include("testenv.jl") diff --git a/test/arrayops.jl b/test/arrayops.jl index 402ae878e062ae..e49134f3c0fbd9 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -3,6 +3,7 @@ # Array test isdefined(Main, :TestHelpers) || @eval Main include("TestHelpers.jl") using Main.TestHelpers.OAs +using SparseArrays @testset "basics" begin @test length([1, 2, 3]) == 3 diff --git a/test/choosetests.jl b/test/choosetests.jl index 21b8585bcbbd21..9c35301d9017d4 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -36,7 +36,7 @@ function choosetests(choices = []) "char", "strings", "triplequote", "unicode", "intrinsics", "dict", "hashing", "iobuffer", "staged", "offsetarray", "arrayops", "tuple", "reduce", "reducedim", "random", "abstractarray", - "intfuncs", "simdloop", "vecelement", "sparse", + "intfuncs", "simdloop", "vecelement", "bitarray", "copy", "math", "fastmath", "functional", "iterators", "operators", "path", "ccall", "parse", "loading", "bigint", "bigfloat", "sorting", "statistics", "spawn", "backtrace", @@ -101,14 +101,14 @@ function choosetests(choices = []) prepend!(tests, stringtests) end - sparsetests = ["sparse/sparse", "sparse/sparsevector", "sparse/higherorderfns"] - if "sparse" in skip_tests - filter!(x -> (x != "sparse" && !(x in sparsetests)), tests) - elseif "sparse" in tests - # specifically selected case - filter!(x -> x != "sparse", tests) - prepend!(tests, sparsetests) - end + # sparsetests = ["sparse/sparse", "sparse/sparsevector", "sparse/higherorderfns"] + # if "sparse" in skip_tests + # filter!(x -> (x != "sparse" && !(x in sparsetests)), tests) + # elseif "sparse" in tests + # # specifically selected case + # filter!(x -> x != "sparse", tests) + # prepend!(tests, sparsetests) + # end # do subarray before sparse but after linalg if "subarray" in skip_tests diff --git a/test/compile.jl b/test/compile.jl index 2fc3702c2885e8..d4fa1e4a5b46e4 100644 --- a/test/compile.jl +++ b/test/compile.jl @@ -220,7 +220,7 @@ try Dict(s => Base.module_uuid(Base.root_module(s)) for s in [:Base64, :CRC32c, :Dates, :DelimitedFiles, :FileWatching, :Future, :IterativeEigensolvers, :Logging, :Mmap, :Printf, :Profile, :SharedArrays, - :SuiteSparse, :Test, :Unicode, :Distributed])) + :SparseArrays, :SuiteSparse, :Test, :Unicode, :Distributed])) @test discard_module.(deps) == deps1 @test current_task()(0x01, 0x4000, 0x30031234) == 2 diff --git a/test/core.jl b/test/core.jl index 7b470ec4113a08..aeb3c509670714 100644 --- a/test/core.jl +++ b/test/core.jl @@ -3,6 +3,8 @@ # test core language features const Bottom = Union{} +using SparseArrays + # For curmod_* include("testenv.jl") diff --git a/test/generic_map_tests.jl b/test/generic_map_tests.jl index fb2444134f0727..8023395f5b7047 100644 --- a/test/generic_map_tests.jl +++ b/test/generic_map_tests.jl @@ -76,7 +76,7 @@ end function run_map_equivalence_tests(mapf) testmap_equivalence(mapf, identity, (1,2,3,4)) - testmap_equivalence(mapf, x->x>0 ? 1.0 : 0.0, sparse(sparse(1.0I, 5, 5))) + # testmap_equivalence(mapf, x->x>0 ? 
1.0 : 0.0, sparse(sparse(1.0I, 5, 5))) testmap_equivalence(mapf, (x,y,z)->x+y+z, 1,2,3) testmap_equivalence(mapf, x->x ? false : true, BitMatrix(uninitialized, 10,10)) testmap_equivalence(mapf, x->"foobar", BitMatrix(uninitialized, 10,10)) diff --git a/test/linalg/adjtrans.jl b/test/linalg/adjtrans.jl index cad11f67a3de63..a6aef79bfb5291 100644 --- a/test/linalg/adjtrans.jl +++ b/test/linalg/adjtrans.jl @@ -4,6 +4,7 @@ using Test using Base.LinAlg: Adjoint, Transpose +using SparseArrays @testset "Adjoint and Transpose inner constructor basics" begin intvec, intmat = [1, 2], [1 2; 3 4] diff --git a/test/linalg/bidiag.jl b/test/linalg/bidiag.jl index d48d55cb105544..877eb8d4126237 100644 --- a/test/linalg/bidiag.jl +++ b/test/linalg/bidiag.jl @@ -3,6 +3,7 @@ using Test using Base.LinAlg: mul!, Adjoint, Transpose import Base.LinAlg: BlasReal, BlasFloat +using SparseArrays n = 10 #Size of test matrix srand(1) diff --git a/test/linalg/diagonal.jl b/test/linalg/diagonal.jl index 2c8fb4cab65396..d8af3146b365af 100644 --- a/test/linalg/diagonal.jl +++ b/test/linalg/diagonal.jl @@ -3,6 +3,7 @@ using Test using Base.LinAlg: mul!, ldiv!, rdiv!, Adjoint, Transpose import Base.LinAlg: BlasFloat, BlasComplex, SingularException +using SparseArrays n=12 #Size of matrix problem to test srand(1) diff --git a/test/linalg/special.jl b/test/linalg/special.jl index 751b63c6e1de1f..243338a6108ffb 100644 --- a/test/linalg/special.jl +++ b/test/linalg/special.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test +using SparseArrays using Base.LinAlg: mul!, Adjoint, Transpose diff --git a/test/linalg/symmetric.jl b/test/linalg/symmetric.jl index d05e292591ca52..8ef7dfb99b68ac 100644 --- a/test/linalg/symmetric.jl +++ b/test/linalg/symmetric.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test +using SparseArrays srand(101) diff --git a/test/linalg/triangular.jl b/test/linalg/triangular.jl index 8a6dfcba2f0a75..c624d5a8fb561f 100644 --- a/test/linalg/triangular.jl +++ b/test/linalg/triangular.jl @@ -5,6 +5,7 @@ using Test using Base.LinAlg: BlasFloat, errorbounds, full!, naivesub!, transpose!, UnitUpperTriangular, UnitLowerTriangular, mul!, rdiv!, Adjoint, Transpose +using SparseArrays debug && println("Triangular matrices") diff --git a/test/linalg/tridiag.jl b/test/linalg/tridiag.jl index 2fd6b4a4ca5a0e..7c09384d2eff35 100644 --- a/test/linalg/tridiag.jl +++ b/test/linalg/tridiag.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using SparseArrays + #Test equivalence of eigenvectors/singular vectors taking into account possible phase (sign) differences function test_approx_eq_vecs(a::StridedVecOrMat{S}, b::StridedVecOrMat{T}, error=nothing) where {S<:Real,T<:Real} n = size(a, 1) diff --git a/test/linalg/uniformscaling.jl b/test/linalg/uniformscaling.jl index 7a9f3a1236c169..ac067fd997c2ea 100644 --- a/test/linalg/uniformscaling.jl +++ b/test/linalg/uniformscaling.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license using Test +using SparseArrays srand(123) diff --git a/test/random.jl b/test/random.jl index e36d04c519ac10..329cf646b5aa5f 100644 --- a/test/random.jl +++ b/test/random.jl @@ -2,6 +2,7 @@ isdefined(Main, :TestHelpers) || @eval Main include(joinpath(dirname(@__FILE__), "TestHelpers.jl")) using Main.TestHelpers.OAs +using SparseArrays using Base.Random.dSFMT using Base.Random: Sampler, SamplerRangeFast, SamplerRangeInt, MT_CACHE_F, MT_CACHE_I diff --git a/test/show.jl b/test/show.jl index b852e512f0afe5..8ecd2ccc1b0533 100644 --- a/test/show.jl +++ b/test/show.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using SparseArrays + # For curmod_* include("testenv.jl")
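Taken together, the test and stdlib changes above all exercise the same new usage pattern: load the module with `using SparseArrays` and qualify unexported helpers with `SparseArrays.` rather than `Base.SparseArrays.`. A minimal sketch of that pattern (`dropstored!` is an unexported internal helper used in the tests above):

```julia
using SparseArrays

x = sparsevec([2, 7, 9], [2.0, 7.0, 9.0], 10)

nnz(x)                          # 3 -- exported names work unqualified
SparseArrays.dropstored!(x, 2)  # unexported helpers use the new module path
nnz(x)                          # 2
```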