diff --git a/test/allocation_test.jl b/test/allocation_test.jl
index 9e303a3..0d31234 100644
--- a/test/allocation_test.jl
+++ b/test/allocation_test.jl
@@ -1,25 +1,4 @@
 using AdaptiveRegularization, LinearAlgebra, Test
-
-T = Float64
-S = Vector{T}
-n = 1000
-
-TR = TrustRegion(T(10))
-α = T(100)
-
-for XData in (PDataKARC(S, T, n), PDataTRK(S, T, n), PDataST(S, T, n))
-  @testset "Allocation test in AdaptiveRegularization.decrease for $(typeof(XData))" begin
-    alloc_decrease() = @allocated AdaptiveRegularization.decrease(XData, α, TR)
-    alloc_decrease()
-    @test alloc_decrease() <= 16
-  end
-  @testset "Allocation test in AdaptiveRegularization.decrease for $(typeof(XData))" begin
-    alloc_increase() = @allocated AdaptiveRegularization.increase(XData, α, TR)
-    alloc_increase()
-    @test (@allocated alloc_increase()) <= 16
-  end
-end
-
 using ADNLPModels, NLPModels, OptimizationProblems, Stopping
 
 nlp = OptimizationProblems.ADNLPProblems.arglina()
@@ -31,26 +10,31 @@ g = grad(nlp, x)
 ng = norm(g)
 calls, max_calls = 0, 1000000
 
-for (Data, solve, limit_solve, limit_preprocess) in (
-  (PDataKARC, :solve_modelKARC, 144, 0),
-  (PDataTRK, :solve_modelTRK, 144, 0),
-  (PDataST, :solve_modelST_TR, 224, 0),
+function alloc_preprocess(XData, H, g, ng, calls, max_calls, α)
+  AdaptiveRegularization.preprocess(XData, H, g, ng, calls, max_calls, α)
+  return nothing
+end
+
+function alloc_solve_model(solve, XData, H, g, ng, calls, max_calls, α)
+  solve(XData, H, g, ng, calls, max_calls, α)
+  return nothing
+end
+
+for (Data, solve) in (
+  (PDataKARC, AdaptiveRegularization.solve_modelKARC),
+  (PDataTRK, AdaptiveRegularization.solve_modelTRK),
+  (PDataST, AdaptiveRegularization.solve_modelST_TR),
 )
+  XData = Data(S, T, n)
   @testset "Allocation test in preprocess with $(Data)" begin
-    XData = Data(S, T, n)
-    alloc_preprocess(XData, H, g, ng, calls, max_calls, α) =
-      @allocated AdaptiveRegularization.preprocess(XData, H, g, ng, calls, max_calls, α)
     alloc_preprocess(XData, H, g, ng, calls, max_calls, α)
-    @test alloc_preprocess(XData, H, g, ng, calls, max_calls, α) <= limit_preprocess
-    @show alloc_preprocess(XData, H, g, ng, calls, max_calls, α)
+    al = @allocated alloc_preprocess(XData, H, g, ng, calls, max_calls, α)
+    @test al == 0
   end
   @testset "Allocation test in $solve with $(Data)" begin
-    XData = Data(S, T, n)
-    alloc_solve_model(XData, H, g, ng, calls, max_calls, α) =
-      @allocated AdaptiveRegularization.eval(solve)(XData, H, g, ng, calls, max_calls, α)
-    alloc_solve_model(XData, H, g, ng, calls, max_calls, α)
-    @test alloc_solve_model(XData, H, g, ng, calls, max_calls, α) <= limit_solve
-    @show alloc_solve_model(XData, H, g, ng, calls, max_calls, α)
+    alloc_solve_model(solve, XData, H, g, ng, calls, max_calls, α)
+    al = @allocated alloc_solve_model(solve, XData, H, g, ng, calls, max_calls, α)
+    @test al == 0
   end
 end
 
diff --git a/test/allocation_test_main.jl b/test/allocation_test_main.jl
index c662181..1d5b7fe 100644
--- a/test/allocation_test_main.jl
+++ b/test/allocation_test_main.jl
@@ -1,99 +1,27 @@
-using NLPModels, Stopping, AdaptiveRegularization
+using NLPModelsTest, Stopping, AdaptiveRegularization
 
-mutable struct EmptyNLPModel{T, S} <: AbstractNLPModel{T, S}
-  meta::NLPModelMeta{T, S}
-  counters::Counters
-end
-
-NLPModels.obj(::EmptyNLPModel{T, S}, x) where {T, S} = one(T)
-function NLPModels.grad!(::EmptyNLPModel{T, S}, x, gx::S) where {T, S}
-  gx .= one(T)
-  return gx
-end
-function NLPModels.hprod!(
-  ::EmptyNLPModel,
-  x::AbstractVector{T},
-  v::AbstractVector{T},
-  Hv::AbstractVector{T};
-  obj_weight = one(T),
-) where {T}
-  Hv .= zero(T)
-  return Hv
-end
-function NLPModels.hess_structure!(
-  ::EmptyNLPModel{T, S},
-  rows::AbstractVector,
-  cols::AbstractVector,
-) where {T, S}
-  rows .= one(T)
-  cols .= one(T)
-  return rows, cols
-end
-function NLPModels.hess_coord!(
-  ::EmptyNLPModel{T, S},
-  x::AbstractVector{T},
-  vals::AbstractVector;
-  obj_weight = one(T),
-) where {T, S}
-  vals .= zero(T)
-  return vals
-end
-
-import NLPModels: hess, hess_coord!, hess_coord, hess_op, hess_op!, hprod, hprod!
+nlp = BROWNDEN()
+n, x0 = nlp.meta.nvar, nlp.meta.x0
 
-mutable struct NothingPData{T} <: AdaptiveRegularization.TPData{T}
-  OK::Any
-  d::Any
-  λ::Any
-end
-
-n = 100
+TRnothing = TrustRegion(10.0, max_unsuccinarow = 2)
 
-function NothingPData(::Type{S}, ::Type{T}, n; kwargs...) where {T, S}
-  return NothingPData{T}(true, rand(T, n), one(T))
-end
+S, T = typeof(x0), eltype(x0)
+PData = PDataKARC(S, T, n)
 
-function solve_nothing(X::NothingPData, H, g, ng, calls, max_calls, α::T) where {T}
-  X.d .= g
-  X.λ = zero(T)
-  return X.d, X.λ
+function alloc_AdaptiveRegularization(stp, PData, workspace, TRnothing)
+  TRARC(stp, PData, workspace, TRnothing, solve_model = AdaptiveRegularization.solve_modelKARC)
+  return nothing
 end
 
-x0 = rand(n)
-nlp = EmptyNLPModel{eltype(x0), typeof(x0)}(NLPModelMeta(n), Counters())
-
-for (Workspace, limit) in (
-  (HessDense, 286624),
-  (HessSparse, 84016),
-  (HessSparseCOO, 0), # independent of `n`
-  (HessOp, 2304), # independent of `n`
+@testset "Test TRARC allocations using $Hess" for (Hess, limit) in (
+  (HessOp, 1904),
+  (HessSparseCOO, 944),
 )
-  who = Workspace(nlp, n)
-  alloc_hessian(who, nlp, x0) = @allocated AdaptiveRegularization.hessian!(who, nlp, x0)
-  alloc_hessian(who, nlp, x0)
-  @test (alloc_hessian(who, nlp, x0)) <= limit
-  @show alloc_hessian(who, nlp, x0)
+  stp = NLPStopping(nlp)
+  stp.meta.max_iter = 5
+  workspace = AdaptiveRegularization.TRARCWorkspace(nlp, Hess)
+  alloc_AdaptiveRegularization(stp, PData, workspace, TRnothing)
+  al = @allocated alloc_AdaptiveRegularization(stp, PData, workspace, TRnothing)
+  println("Allocations of TRARC using $Hess: $al")
+  @show al
 end
-
-stp = NLPStopping(nlp)
-stp.meta.max_iter = 5
-TRnothing = TrustRegion(10.0, max_unsuccinarow = 2)
-
-nlp = stp.pb
-S, T = typeof(x0), eltype(x0)
-PData = NothingPData(S, T, nlp.meta.nvar)
-workspace = AdaptiveRegularization.TRARCWorkspace(nlp, HessOp)
-
-alloc_AdaptiveRegularization(stp, PData, workspace, TRnothing, solve_nothing) =
-  @allocated TRARC(stp, PData, workspace, TRnothing, solve_model = solve_nothing)
-alloc_AdaptiveRegularization(stp, PData, workspace, TRnothing, solve_nothing)
-alloc_AdaptiveRegularization(stp, PData, workspace, TRnothing, solve_nothing)
-@show alloc_AdaptiveRegularization(stp, PData, workspace, TRnothing, solve_nothing) # 11192
-
-workspace = AdaptiveRegularization.TRARCWorkspace(nlp, HessSparseCOO)
-
-alloc_AdaptiveRegularization_COO(stp, PData, workspace, TRnothing, solve_nothing) =
-  @allocated TRARC(stp, PData, workspace, TRnothing, solve_model = solve_nothing)
-alloc_AdaptiveRegularization_COO(stp, PData, workspace, TRnothing, solve_nothing)
-alloc_AdaptiveRegularization_COO(stp, PData, workspace, TRnothing, solve_nothing)
-@show alloc_AdaptiveRegularization_COO(stp, PData, workspace, TRnothing, solve_nothing) # 11192
diff --git a/test/allocation_test_utils.jl b/test/allocation_test_utils.jl
new file mode 100644
index 0000000..eb094df
--- /dev/null
+++ b/test/allocation_test_utils.jl
@@ -0,0 +1,58 @@
+using AdaptiveRegularization, LinearAlgebra, Test
+
+T = Float64
+S = Vector{T}
+n = 1000
+
+TR = TrustRegion(T(10))
+α = T(100)
+
+# Test increase / decrease
+
+function alloc_decrease(XData, α, TR)
+  AdaptiveRegularization.decrease(XData, α, TR)
+  return nothing
+end
+
+function alloc_increase(XData, α, TR)
+  AdaptiveRegularization.increase(XData, α, TR)
+  return nothing
+end
+
+for XData in (PDataKARC(S, T, n), PDataTRK(S, T, n), PDataST(S, T, n))
+  @testset "Allocation test in AdaptiveRegularization.decrease for $(typeof(XData))" begin
+    alloc_decrease(XData, α, TR)
+    al = @allocated alloc_decrease(XData, α, TR)
+    @test al == 0
+  end
+  @testset "Allocation test in AdaptiveRegularization.increase for $(typeof(XData))" begin
+    alloc_increase(XData, α, TR)
+    al = @allocated alloc_increase(XData, α, TR)
+    @test al == 0
+  end
+end
+
+# Test hessian evaluation in-place
+
+using NLPModelsTest
+nlp = NLPModelsTest.BROWNDEN()
+n = nlp.meta.nvar
+x0 = nlp.meta.x0
+
+function alloc_hessian(who, nlp, x0)
+  AdaptiveRegularization.hessian!(who, nlp, x0)
+  return nothing
+end
+
+@testset "Test in-place hessian allocations" for (Workspace, limit) in (
+  (HessDense, 1952),
+  (HessSparse, 944),
+  (HessSparseCOO, 0),
+  (HessOp, 960),
+)
+  who = Workspace(nlp, n)
+  alloc_hessian(who, nlp, x0)
+  al = @allocated alloc_hessian(who, nlp, x0)
+  @test al <= limit
+  println("Allocations for $Workspace: $al")
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index 48ce6cb..edab6e1 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -39,6 +39,7 @@ for solver in ALL_solvers
 end
 
 if VERSION >= v"1.7.0"
+  include("allocation_test_utils.jl")
   include("allocation_test.jl")
   include("allocation_test_main.jl")
 end