Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ using ProximalOperators
using ADNLPModels,
OptimizationProblems,
OptimizationProblems.ADNLPProblems,
ManualNLPModels,
NLPModels,
NLPModelsModifiers,
RegularizedProblems,
Expand All @@ -18,6 +19,10 @@ const global bpdn, bpdn_nls, sol = bpdn_model(compound)
const global bpdn2, bpdn_nls2, sol2 = bpdn_model(compound, bounds = true)
const global λ = norm(grad(bpdn, zeros(bpdn.meta.nvar)), Inf) / 10

# Shared helpers first: @wrappedallocs and the manual Rosenbrock model (utils.jl),
# then the generic solver-checking harness (test-solver.jl) used by the solver tests.
include("utils.jl")
include("test-solver.jl")

include("test-R2N.jl")
include("test_AL.jl")

for (mod, mod_name) ∈ ((x -> x, "exact"), (LSR1Model, "lsr1"), (LBFGSModel, "lbfgs"))
Expand Down
98 changes: 98 additions & 0 deletions test/test-R2N.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
# Tests for the R2N solver, exercised through the generic test_solver harness.
@testset "R2N" begin
# BASIC TESTS
# Test basic NLP with 2-norm regularizer: check every termination status.
@testset "BASIC" begin
  # Small smooth problem (Rosenbrock) with an ℓ2 regularizer.
  nlp = construct_rosenbrock_nlp()
  regularized = RegularizedNLPModel(nlp, NormL2(0.01))

  # Convergence to a first-order point from the default starting point.
  test_solver(
    regularized,
    R2N,
    expected_status = :first_order,
    solver_kwargs = (atol = 1e-6, rtol = 1e-6),
  )

  # Resource-limit statuses: start far from the solution with unreachable
  # tolerances so the corresponding limit is what stops the solver.
  tight_tols = (atol = 1e-16, rtol = 1e-16)
  for (status, limit) in (
    (:max_time, (max_time = 1e-12,)),
    (:max_iter, (max_iter = 1,)),
    (:max_eval, (max_eval = 1,)),
  )
    test_solver(
      regularized,
      R2N,
      expected_status = status,
      solver_kwargs = merge((x = [π, -π],), tight_tols, limit),
    )
  end

  # User-requested stop through the callback mechanism.
  stop_at_four = (nlp, solver, stats) -> begin
    # We could add some tests here as well.
    if stats.iter == 4
      stats.status = :user
    end
  end
  test_solver(
    regularized,
    R2N,
    expected_status = :user,
    solver_kwargs = (x = [π, -π], atol = 1e-16, rtol = 1e-16, callback = stop_at_four),
  )
end

# BPDN TESTS
# Test bpdn with L-BFGS and 1-norm regularizer.
@testset "BPDN" begin
bpdn_kwargs = (x = zeros(bpdn.meta.nvar), σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
reg_nlp = RegularizedNLPModel(LBFGSModel(bpdn), NormL1(λ))
test_solver(reg_nlp, R2N, expected_status = :first_order, solver_kwargs = bpdn_kwargs)
solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
# The pre-allocated (solve!) calling form must not allocate at all.
@test @wrappedallocs(
solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
) == 0

#test_solver(reg_nlp, # FIXME: divide by 0 error in the LBFGS approximation
Copy link
Collaborator

@MohamedLaghdafHABIBOULLAH MohamedLaghdafHABIBOULLAH Jan 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@MaxenceGollier
I checked this in detail. The issue is actually related to R2DH, and we should probably open a dedicated issue. The problem occurs when $$s = 0$$. In this case, R2DH does not stop because $$\xi = h_k - m_k(s) + \max(1, |h_k|)\times 10 \times \varepsilon = \max(1, |h_k|)\times 10 \times \varepsilon.$$
Although this quantity may be small, it is not negligible. When we compute its root, the resulting value can be larger than expected, which may prevent the algorithm from terminating.

A simple and reasonable fix is to assume instead that $$\xi = h_k - m_k(s).$$

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I never understood why we added this term to $$\xi$$ @dpo ?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That term is there to compensate for catastrophic cancellation between $h_k$ and $m_k(s)$. It’s hard to believe that a solver would compute $s = 0$ exactly. That suggests that the solver should have stopped earlier.

@MohamedLaghdafHABIBOULLAH Could you please show the run of R2DH where you observed $s = 0$?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@MohamedLaghdafHABIBOULLAH, I agree with Dominique — could you provide us with a run where this occurs?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry I just saw the message, yes of course.

# "R2N",
# expected_status = :first_order,
# solver_kwargs=bpdn_kwargs,
# solver_constructor_kwargs=(subsolver=R2DHSolver,))

# Test bpdn with L-SR1 and 0-norm regularizer.
reg_nlp = RegularizedNLPModel(LSR1Model(bpdn), NormL0(λ))
test_solver(reg_nlp, R2N, expected_status = :first_order, solver_kwargs = bpdn_kwargs)
solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
# The pre-allocated (solve!) calling form must not allocate at all.
@test @wrappedallocs(
solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
) == 0

# Same L-SR1 / 0-norm problem, solved with the R2DH subsolver instead of the default.
test_solver(
reg_nlp,
R2N,
expected_status = :first_order,
solver_kwargs = bpdn_kwargs,
solver_constructor_kwargs = (subsolver = R2DHSolver,),
)
solver, stats = R2NSolver(reg_nlp, subsolver = R2DHSolver), RegularizedExecutionStats(reg_nlp)
# Allocation-free solve! must also hold with the subsolver in place.
@test @wrappedallocs(
solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
) == 0
end
end
40 changes: 40 additions & 0 deletions test/test-solver.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
"""
    test_solver(reg_nlp, solver; expected_status = :first_order,
                solver_constructor_kwargs = (;), solver_kwargs = (;))

Run `solver` on the regularized problem `reg_nlp` through both calling forms
and check the returned statistics.

# Arguments
- `reg_nlp::R`: a `RegularizedNLPModel`-like problem (must expose `reg_nlp.model.meta`).
- `solver::F`: the solver function, e.g. `R2N`; its in-place companion is looked up
  by name as `RegularizedOptimization.<solver>Solver`.

# Keywords
- `expected_status`: the termination status the run is expected to report.
- `solver_constructor_kwargs`: keyword arguments forwarded to the solver constructor.
- `solver_kwargs`: keyword arguments forwarded to the solve call (may contain `:x`,
  the starting point).
"""
function test_solver(
  reg_nlp::R,
  solver::F;
  expected_status = :first_order,
  solver_constructor_kwargs = (;),
  solver_kwargs = (;),
) where {R, F}

  # Test output with allocating calling form.
  stats_basic = solver(reg_nlp; solver_constructor_kwargs..., solver_kwargs...)

  # The starting point defaults to the model's x0 unless the caller supplied one.
  x0 = get(solver_kwargs, :x, reg_nlp.model.meta.x0)
  @test typeof(stats_basic.solution) == typeof(x0)
  @test length(stats_basic.solution) == reg_nlp.model.meta.nvar
  @test typeof(stats_basic.dual_feas) == eltype(stats_basic.solution)
  @test stats_basic.status == expected_status
  # The reported objective must be the objective at the reported solution,
  # and the solver must not have made the objective worse than at x0.
  @test obj(reg_nlp, stats_basic.solution) == stats_basic.objective
  @test stats_basic.objective <= obj(reg_nlp, x0)

  # Test output with the optimized (pre-allocated) calling form.
  solver_constructor = getfield(RegularizedOptimization, Symbol(string(solver) * "Solver"))
  solver_object = solver_constructor(reg_nlp; solver_constructor_kwargs...)
  stats_optimized = RegularizedExecutionStats(reg_nlp)

  solve!(solver_object, reg_nlp, stats_optimized; solver_kwargs...)
  @test typeof(stats_optimized.solution) == typeof(x0)
  @test length(stats_optimized.solution) == reg_nlp.model.meta.nvar
  @test typeof(stats_optimized.dual_feas) == eltype(stats_optimized.solution)
  @test stats_optimized.status == expected_status
  @test obj(reg_nlp, stats_optimized.solution) == stats_optimized.objective
  @test stats_optimized.objective <= obj(reg_nlp, x0)

  # Solver-specific invariant: some solvers (e.g. R2N) store ∇fk and its
  # σ-scaled negative mν∇fk; only check the consistency of the two buffers when
  # those fields and the :sigma_cauchy entry actually exist, so this harness
  # stays usable with other solvers.
  if hasproperty(solver_object, :mν∇fk) &&
     hasproperty(solver_object, :∇fk) &&
     haskey(stats_optimized.solver_specific, :sigma_cauchy)
    σ = stats_optimized.solver_specific[:sigma_cauchy]
    @test all(
      solver_object.mν∇fk + solver_object.∇fk / σ .≤ eps(eltype(solver_object.mν∇fk)),
    )
  end

  # TODO: test that the optimized entries in stats_optimized and stats_basic are the same.
end
41 changes: 0 additions & 41 deletions test/test_allocs.jl
Original file line number Diff line number Diff line change
@@ -1,44 +1,3 @@
"""
@wrappedallocs(expr)

Given an expression, this macro wraps that expression inside a new function
which will evaluate that expression and measure the amount of memory allocated
by the expression. Wrapping the expression in a new function allows for more
accurate memory allocation detection when using global variables (e.g. when
at the REPL).

This code is based on that of https://github.com/JuliaAlgebra/TypedPolynomials.jl/blob/master/test/runtests.jl

For example, `@wrappedallocs(x + y)` produces:

```julia
function g(x1, x2)
@allocated x1 + x2
end
g(x, y)
```

You can use this macro in a unit test to verify that a function does not
allocate:

```
@test @wrappedallocs(x + y) == 0
```
"""
macro wrappedallocs(expr)
kwargs = [a for a in expr.args if isa(a, Expr)]
args = [a for a in expr.args if isa(a, Symbol)]

argnames = [gensym() for a in args]
kwargs_dict = Dict{Symbol, Any}(a.args[1] => a.args[2] for a in kwargs if a.head == :kw)
quote
function g($(argnames...); kwargs_dict...)
$(Expr(expr.head, argnames..., kwargs...)) # Call the function twice to make the allocated macro more stable
@allocated $(Expr(expr.head, argnames..., kwargs...))
end
$(Expr(:call, :g, [esc(a) for a in args]...))
end
end

# Test non allocating solve!
@testset "NLP allocs" begin
Expand Down
52 changes: 52 additions & 0 deletions test/utils.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""
    @wrappedallocs(expr)

Given an expression, this macro wraps that expression inside a new function
which will evaluate that expression and measure the amount of memory allocated
by the expression. Wrapping the expression in a new function allows for more
accurate memory allocation detection when using global variables (e.g. when
at the REPL).

This code is based on that of https://github.com/JuliaAlgebra/TypedPolynomials.jl/blob/master/test/runtests.jl

You can use this macro in a unit test to verify that a function does not
allocate:

```
@test @wrappedallocs(x + y) == 0
```
"""
macro wrappedallocs(expr)
  # Escape a keyword argument's value (not its name) so it is resolved in the
  # caller's scope; escape any other non-symbol argument wholesale.
  esckw(a) = isa(a, Expr) && a.head == :kw ? Expr(:kw, a.args[1], esc(a.args[2])) : esc(a)

  # Bare symbols (the callee and plain variable arguments) are hoisted into
  # parameters of the generated function and passed in from the caller's
  # scope; this is what makes the measurement robust to non-const globals.
  args = [a for a in expr.args if isa(a, Symbol)]
  argnames = [gensym() for _ in args]
  substitution = Dict(zip(args, argnames))

  # Rebuild the call *in place*: symbols become generated parameters; keyword
  # values, `;`-style keyword blocks, literals, and nested expressions are
  # escaped where they stand. (The previous version left keyword values
  # unescaped — raising `UndefVarError` whenever a value was a caller
  # variable — appended non-symbol positional arguments out of order at the
  # end of the call, and dropped literal arguments entirely.)
  rebuilt = map(expr.args) do a
    if isa(a, Symbol)
      substitution[a]
    elseif isa(a, Expr) && a.head == :parameters
      Expr(:parameters, map(esckw, a.args)...)
    else
      esckw(a)
    end
  end
  call = Expr(expr.head, rebuilt...)

  quote
    function g($(argnames...))
      $call # Call the expression once first to make @allocated more stable.
      @allocated $call
    end
    $(Expr(:call, :g, [esc(a) for a in args]...))
  end
end

# Construct the rosenbrock problem.

# Objective of the classic 2D Rosenbrock function,
#   f(x) = 100 (x₂ - x₁²)² + (1 - x₁)²,
# whose unique minimizer is x = (1, 1) with f = 0.
function rosenbrock_f(x::Vector{T}) where {T <: Real}
  valley = x[2] - x[1]^2
  return 100 * valley^2 + (1 - x[1])^2
end

"""
    rosenbrock_grad!(gx, x)

Store the gradient of the Rosenbrock function at `x` in `gx` and return `gx`.
"""
function rosenbrock_grad!(gx::Vector{T}, x::Vector{T}) where {T <: Real}
  gx[1] = -400 * x[1] * (x[2] - x[1]^2) - 2 * (1 - x[1])
  gx[2] = 200 * (x[2] - x[1]^2)
  # Return the mutated buffer, per the in-place (`!`) convention used by
  # NLPModels-style gradient callbacks.
  return gx
end

"""
    rosenbrock_hv!(hv, x, v; obj_weight = 1.0)

Store `obj_weight * ∇²f(x) * v` for the Rosenbrock function in `hv` and
return `hv`.

The Hessian at `x` is
`[1200x₁² - 400x₂ + 2  -400x₁; -400x₁  200]`.
"""
function rosenbrock_hv!(hv::Vector{T}, x::Vector{T}, v::Vector{T}; obj_weight = 1.0) where {T}
  # Scale by obj_weight: the NLPModels hprod contract is obj_weight * H * v.
  # (The previous version accepted obj_weight but silently ignored it.)
  hv[1] = obj_weight * ((1200 * x[1]^2 - 400 * x[2] + 2) * v[1] - 400 * x[1] * v[2])
  hv[2] = obj_weight * (-400 * x[1] * v[1] + 200 * v[2])
  return hv
end

# Assemble a ManualNLPModels model of the Rosenbrock function, started at the
# origin, wired to the hand-coded gradient and Hessian-vector product above.
function construct_rosenbrock_nlp()
  x0 = zeros(2)
  model = NLPModel(x0, rosenbrock_f, grad = rosenbrock_grad!, hprod = rosenbrock_hv!)
  return model
end
Loading