Skip to content
Open
Show file tree
Hide file tree
Changes from 13 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ using ProximalOperators
using ADNLPModels,
OptimizationProblems,
OptimizationProblems.ADNLPProblems,
ManualNLPModels,
NLPModels,
NLPModelsModifiers,
RegularizedProblems,
Expand All @@ -18,6 +19,10 @@ const global bpdn, bpdn_nls, sol = bpdn_model(compound)
const global bpdn2, bpdn_nls2, sol2 = bpdn_model(compound, bounds = true)
const global λ = norm(grad(bpdn, zeros(bpdn.meta.nvar)), Inf) / 10

include("utils.jl")
include("test-solver.jl")

include("test-R2N.jl")
include("test_AL.jl")
Comment thread
MaxenceGollier marked this conversation as resolved.

for (mod, mod_name) ∈ ((x -> x, "exact"), (LSR1Model, "lsr1"), (LBFGSModel, "lbfgs"))
Expand Down
101 changes: 101 additions & 0 deletions test/test-R2N.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
# ---------------------------------------------------------------------------
# Test suite for the R2N solver. Exercises every documented exit status
# (:first_order, :max_time, :max_iter, :max_eval, :user) on a small
# regularized Rosenbrock problem, then runs BPDN problems with quasi-Newton
# models and nonsmooth regularizers. The bare "Comment thread"/reviewer lines
# below are GitHub review artifacts from the PR page, not Julia code.
# ---------------------------------------------------------------------------
@testset "R2N" begin
# BASIC TESTS
# Test basic NLP with 2-norm
@testset "BASIC" begin
rosenbrock_nlp = construct_rosenbrock_nlp()
rosenbrock_reg_nlp = RegularizedNLPModel(rosenbrock_nlp, NormL2(0.01))

# Test first order status
first_order_kwargs = (atol = 1e-6, rtol = 1e-6)
test_solver(
rosenbrock_reg_nlp,
"R2N",
expected_status = :first_order,
solver_kwargs = first_order_kwargs,
)
# NOTE(review): `solver` and `stats` below appear unused in this testset —
# presumably leftover from a planned allocation test; confirm and remove.
solver, stats = R2NSolver(rosenbrock_reg_nlp), RegularizedExecutionStats(rosenbrock_reg_nlp)
Comment thread
MaxenceGollier marked this conversation as resolved.
Outdated

# Test max time status
# Tiny max_time with unreachable tolerances forces the :max_time exit.
max_time_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_time = 1e-12)
test_solver(
rosenbrock_reg_nlp,
Comment thread
MohamedLaghdafHABIBOULLAH marked this conversation as resolved.
"R2N",
expected_status = :max_time,
solver_kwargs = max_time_kwargs,
)

# Test max iter status
max_iter_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_iter = 1)
test_solver(
rosenbrock_reg_nlp,
"R2N",
expected_status = :max_iter,
solver_kwargs = max_iter_kwargs,
)

# Test max eval status
max_eval_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_eval = 1)
test_solver(
rosenbrock_reg_nlp,
"R2N",
expected_status = :max_eval,
solver_kwargs = max_eval_kwargs,
)

# Callback that forces a :user exit on the fourth iteration.
callback = (nlp, solver, stats) -> begin
# We could add some tests here as well.

# Check user status
if stats.iter == 4
stats.status = :user
end
end
callback_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, callback = callback)
test_solver(
rosenbrock_reg_nlp,
"R2N",
expected_status = :user,
solver_kwargs = callback_kwargs,
)
end

# BPDN TESTS
# Test bpdn with L-BFGS and 1-norm
@testset "BPDN" begin
bpdn_kwargs = (x0 = zeros(bpdn.meta.nvar), σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
reg_nlp = RegularizedNLPModel(LBFGSModel(bpdn), NormL1(λ))
test_solver(reg_nlp, "R2N", expected_status = :first_order, solver_kwargs = bpdn_kwargs)
# In-place solve! must be allocation-free (kwarg values are literals here on
# purpose — see the @wrappedallocs docs: variable-valued kwargs get boxed).
solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
@test @wrappedallocs(
solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
) == 0

#test_solver(reg_nlp, # FIXME: divide by 0 error in the LBFGS approximation
Copy link
Copy Markdown
Collaborator

@MohamedLaghdafHABIBOULLAH MohamedLaghdafHABIBOULLAH Jan 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@MaxenceGollier
I checked this in detail. The issue is actually related to R2DH, and we should probably open a dedicated issue. The problem occurs when $$s = 0$$. In this case, R2DH does not stop because $$\xi = h_k - m_k(s) + \max(1, |h_k|)\times 10 \times \varepsilon = \max(1, |h_k|)\times 10 \times \varepsilon.$$
Although this quantity may be small, it is not negligible. When we compute its root, the resulting value can be larger than expected, which may prevent the algorithm from terminating.

A simple and reasonable fix is to assume instead that $$\xi = h_k - m_k(s).$$

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I never understood why we added this term to $$\xi$$ @dpo ?

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That term is there to compensate for catastrophic cancellation between $h_k$ and $m_k(s)$. It’s hard to believe that a solver would compute $s = 0$ exactly. That suggests that the solver should have stopped earlier.

@MohamedLaghdafHABIBOULLAH Could you please show the run of R2DH where you observed $s = 0$?

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@MohamedLaghdafHABIBOULLAH, I agree with Dominique, could you provide us a run where this occurs ?

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry I just saw the message, yes of course.

# "R2N",
# expected_status = :first_order,
# solver_kwargs=bpdn_kwargs,
# solver_constructor_kwargs=(subsolver=R2DHSolver,))

# Test bpdn with L-SR1 and 0-norm
reg_nlp = RegularizedNLPModel(LSR1Model(bpdn), NormL0(λ))
test_solver(reg_nlp, "R2N", expected_status = :first_order, solver_kwargs = bpdn_kwargs)
# FIXME: allocations fail with LSR1 -> a PR is awaiting on LinearOperators.jl
# solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
# @test @wrappedallocs(
# solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
# ) == 0

# Same BPDN/LSR1 problem, solved with the R2DH subsolver.
test_solver(
reg_nlp,
"R2N",
expected_status = :first_order,
solver_kwargs = bpdn_kwargs,
solver_constructor_kwargs = (subsolver = R2DHSolver,),
)
# FIXME: allocations fail with LSR1 -> a PR is awaiting on LinearOperators.jl
# solver, stats = R2NSolver(reg_nlp, subsolver = R2DHSolver), RegularizedExecutionStats(reg_nlp)
# @test @wrappedallocs(
# solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
# ) == 0
end
end
48 changes: 48 additions & 0 deletions test/test-solver.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
"""
    test_solver(reg_nlp, solver_name;
                expected_status = :first_order,
                solver_constructor_kwargs = (;),
                solver_kwargs = (;))

Run the solver named `solver_name` on the regularized problem `reg_nlp` through
both calling forms and test that their outputs are consistent.

1. Allocating form: look up `solver_name` in `RegularizedOptimization`, call it
   with `reg_nlp.model`, `reg_nlp.h` and a fresh `ROSolverOptions()`.
2. Optimized form: look up `solver_name * "Solver"`, build the solver object
   with `solver_constructor_kwargs`, and call `solve!` in place (the `x0` entry
   of `solver_kwargs`, if any, is passed as the `x` keyword instead).

For each form it tests the solution type/length, the `dual_feas` type, that the
final status equals `expected_status`, and that the reported objective matches
`obj` at the solution and does not exceed the objective at the starting point.
The bare "Comment thread"/reviewer lines below are GitHub review artifacts
from the PR page, not Julia code.
"""
function test_solver(
reg_nlp::R,
solver_name::String;
Comment thread
MaxenceGollier marked this conversation as resolved.
Outdated
expected_status = :first_order,
solver_constructor_kwargs = (;),
solver_kwargs = (;),
) where {R}

# Test output with allocating calling form
solver_fun = getfield(RegularizedOptimization, Symbol(solver_name))
stats_basic = solver_fun(
reg_nlp.model,
reg_nlp.h,
Comment thread
MaxenceGollier marked this conversation as resolved.
Outdated
ROSolverOptions();
solver_constructor_kwargs...,
solver_kwargs...,
)

# Default to the model's own starting point when the caller gave no x0.
x0 = get(solver_kwargs, :x0, reg_nlp.model.meta.x0)
@test typeof(stats_basic.solution) == typeof(x0)
@test length(stats_basic.solution) == reg_nlp.model.meta.nvar
@test typeof(stats_basic.dual_feas) == eltype(stats_basic.solution)
@test stats_basic.status == expected_status
@test obj(reg_nlp, stats_basic.solution) == stats_basic.objective
@test stats_basic.objective <= obj(reg_nlp, x0)

# Test output with optimized calling form
solver_constructor = getfield(RegularizedOptimization, Symbol(solver_name * "Solver"))
solver = solver_constructor(reg_nlp; solver_constructor_kwargs...)
stats_optimized = RegularizedExecutionStats(reg_nlp)

# Remove the x0 entry from solver_kwargs
optimized_solver_kwargs = Base.structdiff(solver_kwargs, NamedTuple{(:x0,)})
solve!(solver, reg_nlp, stats_optimized; x = x0, optimized_solver_kwargs...) # It would be interesting to check for allocations here as well but depending on
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Testing for allocations would be the main point I think, because the first set of test (where you call solver_fun) already call solver_contructor and solve!(). I wonder if something like @wrappedallocs @eval solve!(solver, reg_nlp, stats; atol = $atol) would work. I agree that allocation tests need not be in this function, but, in general, it should be possible to construct the expression of the solve call and measure allocations.

# the structure of solver_kwargs, some variables might get boxed, resulting in
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

could you open an issue for this comment?

Copy link
Copy Markdown
Collaborator Author

@MaxenceGollier MaxenceGollier Jan 5, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it is just a bad idea to add the allocations tests inside this function because this means that you can not assign a variable to one of the solver_constructor_kwargs or solver_kwargs, or else they will get boxed and result in allocations.

To be clearer @wrappedallocs works in such a way that even if

@wrappedallocs solve!(solver, reg_nlp, stats; atol = 1e-3)

returns 0,

tol = 1e-3
@wrappedallocs solve!(solver, reg_nlp, stats; atol = tol)

doesn't.

I think adding this constraint is very confusing so we should just leave the allocation tests out of this function.
I want to keep the comment though so that users will understand why we did not add the allocation tests there, but it might be more precise.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can't we have a function wrappedallocsv2 to avoid this?

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, there is nothing "easy" we can do, keyword arguments will get boxed, this is a Julia issue.

# false positives, for example if tol = 1e-3; solver_kwargs = (atol = tol),
# then wrappedallocs would give a > 0 answer...
@test typeof(stats_optimized.solution) == typeof(x0)
@test length(stats_optimized.solution) == reg_nlp.model.meta.nvar
@test typeof(stats_optimized.dual_feas) == eltype(stats_optimized.solution)
@test stats_optimized.status == expected_status
@test obj(reg_nlp, stats_optimized.solution) == stats_optimized.objective
@test stats_optimized.objective <= obj(reg_nlp, x0)
# NOTE(review): this checks elementwise that mν∇fk ≈ -∇fk / sigma_cauchy, i.e.
# the cached scaled gradient is consistent with the Cauchy sigma — confirm the
# intended tolerance; `eps(T)` is an absolute, not relative, bound here.
@test all(solver.mν∇fk + solver.∇fk/stats_optimized.solver_specific[:sigma_cauchy] .≤ eps(eltype(solver.mν∇fk)))
Comment thread
MaxenceGollier marked this conversation as resolved.
Outdated

# TODO: test that the optimized entries in stats_optimized and stats_basic are the same.

end
41 changes: 0 additions & 41 deletions test/test_allocs.jl
Original file line number Diff line number Diff line change
@@ -1,44 +1,3 @@
"""
@wrappedallocs(expr)

Given an expression, this macro wraps that expression inside a new function
which will evaluate that expression and measure the amount of memory allocated
by the expression. Wrapping the expression in a new function allows for more
accurate memory allocation detection when using global variables (e.g. when
at the REPL).

This code is based on that of https://github.com/JuliaAlgebra/TypedPolynomials.jl/blob/master/test/runtests.jl

For example, `@wrappedallocs(x + y)` produces:

```julia
function g(x1, x2)
@allocated x1 + x2
end
g(x, y)
```

You can use this macro in a unit test to verify that a function does not
allocate:

```
@test @wrappedallocs(x + y) == 0
```
"""
macro wrappedallocs(expr)
kwargs = [a for a in expr.args if isa(a, Expr)]
args = [a for a in expr.args if isa(a, Symbol)]

argnames = [gensym() for a in args]
kwargs_dict = Dict{Symbol, Any}(a.args[1] => a.args[2] for a in kwargs if a.head == :kw)
quote
function g($(argnames...); kwargs_dict...)
$(Expr(expr.head, argnames..., kwargs...)) # Call the function twice to make the allocated macro more stable
@allocated $(Expr(expr.head, argnames..., kwargs...))
end
$(Expr(:call, :g, [esc(a) for a in args]...))
end
end

# Test non allocating solve!
@testset "NLP allocs" begin
Expand Down
52 changes: 52 additions & 0 deletions test/utils.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""
@wrappedallocs(expr)

Given an expression, this macro wraps that expression inside a new function
which will evaluate that expression and measure the amount of memory allocated
by the expression. Wrapping the expression in a new function allows for more
accurate memory allocation detection when using global variables (e.g. when
at the REPL).

This code is based on that of https://github.com/JuliaAlgebra/TypedPolynomials.jl/blob/master/test/runtests.jl

You can use this macro in a unit test to verify that a function does not
allocate:

```
@test @wrappedallocs(x + y) == 0
```
"""
"""
    @wrappedallocs(expr)

Evaluate the call `expr` inside a freshly generated function and return the
number of bytes allocated by it. Wrapping the expression in a function gives
accurate allocation measurements even when `expr` references global variables
(e.g. at the REPL or in a test file). The expression is evaluated twice: the
first call warms up compilation so `@allocated` measures only the call itself.

This code is based on that of https://github.com/JuliaAlgebra/TypedPolynomials.jl/blob/master/test/runtests.jl

You can use this macro in a unit test to verify that a function does not
allocate:

```
@test @wrappedallocs(x + y) == 0
```

!!! warning
    Keyword-argument *values* in `expr` must be literals. Keyword expressions
    are spliced into the generated function body without escaping, so
    `@wrappedallocs f(x; atol = 1e-6)` works while `@wrappedallocs f(x; atol = tol)`
    (with `tol` a local variable) does not measure what you expect.
"""
macro wrappedallocs(expr)
    # Symbols in the call become positional parameters of the generated
    # function; `Expr` arguments (keyword assignments like `atol = 1e-6`)
    # are spliced into the body verbatim.
    kwargs = [a for a in expr.args if isa(a, Expr)]
    args = [a for a in expr.args if isa(a, Symbol)]

    # One fresh name per symbolic argument so the generated function receives
    # caller values as parameters instead of closing over globals.
    argnames = [gensym() for a in args]
    quote
        # Fix: the previous version built an unused `kwargs_dict` Dict and
        # declared `function g(...; kwargs_dict...)` — a keyword slurp that
        # never received anything, since `g` is only ever called with
        # positional arguments below. Both are removed as dead code.
        function g($(argnames...))
            $(Expr(expr.head, argnames..., kwargs...)) # Call the function twice to make the allocated macro more stable
            @allocated $(Expr(expr.head, argnames..., kwargs...))
        end
        $(Expr(:call, :g, [esc(a) for a in args]...))
    end
end

# Construct the rosenbrock problem.

"""
    rosenbrock_f(x)

Rosenbrock objective `f(x) = 100 (x₂ - x₁²)² + (1 - x₁)²`, minimized at
`x = (1, 1)` with `f = 0`.
"""
function rosenbrock_f(x::Vector{T}) where {T <: Real}
    residual = x[2] - x[1]^2   # valley term
    offset = 1 - x[1]          # distance from the minimizer in x₁
    return 100 * residual^2 + offset^2
end

"""
    rosenbrock_grad!(gx, x)

Compute the gradient of `rosenbrock_f` at `x` in place in `gx` and return `gx`.

Fix: the previous version implicitly returned the scalar value of its last
assignment (`gx[2]`); a mutating `!` function should return the mutated
container, as the NLPModels-style `grad` callback convention expects.
"""
function rosenbrock_grad!(gx::Vector{T}, x::Vector{T}) where {T <: Real}
    gx[1] = -400 * x[1] * (x[2] - x[1]^2) - 2 * (1 - x[1])
    gx[2] = 200 * (x[2] - x[1]^2)
    return gx
end

"""
    rosenbrock_hv!(hv, x, v; obj_weight = 1.0)

Compute `obj_weight * ∇²f(x) * v` for `rosenbrock_f` in place in `hv` and
return `hv`.

Fix: the previous version accepted `obj_weight` but silently ignored it; the
NLPModels `hprod` contract scales the Hessian-vector product by `obj_weight`.
It also returned the scalar value of its last assignment instead of `hv`.
"""
function rosenbrock_hv!(hv::Vector{T}, x::Vector{T}, v::Vector{T}; obj_weight = 1.0) where {T}
    hv[1] = obj_weight * ((1200 * x[1]^2 - 400 * x[2] + 2) * v[1] - 400 * x[1] * v[2])
    hv[2] = obj_weight * (-400 * x[1] * v[1] + 200 * v[2])
    return hv
end

"""
    construct_rosenbrock_nlp()

Build an `NLPModel` for the Rosenbrock problem from the manual callbacks
`rosenbrock_f`, `rosenbrock_grad!` and `rosenbrock_hv!`, starting at the
origin. (`NLPModel` presumably comes from ManualNLPModels, imported in
runtests.jl — confirm against the test environment.)
"""
function construct_rosenbrock_nlp()
    x0 = zeros(2)
    return NLPModel(x0, rosenbrock_f, grad = rosenbrock_grad!, hprod = rosenbrock_hv!)
end
Loading