-
Notifications
You must be signed in to change notification settings - Fork 11
Refactor tests: R2N #271
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Refactor tests: R2N #271
Changes from 11 commits
59ba728
80979c4
33d0800
f4ff437
0dd0fd8
66f8b13
80070be
1a3e079
cbf4ab9
ad20f48
8dcdd0b
b29b832
7c5c929
a364151
ead3fc5
e40e560
63eb910
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,101 @@ | ||
@testset "R2N" begin
  # BASIC TESTS: smooth Rosenbrock NLP regularized with a 2-norm.
  @testset "BASIC" begin
    rosenbrock_nlp = construct_rosenbrock_nlp()
    rosenbrock_reg_nlp = RegularizedNLPModel(rosenbrock_nlp, NormL2(0.01))

    # Convergence to a first-order point at default-ish tolerances.
    first_order_kwargs = (atol = 1e-6, rtol = 1e-6)
    test_solver(
      rosenbrock_reg_nlp,
      "R2N",
      expected_status = :first_order,
      solver_kwargs = first_order_kwargs,
    )
    solver, stats =
      R2NSolver(rosenbrock_reg_nlp), RegularizedExecutionStats(rosenbrock_reg_nlp)
    # NOTE(review): `solver`/`stats` are not used below — an allocation check
    # may have been removed here; confirm whether this line is still needed.

    # Termination on max_time: tolerances too tight to reach, tiny time budget.
    max_time_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_time = 1e-12)
    test_solver(
      rosenbrock_reg_nlp,
      "R2N",
      expected_status = :max_time,
      solver_kwargs = max_time_kwargs,
    )

    # Termination on max_iter.
    max_iter_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_iter = 1)
    test_solver(
      rosenbrock_reg_nlp,
      "R2N",
      expected_status = :max_iter,
      solver_kwargs = max_iter_kwargs,
    )

    # Termination on max_eval.
    max_eval_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_eval = 1)
    test_solver(
      rosenbrock_reg_nlp,
      "R2N",
      expected_status = :max_eval,
      solver_kwargs = max_eval_kwargs,
    )

    # User-requested stop through the callback mechanism.
    callback = (nlp, solver, stats) -> begin
      # We could add some tests here as well.

      # Request a :user stop on the fourth iteration.
      if stats.iter == 4
        stats.status = :user
      end
    end
    callback_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, callback = callback)
    test_solver(
      rosenbrock_reg_nlp,
      "R2N",
      expected_status = :user,
      solver_kwargs = callback_kwargs,
    )
  end

  # BPDN TESTS: basis-pursuit denoising with quasi-Newton models.
  @testset "BPDN" begin
    bpdn_kwargs = (x0 = zeros(bpdn.meta.nvar), σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)

    # L-BFGS model with a 1-norm regularizer.
    reg_nlp = RegularizedNLPModel(LBFGSModel(bpdn), NormL1(λ))
    test_solver(reg_nlp, "R2N", expected_status = :first_order, solver_kwargs = bpdn_kwargs)
    solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
    @test @wrappedallocs(
      solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
    ) == 0

    # FIXME: divide by 0 error in the LBFGS approximation.
    # NOTE(review): discussion in the PR suggests the offending term exists to
    # compensate for catastrophic cancellation; a reproducing run of R2DH was
    # requested before changing it — keep disabled until that is settled.
    #test_solver(reg_nlp,
    #  "R2N",
    #  expected_status = :first_order,
    #  solver_kwargs=bpdn_kwargs,
    #  solver_constructor_kwargs=(subsolver=R2DHSolver,))

    # L-SR1 model with a 0-norm regularizer.
    reg_nlp = RegularizedNLPModel(LSR1Model(bpdn), NormL0(λ))
    test_solver(reg_nlp, "R2N", expected_status = :first_order, solver_kwargs = bpdn_kwargs)
    # FIXME: allocations fail with LSR1 -> a PR is awaiting on LinearOperators.jl
    # solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
    # @test @wrappedallocs(
    #   solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
    # ) == 0

    # Same L-SR1 model, solved with the R2DH subsolver.
    test_solver(
      reg_nlp,
      "R2N",
      expected_status = :first_order,
      solver_kwargs = bpdn_kwargs,
      solver_constructor_kwargs = (subsolver = R2DHSolver,),
    )
    # FIXME: allocations fail with LSR1 -> a PR is awaiting on LinearOperators.jl
    # solver, stats = R2NSolver(reg_nlp, subsolver = R2DHSolver), RegularizedExecutionStats(reg_nlp)
    # @test @wrappedallocs(
    #   solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
    # ) == 0
  end
end
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,48 @@ | ||
"""
    test_solver(reg_nlp, solver_name;
                expected_status = :first_order,
                solver_constructor_kwargs = (;),
                solver_kwargs = (;))

Run the solver named `solver_name` on the regularized problem `reg_nlp` through
both calling forms — the allocating convenience function and the pre-allocated
`solve!` form — and `@test` that each returns a solution of the expected type
and length, with the expected termination status and a consistent objective.

`solver_constructor_kwargs` are forwarded to the solver constructor (e.g. a
`subsolver` choice); `solver_kwargs` are forwarded to the solve call and may
contain an `:x0` entry used as the starting point.
"""
function test_solver(
  reg_nlp::R,
  solver_name::String;
  expected_status = :first_order,
  solver_constructor_kwargs = (;),
  solver_kwargs = (;),
) where {R}
  # --- Allocating calling form -------------------------------------------
  solver_fun = getfield(RegularizedOptimization, Symbol(solver_name))
  stats_basic = solver_fun(
    reg_nlp.model,
    reg_nlp.h,
    ROSolverOptions();
    solver_constructor_kwargs...,
    solver_kwargs...,
  )

  # Starting point: either supplied by the caller or the model's default.
  x0 = get(solver_kwargs, :x0, reg_nlp.model.meta.x0)
  @test typeof(stats_basic.solution) == typeof(x0)
  @test length(stats_basic.solution) == reg_nlp.model.meta.nvar
  @test typeof(stats_basic.dual_feas) == eltype(stats_basic.solution)
  @test stats_basic.status == expected_status
  @test obj(reg_nlp, stats_basic.solution) == stats_basic.objective
  @test stats_basic.objective <= obj(reg_nlp, x0)

  # --- Optimized (pre-allocated) calling form ----------------------------
  solver_constructor = getfield(RegularizedOptimization, Symbol(solver_name * "Solver"))
  solver = solver_constructor(reg_nlp; solver_constructor_kwargs...)
  stats_optimized = RegularizedExecutionStats(reg_nlp)

  # Drop the :x0 entry from solver_kwargs: the starting point is passed as
  # the `x` keyword below instead.
  optimized_solver_kwargs = Base.structdiff(solver_kwargs, NamedTuple{(:x0,)})
  # Allocation checks are deliberately NOT performed here: depending on the
  # structure of solver_kwargs, some variables might get boxed, yielding
  # false positives — e.g. with tol = 1e-3; solver_kwargs = (atol = tol,),
  # @wrappedallocs would report > 0 (see the PR discussion; keyword boxing is
  # a Julia limitation, so allocation tests live at the call sites instead).
  solve!(solver, reg_nlp, stats_optimized; x = x0, optimized_solver_kwargs...)

  @test typeof(stats_optimized.solution) == typeof(x0)
  @test length(stats_optimized.solution) == reg_nlp.model.meta.nvar
  @test typeof(stats_optimized.dual_feas) == eltype(stats_optimized.solution)
  @test stats_optimized.status == expected_status
  @test obj(reg_nlp, stats_optimized.solution) == stats_optimized.objective
  @test stats_optimized.objective <= obj(reg_nlp, x0)
  # NOTE(review): entries are signed, so a large negative residual would also
  # satisfy `.≤ eps`; if a magnitude check is intended, `abs.(...)` may be
  # missing here — confirm.
  # TODO: test that the optimized entries in stats_optimized and stats_basic
  # are the same.
  @test all(
    solver.mν∇fk + solver.∇fk / stats_optimized.solver_specific[:sigma_cauchy] .≤
    eps(eltype(solver.mν∇fk)),
  )
end
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,52 @@ | ||
"""
    @wrappedallocs(expr)

Given a function-call expression, wrap it inside a new function which evaluates
the call and measures the memory it allocates with `@allocated`. Wrapping the
expression in a new function allows for more accurate memory allocation
detection when using global variables (e.g. when at the REPL).

Positional arguments (including the callee itself and literals) become escaped
parameters of the generated function; keyword arguments written `kw = value`
are spliced back into the call with their values escaped, so caller-scope
variables resolve correctly. Keywords after a `;` (a `:parameters` block) are
not supported.

This code is based on that of
https://github.com/JuliaAlgebra/TypedPolynomials.jl/blob/master/test/runtests.jl

You can use this macro in a unit test to verify that a function does not
allocate:

```
@test @wrappedallocs(x + y) == 0
```
"""
macro wrappedallocs(expr)
  iskw(a) = isa(a, Expr) && a.head == :kw
  # Keyword arguments: keep the name, escape the value so that variables from
  # the caller's scope (e.g. `atol = tol`) resolve correctly.
  kwargs = [Expr(:kw, a.args[1], esc(a.args[2])) for a in expr.args if iskw(a)]
  # Everything else — the callee, positional expressions, and bare literals —
  # is routed through an escaped positional parameter of the generated
  # function. (The original filtered on `isa(a, Symbol)`, silently dropping
  # literals and splicing non-kw expressions unescaped into macro scope.)
  args = [a for a in expr.args if !iskw(a)]
  argnames = [gensym() for _ in args]
  quote
    function g($(argnames...))
      # Call once first so that compilation does not pollute the count.
      $(Expr(expr.head, argnames..., kwargs...))
      @allocated $(Expr(expr.head, argnames..., kwargs...))
    end
    $(Expr(:call, :g, [esc(a) for a in args]...))
  end
end
|
|
||
| # Construct the rosenbrock problem. | ||
|
|
||
# Rosenbrock objective f(x) = 100(x₂ - x₁²)² + (1 - x₁)²; minimum 0 at (1, 1).
function rosenbrock_f(x::Vector{T}) where {T <: Real}
  residual = x[2] - x[1]^2
  offset = 1 - x[1]
  return 100 * residual^2 + offset^2
end
|
|
||
# In-place gradient of the Rosenbrock function
# f(x) = 100(x₂ - x₁²)² + (1 - x₁)².
# Fills `gx` and returns it, following the usual `f!` convention of returning
# the mutated argument (the original implicitly returned the last assigned
# scalar instead of the buffer).
function rosenbrock_grad!(gx::Vector{T}, x::Vector{T}) where {T <: Real}
  gx[1] = -400 * x[1] * (x[2] - x[1]^2) - 2 * (1 - x[1])
  gx[2] = 200 * (x[2] - x[1]^2)
  return gx
end
|
|
||
# In-place Hessian-vector product of the Rosenbrock function:
# hv ← obj_weight * H(x) * v.
# Fixes two defects in the original: the `obj_weight` keyword was accepted but
# ignored (the NLPModels `hprod` contract scales the product by it), and the
# mutated buffer was not returned. The default `one(T)` keeps the product
# type-stable for non-Float64 element types; behavior at the default weight is
# unchanged.
function rosenbrock_hv!(hv::Vector{T}, x::Vector{T}, v::Vector{T}; obj_weight = one(T)) where {T}
  hv[1] = obj_weight * ((1200 * x[1]^2 - 400 * x[2] + 2) * v[1] - 400 * x[1] * v[2])
  hv[2] = obj_weight * (-400 * x[1] * v[1] + 200 * v[2])
  return hv
end
|
|
||
# Build the Rosenbrock test problem as an NLPModel, wiring in the objective,
# the in-place gradient, and the in-place Hessian-vector product, starting
# from the origin.
function construct_rosenbrock_nlp()
  initial_point = zeros(2)
  return NLPModel(initial_point, rosenbrock_f, grad = rosenbrock_grad!, hprod = rosenbrock_hv!)
end
Uh oh!
There was an error while loading. Please reload this page.