diff --git a/Project.toml b/Project.toml
index 83af93f8c..ee208b3f7 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
 name = "Optimization"
 uuid = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
-version = "3.25.0"
+version = "3.25.1"
 
 [deps]
 ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
@@ -21,20 +21,20 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"
 
 [compat]
-ADTypes = "0.2.5, 1"
-ArrayInterface = "7.6"
+ADTypes = "1.2"
+ArrayInterface = "7.10"
 ConsoleProgressMonitor = "0.1.1"
 DocStringExtensions = "0.9"
 LBFGSB = "0.4.1"
 LinearAlgebra = "1.10"
 Logging = "1.10"
 LoggingExtras = "0.4, 1"
-OptimizationBase = "0.0.7"
+OptimizationBase = "1"
 Pkg = "1"
 Printf = "1.10"
 ProgressLogging = "0.1"
 Reexport = "1.2"
-SciMLBase = "2.30.0"
+SciMLBase = "2.39.0"
 SparseArrays = "1.10"
 Symbolics = "5.12"
 TerminalLoggers = "0.1"
diff --git a/docs/Project.toml b/docs/Project.toml
index 4d4f121e4..8b655a74c 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -49,7 +49,7 @@ ModelingToolkit = "9"
 NLopt = "0.6, 1"
 Optimization = "3"
 OptimizationBBO = "0.1, 0.2"
-OptimizationBase = "0.0.5, 0.0.6, 0.0.7"
+OptimizationBase = "0.0.5, 0.0.6, 0.0.7, 1"
 OptimizationCMAEvolutionStrategy = "0.1, 0.2"
 OptimizationEvolutionary = "0.1, 0.2, 0.3"
 OptimizationFlux = "0.2.1"
@@ -59,7 +59,7 @@ OptimizationMetaheuristics = "0.1, 0.2"
 OptimizationMultistartOptimization = "0.1, 0.2"
 OptimizationNLopt = "0.1, 0.2"
 OptimizationNOMAD = "0.1, 0.2"
-OptimizationOptimJL = "0.1, 0.2"
+OptimizationOptimJL = "0.1, 0.2, 0.3"
 OptimizationOptimisers = "0.1, 0.2"
 OptimizationPRIMA = "0.0.1, 0.0.2"
 OptimizationPolyalgorithms = "0.1, 0.2"
diff --git a/docs/src/optimization_packages/manopt.md b/docs/src/optimization_packages/manopt.md
index f0324c80f..4a6020bb3 100644
--- a/docs/src/optimization_packages/manopt.md
+++ b/docs/src/optimization_packages/manopt.md
@@ -1,6 +1,6 @@
 # Manopt.jl
 
-[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package with implementations of a variety of optimziation solvers on manifolds supported by
+[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package with implementations of a variety of optimization solvers on manifolds supported by
 [Manifolds](https://github.com/JuliaManifolds/Manifolds.jl).
 
 ## Installation: OptimizationManopt.jl
@@ -95,4 +95,4 @@ prob = OptimizationProblem(optf, U; manifold = M, maxiters = 1000)
 sol = Optimization.solve(prob, opt, sub_problem = (M, q, p, X) -> closed_form_solution!(M, q, L, U, p, X))
 ```
 
-This example is based on the [example](https://juliamanifolds.github.io/ManoptExamples.jl/stable/examples/Riemannian-mean/) in the Manopt and https://doi.org/10.1007/s10107-022-01840-5.
\ No newline at end of file
+This example is based on the [example](https://juliamanifolds.github.io/ManoptExamples.jl/stable/examples/Riemannian-mean/) in the Manopt documentation and on https://doi.org/10.1007/s10107-022-01840-5.
diff --git a/lib/OptimizationBBO/src/OptimizationBBO.jl b/lib/OptimizationBBO/src/OptimizationBBO.jl
index d224e1ea8..e455426db 100644
--- a/lib/OptimizationBBO/src/OptimizationBBO.jl
+++ b/lib/OptimizationBBO/src/OptimizationBBO.jl
@@ -10,10 +10,6 @@ SciMLBase.requiresbounds(::BBO) = true
 SciMLBase.allowsbounds(::BBO) = true
 SciMLBase.supports_opt_cache_interface(opt::BBO) = true
 
-
-
-
-
 for j in string.(BlackBoxOptim.SingleObjectiveMethodNames)
     eval(Meta.parse("Base.@kwdef struct BBO_" * j * " <: BBO method=:" * j * " end"))
     eval(Meta.parse("export BBO_" * j))
diff --git a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl
index 478ec5b9b..64004c515 100644
--- a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl
+++ b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl
@@ -17,7 +17,6 @@ SciMLBase.requireshessian(::GCMAESOpt) = false
 SciMLBase.requiresconsjac(::GCMAESOpt) = false
 SciMLBase.requiresconshess(::GCMAESOpt) = false
 
-
 function __map_optimizer_args(cache::OptimizationCache, opt::GCMAESOpt;
         callback = nothing,
         maxiters::Union{Number, Nothing} = nothing,
diff --git a/lib/OptimizationMOI/src/OptimizationMOI.jl b/lib/OptimizationMOI/src/OptimizationMOI.jl
index 6848ca7ef..74e3bb4d9 100644
--- a/lib/OptimizationMOI/src/OptimizationMOI.jl
+++ b/lib/OptimizationMOI/src/OptimizationMOI.jl
@@ -16,10 +16,22 @@ const MOI = MathOptInterface
 
 const DenseOrSparse{T} = Union{Matrix{T}, SparseMatrixCSC{T}}
 
-SciMLBase.requiresgradient(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
-SciMLBase.requireshessian(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
-SciMLBase.requiresconsjac(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
-SciMLBase.requiresconshess(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
+function SciMLBase.requiresgradient(opt::Union{
+        MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
+    true
+end
+function SciMLBase.requireshessian(opt::Union{
+        MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
+    true
+end
+function SciMLBase.requiresconsjac(opt::Union{
+        MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
+    true
+end
+function SciMLBase.requiresconshess(opt::Union{
+        MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
+    true
+end
 
 function SciMLBase.allowsbounds(opt::Union{MOI.AbstractOptimizer,
         MOI.OptimizerWithAttributes})
diff --git a/lib/OptimizationNLopt/Project.toml b/lib/OptimizationNLopt/Project.toml
index 387b2a4b2..c2aedb1e4 100644
--- a/lib/OptimizationNLopt/Project.toml
+++ b/lib/OptimizationNLopt/Project.toml
@@ -1,7 +1,7 @@
 name = "OptimizationNLopt"
 uuid = "4e6fcdb7-1186-4e1f-a706-475e75c168bb"
 authors = ["Vaibhav Dixit and contributors"]
-version = "0.2.0"
+version = "0.2.2"
 
 [deps]
 NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd"
diff --git a/lib/OptimizationNLopt/src/OptimizationNLopt.jl b/lib/OptimizationNLopt/src/OptimizationNLopt.jl
index 16d15a8a2..fe2eb9abf 100644
--- a/lib/OptimizationNLopt/src/OptimizationNLopt.jl
+++ b/lib/OptimizationNLopt/src/OptimizationNLopt.jl
@@ -12,77 +12,30 @@ SciMLBase.supports_opt_cache_interface(opt::Union{NLopt.Algorithm, NLopt.Opt}) = true
 
 function SciMLBase.requiresgradient(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
     str_opt = string(opt)
     if str_opt[2] == "D"
-        return true
+        return true
     else
-        return false
+        return false
     end
 end
 
 function SciMLBase.requireshessian(opt::NLopt.Algorithm)
     #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
     str_opt = string(opt)
     if (str_opt[2] == "D" && str_opt[4] == "N")
-        return true
+        return true
     else
-        return false
+        return false
     end
 end
-function SciMLBase.requireshessian(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
-    str_opt = string(opt)
-    if str_opt[2] == "D" && str_opt[4] == "N"
-        return true
-    else
-        return false
-    end
-end
 function SciMLBase.requiresconsjac(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
     str_opt = string(opt)
     if str_opt[3] == "O" || str_opt[3] == "I" || str_opt[5] == "G"
-        return true
+        return true
     else
-        return false
+        return false
     end
 end
-
-
-
-function SciMLBase.requiresgradient(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
-    str_opt = string(opt)
-    if str_opt[2] == "D"
-        return true
-    else
-        return false
-    end
-end
-
-function SciMLBase.requireshessian(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
-    str_opt = string(opt)
-    if (str_opt[2] == "D" && str_opt[4] == "N")
-        return true
-    else
-        return false
-    end
-end
-
-function SciMLBase.requireshessian(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
-    str_opt = string(opt)
-    if str_opt[2] == "D" && str_opt[4] == "N"
-        return true
-    else
-        return false
-    end
-end
-function SciMLBase.requiresconsjac(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
-    str_opt = string(opt)
-    if str_opt[3] == "O" || str_opt[3] == "I" || str_opt[5] == "G"
-        return true
-    else
-        return false
-    end
-end
-
-
 
 function __map_optimizer_args!(cache::OptimizationCache, opt::NLopt.Opt;
         callback = nothing,
         maxiters::Union{Number, Nothing} = nothing,
@@ -212,7 +165,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
         x = cache.f(θ, cache.p)
         opt_state = Optimization.OptimizationState(u = θ, objective = x[1])
         if cache.callback(opt_state, x...)
-            error("Optimization halted by callback.")
+            NLopt.force_stop!(opt_setup)
         end
         return x[1]
     end
diff --git a/lib/OptimizationNLopt/test/runtests.jl b/lib/OptimizationNLopt/test/runtests.jl
index d8ea63f3c..7b485e2e3 100644
--- a/lib/OptimizationNLopt/test/runtests.jl
+++ b/lib/OptimizationNLopt/test/runtests.jl
@@ -71,4 +71,15 @@ using Test
         @test sol.retcode == ReturnCode.Success
        @test sol.u≈[2.0] atol=1e-3
    end
+
+    @testset "callback" begin
+        cbstopping = function (state, loss)
+            println(state.iter, " ", state.u, " ", state.objective)
+            return state.objective < 0.7
+        end
+
+        sol = solve(prob, NLopt.LD_LBFGS(), callback = cbstopping)
+        #nlopt gives the last best not the one where callback stops
+        @test sol.objective < 0.8
+    end
 end
diff --git a/lib/OptimizationOptimJL/Project.toml b/lib/OptimizationOptimJL/Project.toml
index 7464dafd6..b5215b660 100644
--- a/lib/OptimizationOptimJL/Project.toml
+++ b/lib/OptimizationOptimJL/Project.toml
@@ -1,7 +1,7 @@
 name = "OptimizationOptimJL"
 uuid = "36348300-93cb-4f02-beb5-3c3902f8871e"
 authors = ["Vaibhav Dixit and contributors"]
-version = "0.3.0"
+version = "0.3.1"
 
 [deps]
 Optim = "429524aa-4258-5aef-a3af-852621145aeb"
diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
index 1b86a8910..dcf187d45 100644
--- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
+++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -14,7 +14,9 @@ SciMLBase.requiresbounds(opt::Optim.SAMIN) = true
 SciMLBase.supports_opt_cache_interface(opt::Optim.AbstractOptimizer) = true
 SciMLBase.supports_opt_cache_interface(opt::Union{Optim.Fminbox, Optim.SAMIN}) = true
 SciMLBase.supports_opt_cache_interface(opt::Optim.ConstrainedOptimizer) = true
-SciMLBase.requiresgradient(opt::Optim.AbstractOptimizer) = !(opt isa Optim.ZerothOrderOptimizer)
+function SciMLBase.requiresgradient(opt::Optim.AbstractOptimizer)
+    !(opt isa Optim.ZerothOrderOptimizer)
+end
 SciMLBase.requiresgradient(::IPNewton) = true
 SciMLBase.requireshessian(::IPNewton) = true
 SciMLBase.requiresconsjac(::IPNewton) = true
@@ -90,10 +92,10 @@ function SciMLBase.__init(prob::OptimizationProblem,
             opt = Optim.ParticleSwarm(; lower = prob.lb,
                 upper = prob.ub, n_particles = opt.n_particles)
         else
-            if prob.f isa OptimizationFunction && !(prob.f.adtype isa NoAD)
-                opt = Optim.Fminbox(opt)
+            if prob.f isa OptimizationFunction && (!(prob.f.adtype isa SciMLBase.NoAD) || !isnothing(prob.f.grad))
+                opt = Optim.Fminbox(opt)
             else
-                throw(ArgumentError("Fminbox($opt) requires gradients, since you didn't use `OptimizationFunction` with a valid AD backend https://docs.sciml.ai/Optimization/stable/API/ad/ the lower and upper bounds thus will be ignored."))
+                throw(ArgumentError("Fminbox($opt) requires gradients; use `OptimizationFunction` either with a valid AD backend (https://docs.sciml.ai/Optimization/stable/API/ad/) or with a user-provided `grad` function."))
             end
         end
     end
diff --git a/lib/OptimizationOptimJL/test/runtests.jl b/lib/OptimizationOptimJL/test/runtests.jl
index 75a387f94..4dc4aac66 100644
--- a/lib/OptimizationOptimJL/test/runtests.jl
+++ b/lib/OptimizationOptimJL/test/runtests.jl
@@ -170,6 +170,34 @@
     sol = solve(prob, Optim.KrylovTrustRegion())
     @test 10 * sol.objective < l1
+
+    prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MaxSense, lb = [-1.0, -1.0], ub = [0.8, 0.8])
+    sol = solve(prob, BFGS())
+    @test 10 * sol.objective < l1
+
+    function rosenbrock_grad!(dx, x, p)
+        dx[1] = -2 * (p[1] - x[1]) - 4 * p[2] * (x[2] - x[1]^2) * x[1]
+        dx[2] = 2 * p[2] * (x[2] - x[1]^2)
+        return nothing
+    end
+
+    # https://github.com/SciML/Optimization.jl/issues/754 Optim.BFGS() with explicit gradient function
+    optprob = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
+    prob = OptimizationProblem(optprob, x0, _p)
+    @test (sol = solve(prob, Optim.BFGS())) isa Any # test exception not thrown
+    @test 10 * sol.objective < l1
+
+    # https://github.com/SciML/Optimization.jl/issues/754 Optim.BFGS() with bounds and explicit gradient function
+    optprob = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
+    prob = OptimizationProblem(optprob, x0, _p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
+    @test (sol = solve(prob, Optim.BFGS())) isa Any # test exception not thrown
+    @test 10 * sol.objective < l1
+
+    # test that Optim.BFGS() with bounds but no AD or user-supplied gradient fails
+    optprob = OptimizationFunction(rosenbrock, SciMLBase.NoAD())
+    prob = OptimizationProblem(optprob, x0, _p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
+    @test_throws ArgumentError (sol = solve(prob, Optim.BFGS())) isa Any # test exception is thrown
+    @test 10 * sol.objective < l1
+
     @testset "cache" begin
         objective(x, p) = (p[1] - x[1])^2
         x0 = zeros(1)
diff --git a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
index 77fed2703..89d5514a5 100644
--- a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
+++ b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
@@ -18,7 +18,6 @@ SciMLBase.requiresconstraints(opt::COBYLA) = true
 SciMLBase.requiresgradient(opt::Union{BOBYQA, LINCOA, COBYLA}) = true
 SciMLBase.requiresconsjac(opt::Union{LINCOA, COBYLA}) = true
 
-
 function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem,
         opt::PRIMASolvers, data;
         callback = Optimization.DEFAULT_CALLBACK,
@@ -123,9 +122,11 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
         P,
         C
 }
+    iter = 0
     _loss = function (θ)
         x = cache.f(θ, cache.p)
-        opt_state = Optimization.OptimizationState(u = θ, objective = x[1])
+        iter += 1
+        opt_state = Optimization.OptimizationState(u = θ, objective = x[1], iter = iter)
         if cache.callback(opt_state, x...)
error("Optimization halted by callback.") end diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml index 3e8c5fe03..f88a168ad 100644 --- a/lib/OptimizationPolyalgorithms/Project.toml +++ b/lib/OptimizationPolyalgorithms/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationPolyalgorithms" uuid = "500b13db-7e66-49ce-bda4-eed966be6282" authors = ["Vaibhav Dixit and contributors"] -version = "0.2.0" +version = "0.2.1" [deps] Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" @@ -11,7 +11,7 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69" [compat] Optimization = "3.21" -OptimizationOptimJL = "0.1, 0.2" +OptimizationOptimJL = "0.1, 0.2, 0.3" OptimizationOptimisers = "0.1, 0.2" Reexport = "1.2" julia = "1.6" diff --git a/src/lbfgsb.jl b/src/lbfgsb.jl index da56639da..3aa2cf7c8 100644 --- a/src/lbfgsb.jl +++ b/src/lbfgsb.jl @@ -36,7 +36,7 @@ function __map_optimizer_args(cache::Optimization.OptimizationCache, opt::LBFGS; @warn "common abstol is currently not used by $(opt)" end - mapped_args = (; ) + mapped_args = (;) if cache.lb !== nothing && cache.ub !== nothing mapped_args = (; mapped_args..., lb = cache.lb, ub = cache.ub) @@ -117,7 +117,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ cons_tmp = zeros(eltype(cache.u0), length(cache.lcons)) cache.f.cons(cons_tmp, cache.u0) - ρ = max(1e-6, min(10, 2*(abs(cache.f(cache.u0, cache.p)))/ norm(cons_tmp) )) + ρ = max(1e-6, min(10, 2 * (abs(cache.f(cache.u0, cache.p))) / norm(cons_tmp))) _loss = function (θ) x = cache.f(θ, cache.p) @@ -129,17 +129,18 @@ function SciMLBase.__solve(cache::OptimizationCache{ if cache.callback(opt_state, x...) error("Optimization halted by callback.") end - return x[1] + sum(@. λ * cons_tmp[eq_inds] + ρ/2 * (cons_tmp[eq_inds].^2)) + 1 / (2*ρ) * sum((max.(Ref(0.0), μ .+ (ρ .* cons_tmp[ineq_inds]))).^2) + return x[1] + sum(@. λ * cons_tmp[eq_inds] + ρ / 2 * (cons_tmp[eq_inds] .^ 2)) + + 1 / (2 * ρ) * sum((max.(Ref(0.0), μ .+ (ρ .* cons_tmp[ineq_inds]))) .^ 2) end prev_eqcons = zero(λ) θ = cache.u0 β = max.(cons_tmp[ineq_inds], Ref(0.0)) prevβ = zero(β) - eqidxs = [eq_inds[i] > 0 ? i : nothing for i in eachindex(ineq_inds)] + eqidxs = [eq_inds[i] > 0 ? i : nothing for i in eachindex(ineq_inds)] ineqidxs = [ineq_inds[i] > 0 ? 
-        eqidxs = eqidxs[eqidxs.!=nothing]
-        ineqidxs = ineqidxs[ineqidxs.!=nothing]
+        eqidxs = eqidxs[eqidxs .!= nothing]
+        ineqidxs = ineqidxs[ineqidxs .!= nothing]
         function aug_grad(G, θ)
             cache.f.grad(G, θ)
             if !isnothing(cache.f.cons_jac_prototype)
@@ -152,16 +153,24 @@ function SciMLBase.__solve(cache::OptimizationCache{
             cache.f.cons(__tmp, θ)
             __tmp[eq_inds] .= __tmp[eq_inds] .- cache.lcons[eq_inds]
             __tmp[ineq_inds] .= __tmp[ineq_inds] .- cache.ucons[ineq_inds]
-            G .+= sum(λ[i] .* J[idx, :] + ρ * (__tmp[idx].* J[idx, :]) for (i,idx) in enumerate(eqidxs); init = zero(G)) #should be jvp
-            G .+= sum(1/ρ * (max.(Ref(0.0), μ[i] .+ (ρ .* __tmp[idx])) .* J[idx, :]) for (i, idx) in enumerate(ineqidxs);  init = zero(G)) #should be jvp
+            G .+= sum(
+                λ[i] .* J[idx, :] + ρ * (__tmp[idx] .* J[idx, :])
+                for (i, idx) in enumerate(eqidxs);
+                init = zero(G)) #should be jvp
+            G .+= sum(
+                1 / ρ * (max.(Ref(0.0), μ[i] .+ (ρ .* __tmp[idx])) .* J[idx, :])
+                for (i, idx) in enumerate(ineqidxs);
+                init = zero(G)) #should be jvp
         end
         for i in 1:maxiters
             prev_eqcons .= cons_tmp[eq_inds]
             prevβ .= copy(β)
             if cache.lb !== nothing && cache.ub !== nothing
-                res = lbfgsb(_loss, aug_grad, θ; m = cache.opt.m, pgtol = sqrt(ϵ), maxiter = maxiters/100, lb = cache.lb, ub = cache.ub)
+                res = lbfgsb(_loss, aug_grad, θ; m = cache.opt.m, pgtol = sqrt(ϵ),
+                    maxiter = maxiters / 100, lb = cache.lb, ub = cache.ub)
             else
-                res = lbfgsb(_loss, aug_grad, θ; m = cache.opt.m, pgtol = sqrt(ϵ), maxiter = maxiters/100)
+                res = lbfgsb(_loss, aug_grad, θ; m = cache.opt.m,
+                    pgtol = sqrt(ϵ), maxiter = maxiters / 100)
             end
             # @show res[2]
             # @show res[1]
@@ -174,11 +183,12 @@ function SciMLBase.__solve(cache::OptimizationCache{
             θ = res[2]
             cons_tmp .= 0.0
             cache.f.cons(cons_tmp, θ)
-            λ = max.(min.(λmax , λ .+ ρ * cons_tmp[eq_inds]), λmin)
+            λ = max.(min.(λmax, λ .+ ρ * cons_tmp[eq_inds]), λmin)
             β = max.(cons_tmp[ineq_inds], -1 .* μ ./ ρ)
             μ = min.(μmax, max.(μ .+ ρ * cons_tmp[ineq_inds], μmin))
-            if max(norm(cons_tmp[eq_inds], Inf), norm(β, Inf)) > τ * max(norm(prev_eqcons, Inf), norm(prevβ, Inf))
+            if max(norm(cons_tmp[eq_inds], Inf), norm(β, Inf)) >
+               τ * max(norm(prev_eqcons, Inf), norm(prevβ, Inf))
                 ρ = γ * ρ
             end
             if norm(cons_tmp[eq_inds], Inf) < ϵ && norm(β, Inf) < ϵ
@@ -188,7 +198,8 @@ function SciMLBase.__solve(cache::OptimizationCache{
 
         stats = Optimization.OptimizationStats(; iterations = maxiters,
            time = 0.0, fevals = maxiters, gevals = maxiters)
-        return SciMLBase.build_solution(cache, cache.opt, res[2], cache.f(res[2], cache.p)[1], stats = stats)
+        return SciMLBase.build_solution(
+            cache, cache.opt, res[2], cache.f(res[2], cache.p)[1], stats = stats)
     else
         _loss = function (θ)
             x = cache.f(θ, cache.p)
diff --git a/test/lbfgsb.jl b/test/lbfgsb.jl
index 1d3336e1d..ae0bc80fd 100644
--- a/test/lbfgsb.jl
+++ b/test/lbfgsb.jl
@@ -11,11 +11,11 @@ prob = OptimizationProblem(optf, x0)
 @time res = solve(prob, Optimization.LBFGS(), maxiters = 100)
 
 function con2_c(res, x, p)
-    res .= [x[1]^2 + x[2]^2, (x[2] * sin(x[1]) + x[1])-5]
+    res .= [x[1]^2 + x[2]^2, (x[2] * sin(x[1]) + x[1]) - 5]
 end
 
 optf = OptimizationFunction(rosenbrock, AutoZygote(), cons = con2_c)
-prob = OptimizationProblem(optf, x0, lcons = [1.0, -Inf],
-    ucons = [1.0, 0.0], lb = [-1.0, -1.0],
-    ub = [1.0, 1.0])
+prob = OptimizationProblem(optf, x0, lcons = [1.0, -Inf],
+    ucons = [1.0, 0.0], lb = [-1.0, -1.0],
+    ub = [1.0, 1.0])
 @time res = solve(prob, Optimization.LBFGS(), maxiters = 100)