From 7f6e53eec712c74ac9383e12cab7aa56879b4ae5 Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Sun, 22 Oct 2023 11:19:09 -0400
Subject: [PATCH 1/3] Fix Zygote constraint bug and update Rosenbrock docs

---
 docs/src/examples/rosenbrock.md | 49 ++++++++++++++-----
 ext/OptimizationZygoteExt.jl    |  4 +-
 .../src/OptimizationPRIMA.jl    |  3 +-
 3 files changed, 42 insertions(+), 14 deletions(-)

diff --git a/docs/src/examples/rosenbrock.md b/docs/src/examples/rosenbrock.md
index cbe756d70..887a2cb94 100644
--- a/docs/src/examples/rosenbrock.md
+++ b/docs/src/examples/rosenbrock.md
@@ -22,13 +22,14 @@ _p = [1.0, 100.0]
 f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
 l1 = rosenbrock(x0, _p)
 prob = OptimizationProblem(f, x0, _p)
+```
 
 ## Optim.jl Solvers
 
-using OptimizationOptimJL
-
-# Start with some derivative-free optimizers
+### Start with some derivative-free optimizers
 
+```@example rosenbrock
+using OptimizationOptimJL
 sol = solve(prob, SimulatedAnnealing())
 prob = OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
 sol = solve(prob, SAMIN())
@@ -36,23 +37,31 @@ sol = solve(prob, SAMIN())
 l1 = rosenbrock(x0, _p)
 prob = OptimizationProblem(rosenbrock, x0, _p)
 sol = solve(prob, NelderMead())
+```
 
-# Now a gradient-based optimizer with forward-mode automatic differentiation
+### Now a gradient-based optimizer with forward-mode automatic differentiation
 
+```@example rosenbrock
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
 prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, BFGS())
+```
 
-# Now a second order optimizer using Hessians generated by forward-mode automatic differentiation
+### Now a second-order optimizer using Hessians generated by forward-mode automatic differentiation
 
+```@example rosenbrock
 sol = solve(prob, Newton())
+```
 
-# Now a second order Hessian-free optimizer
+### Now a second-order Hessian-free optimizer
 
+```@example rosenbrock
 sol = solve(prob, Optim.KrylovTrustRegion())
+```
 
-# Now derivative-based optimizers with various constraints
+### Now derivative-based optimizers with various constraints
 
+```@example rosenbrock
 cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2]
 
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
@@ -68,9 +77,13 @@ sol = solve(prob, IPNewton())
 
 prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
                            lb = [-500.0, -500.0], ub = [50.0, 50.0])
-sol = solve(prob, IPNewton()) # Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
-# cons(sol.u, _p) = 0.49999999999999994
+sol = solve(prob, IPNewton())
+
+# Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
+cons(sol.u, _p)
+```
 
+```@example rosenbrock
 function con_c(res, x, p)
     res .= [x[1]^2 + x[2]^2]
 end
@@ -78,14 +91,18 @@ end
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = con_c)
 prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
 sol = solve(prob, IPNewton()) # -Inf < con_c(sol.u, _p) = 0.25^2
+```
 
 ## Evolutionary.jl Solvers
 
+```@example rosenbrock
 using OptimizationEvolutionary
 sol = solve(prob, CMAES(μ = 40, λ = 100), abstol = 1e-15) # -Inf < con_c(sol.u, _p) = 0.25^2
+```
 
 ## IPOPT through OptimizationMOI
 
+```@example rosenbrock
 using OptimizationMOI, Ipopt
 
 function con2_c(res, x, p)
@@ -95,36 +112,46 @@ end
 optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote(); cons = con2_c)
 prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf, -Inf], ucons = [Inf, Inf])
 sol = solve(prob, Ipopt.Optimizer())
+```
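+
+As a quick check (a small editorial addition to the example, using only the
+objects defined above), we can evaluate the constraint residuals at the
+solution Ipopt returned; with the `[-Inf, Inf]` bounds above, any finite
+values are feasible:
+
+```@example rosenbrock
+res = zeros(2)
+con2_c(res, sol.u, _p)
+println(res)
+```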
 
-# Now let's switch over to OptimizationOptimisers with reverse-mode AD
+## Now let's switch over to OptimizationOptimisers with reverse-mode AD
 
+```@example rosenbrock
 using OptimizationOptimisers
 optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
 prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, Adam(0.05), maxiters = 1000, progress = false)
+```
 
 ## Try out CMAEvolutionStrategy.jl's evolutionary methods
 
+```@example rosenbrock
 using OptimizationCMAEvolutionStrategy
 sol = solve(prob, CMAEvolutionStrategyOpt())
+```
 
 ## Now try a few NLopt.jl solvers with symbolic differentiation via ModelingToolkit.jl
 
+```@example rosenbrock
 using OptimizationNLopt, ModelingToolkit
 optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit())
 prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, Opt(:LN_BOBYQA, 2))
 sol = solve(prob, Opt(:LD_LBFGS, 2))
+```
 
-## Add some box constraints and solve with a few NLopt.jl methods
+### Add some box constraints and solve with a few NLopt.jl methods
 
+```@example rosenbrock
 prob = OptimizationProblem(optf, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
 sol = solve(prob, Opt(:LD_LBFGS, 2))
 sol = solve(prob, Opt(:G_MLSL_LDS, 2), local_method = Opt(:LD_LBFGS, 2), maxiters = 10000) # a global optimizer with random starts of local optimization
+```
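+
+As a quick check of the result (a small editorial addition to the example), we
+can print the minimizer and the objective value; both components of `sol.u`
+should lie inside the box `[-1.0, 0.8]`:
+
+```@example rosenbrock
+println(sol.u)         # minimizer found by the global + local combination
+println(sol.objective) # objective value at the minimizer
+```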
 
 ## BlackBoxOptim.jl Solvers
 
+```@example rosenbrock
 using OptimizationBBO
 prob = Optimization.OptimizationProblem(rosenbrock, x0, _p, lb = [-1.0, 0.2],
                                         ub = [0.8, 0.43])
diff --git a/ext/OptimizationZygoteExt.jl b/ext/OptimizationZygoteExt.jl
index 3725466ce..76d20a918 100644
--- a/ext/OptimizationZygoteExt.jl
+++ b/ext/OptimizationZygoteExt.jl
@@ -124,12 +124,12 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
         cons = nothing
     else
         cons = (res, θ) -> f.cons(res, θ, cache.p)
-        cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res)
+        cons_oop = (x) -> (_res = Zygote.Buffer(x, num_cons); cons(_res, x); copy(_res))
     end
 
     if cons !== nothing && f.cons_j === nothing
         cons_j = function (J, θ)
-            J .= Zygote.jacobian(cons_oop, θ)
+            J .= first(Zygote.jacobian(cons_oop, θ))
         end
     else
         cons_j = (J, θ) -> f.cons_j(J, θ, cache.p)
diff --git a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
index 9ca20ec5b..ff268e236 100644
--- a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
+++ b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
@@ -16,7 +16,8 @@ SciMLBase.allowsconstraints(::Union{LINCOA, COBYLA}) = true
 SciMLBase.allowsbounds(opt::Union{BOBYQA, LINCOA, COBYLA}) = true
 SciMLBase.requiresconstraints(opt::COBYLA) = true
 
-function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem, opt::PRIMASolvers, data;
+function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem,
+                                        opt::PRIMASolvers, data;
     callback = Optimization.DEFAULT_CALLBACK,
     maxiters::Union{Number, Nothing} = nothing,
     maxtime::Union{Number, Nothing} = nothing,

From a8fe8a08befb02d86b55d0ea9f11c6db7cc3876a Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Sun, 22 Oct 2023 15:16:17 -0400
Subject: [PATCH 2/3] Evaluate the in-place constraint in the docs example

---
 docs/src/examples/rosenbrock.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/src/examples/rosenbrock.md b/docs/src/examples/rosenbrock.md
index 887a2cb94..cce11090c 100644
--- a/docs/src/examples/rosenbrock.md
+++ b/docs/src/examples/rosenbrock.md
@@ -5,7 +5,7 @@ flexibility of Optimization.jl.
 This is a gauntlet of many solvers to get a feel for common workflows of the
 package and give copy-pastable starting points.
 !!! note
-    
+
     This example uses many different solvers of Optimization.jl. Each
     solver subpackage needs to be installed separately. For example, for
     details on the installation and usage of the OptimizationOptimJL.jl package, see the
@@ -80,7 +80,9 @@ prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
 sol = solve(prob, IPNewton())
 
 # Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
-cons(sol.u, _p)
+res = zeros(1)
+cons(res, sol.u, _p)
+println(res)
 ```
 
 ```@example rosenbrock

From 69df7d1ca3d5ca5139cb2adb281aa0b43e604f31 Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Sun, 22 Oct 2023 17:19:03 -0400
Subject: [PATCH 3/3] Use an in-bounds x0 in the BBO example

---
 docs/src/examples/rosenbrock.md         | 4 ++--
 docs/src/optimization_packages/prima.md | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/src/examples/rosenbrock.md b/docs/src/examples/rosenbrock.md
index cce11090c..8c2c1269b 100644
--- a/docs/src/examples/rosenbrock.md
+++ b/docs/src/examples/rosenbrock.md
@@ -5,7 +5,7 @@ flexibility of Optimization.jl.
 This is a gauntlet of many solvers to get a feel for common workflows of the
 package and give copy-pastable starting points.
 !!! note
-    
+
     This example uses many different solvers of Optimization.jl. Each
     solver subpackage needs to be installed separately. For example, for
     details on the installation and usage of the OptimizationOptimJL.jl package, see the
@@ -155,7 +155,7 @@ sol = solve(prob, Opt(:G_MLSL_LDS, 2), local_method = Opt(:LD_LBFGS, 2), maxiter
 
 ```@example rosenbrock
 using OptimizationBBO
-prob = Optimization.OptimizationProblem(rosenbrock, x0, _p, lb = [-1.0, 0.2],
+prob = Optimization.OptimizationProblem(rosenbrock, [0.0, 0.3], _p, lb = [-1.0, 0.2],
                                         ub = [0.8, 0.43])
 sol = solve(prob, BBO_adaptive_de_rand_1_bin()) # -1.0 ≤ x[1] ≤ 0.8, 0.2 ≤ x[2] ≤ 0.43
diff --git a/docs/src/optimization_packages/prima.md b/docs/src/optimization_packages/prima.md
index 6a57c933a..e225aafe8 100644
--- a/docs/src/optimization_packages/prima.md
+++ b/docs/src/optimization_packages/prima.md
@@ -26,6 +26,8 @@ The five Powell's algorithms of the prima library are provided by the PRIMA.jl p
 `COBYLA`: (Constrained Optimization BY Linear Approximations) is for general constrained problems with bound constraints, non-linear constraints, linear equality constraints, and linear inequality constraints.
 
 ```@example PRIMA
+using Optimization, OptimizationPRIMA
+
 rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
 x0 = zeros(2)
 _p = [1.0, 100.0]
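+
+# A hedged editorial sketch extending this example (not part of the original
+# page): `COBYLA` requires a constraint function, so we attach an illustrative
+# inequality constraint x[1]^2 + x[2]^2 <= 1 and solve the constrained
+# Rosenbrock problem. The constraint, bounds, and iteration cap here are
+# assumptions for demonstration.
+cons(res, x, p) = (res .= [x[1]^2 + x[2]^2])
+optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
+prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [1.0])
+sol = solve(prob, COBYLA(), maxiters = 1000)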