diff --git a/docs/src/examples/rosenbrock.md b/docs/src/examples/rosenbrock.md
index cbe756d70..8c2c1269b 100644
--- a/docs/src/examples/rosenbrock.md
+++ b/docs/src/examples/rosenbrock.md
@@ -22,13 +22,14 @@ _p = [1.0, 100.0]
 f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
 l1 = rosenbrock(x0, _p)
 prob = OptimizationProblem(f, x0, _p)
+```
 
 ## Optim.jl Solvers
 
-using OptimizationOptimJL
-
-# Start with some derivative-free optimizers
+### Start with some derivative-free optimizers
+```@example rosenbrock
+using OptimizationOptimJL
 sol = solve(prob, SimulatedAnnealing())
 prob = OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
 sol = solve(prob, SAMIN())
@@ -36,23 +37,31 @@ sol = solve(prob, SAMIN())
 l1 = rosenbrock(x0, _p)
 prob = OptimizationProblem(rosenbrock, x0, _p)
 sol = solve(prob, NelderMead())
+```
 
-# Now a gradient-based optimizer with forward-mode automatic differentiation
+### Now a gradient-based optimizer with forward-mode automatic differentiation
 
+```@example rosenbrock
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
 prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, BFGS())
+```
 
-# Now a second order optimizer using Hessians generated by forward-mode automatic differentiation
+### Now a second order optimizer using Hessians generated by forward-mode automatic differentiation
 
+```@example rosenbrock
 sol = solve(prob, Newton())
+```
 
-# Now a second order Hessian-free optimizer
+### Now a second order Hessian-free optimizer
 
+```@example rosenbrock
 sol = solve(prob, Optim.KrylovTrustRegion())
+```
 
-# Now derivative-based optimizers with various constraints
+### Now derivative-based optimizers with various constraints
 
+```@example rosenbrock
 cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2]
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
@@ -68,9 +77,15 @@ sol = solve(prob, IPNewton())
 prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
     lb = [-500.0, -500.0], ub = [50.0, 50.0])
-sol = solve(prob, IPNewton()) # Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
-# cons(sol.u, _p) = 0.49999999999999994
+sol = solve(prob, IPNewton())
+
+# Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
+res = zeros(1)
+cons(res, sol.u, _p)
+println(res)
+```
 
+```@example rosenbrock
 function con_c(res, x, p)
     res .= [x[1]^2 + x[2]^2]
 end
@@ -78,14 +93,18 @@ end
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = con_c)
 prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
 sol = solve(prob, IPNewton()) # -Inf < cons_circ(sol.u, _p) = 0.25^2
+```
 
 ## Evolutionary.jl Solvers
 
+```@example rosenbrock
 using OptimizationEvolutionary
 sol = solve(prob, CMAES(μ = 40, λ = 100), abstol = 1e-15) # -Inf < cons_circ(sol.u, _p) = 0.25^2
+```
 
 ## IPOPT through OptimizationMOI
 
+```@example rosenbrock
 using OptimizationMOI, Ipopt
 
 function con2_c(res, x, p)
@@ -95,38 +114,48 @@ end
 optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote(); cons = con2_c)
 prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf, -Inf], ucons = [Inf, Inf])
 sol = solve(prob, Ipopt.Optimizer())
+```
 
-# Now let's switch over to OptimizationOptimisers with reverse-mode AD
+## Now let's switch over to OptimizationOptimisers with reverse-mode AD
 
+```@example rosenbrock
 using OptimizationOptimisers
 optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
 prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, Adam(0.05), maxiters = 1000, progress = false)
+```
 
 ## Try out CMAEvolutionStrategy.jl's evolutionary methods
 
+```@example rosenbrock
 using OptimizationCMAEvolutionStrategy
 sol = solve(prob, CMAEvolutionStrategyOpt())
+```
 
 ## Now try a few NLopt.jl solvers with symbolic differentiation via ModelingToolkit.jl
 
+```@example rosenbrock
 using OptimizationNLopt, ModelingToolkit
 optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit())
 prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, Opt(:LN_BOBYQA, 2))
 sol = solve(prob, Opt(:LD_LBFGS, 2))
+```
 
-## Add some box constraints and solve with a few NLopt.jl methods
+### Add some box constraints and solve with a few NLopt.jl methods
 
+```@example rosenbrock
 prob = OptimizationProblem(optf, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
 sol = solve(prob, Opt(:LD_LBFGS, 2))
 sol = solve(prob, Opt(:G_MLSL_LDS, 2), local_method = Opt(:LD_LBFGS, 2),
     maxiters = 10000) #a global optimizer with random starts of local optimization
+```
 
 ## BlackBoxOptim.jl Solvers
 
+```@example rosenbrock
 using OptimizationBBO
-prob = Optimization.OptimizationProblem(rosenbrock, x0, _p, lb = [-1.0, 0.2],
+prob = Optimization.OptimizationProblem(rosenbrock, [0.0, 0.3], _p, lb = [-1.0, 0.2],
     ub = [0.8, 0.43])
 sol = solve(prob, BBO_adaptive_de_rand_1_bin()) # -1.0 ≤ x[1] ≤ 0.8, 0.2 ≤ x[2] ≤ 0.43
 ```
diff --git a/docs/src/optimization_packages/prima.md b/docs/src/optimization_packages/prima.md
index 6a57c933a..e225aafe8 100644
--- a/docs/src/optimization_packages/prima.md
+++ b/docs/src/optimization_packages/prima.md
@@ -26,6 +26,8 @@ The five Powell's algorithms of the prima library are provided by the PRIMA.jl p
 `COBYLA`: (Constrained Optimization BY Linear Approximations) is for general constrained problems with bound constraints, non-linear constraints, linear equality constraints, and linear inequality constraints.
 
 ```@example PRIMA
+using Optimization, OptimizationPRIMA
+
 rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
 x0 = zeros(2)
 _p = [1.0, 100.0]
diff --git a/ext/OptimizationZygoteExt.jl b/ext/OptimizationZygoteExt.jl
index 3725466ce..76d20a918 100644
--- a/ext/OptimizationZygoteExt.jl
+++ b/ext/OptimizationZygoteExt.jl
@@ -124,12 +124,12 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
         cons = nothing
     else
         cons = (res, θ) -> f.cons(res, θ, cache.p)
-        cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res)
+        cons_oop = (x) -> (_res = Zygote.Buffer(x, num_cons); cons(_res, x); copy(_res))
     end
 
     if cons !== nothing && f.cons_j === nothing
         cons_j = function (J, θ)
-            J .= Zygote.jacobian(cons_oop, θ)
+            J .= first(Zygote.jacobian(cons_oop, θ))
         end
     else
         cons_j = (J, θ) -> f.cons_j(J, θ, cache.p)
diff --git a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
index 9ca20ec5b..ff268e236 100644
--- a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
+++ b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
@@ -16,7 +16,8 @@ SciMLBase.allowsconstraints(::Union{LINCOA, COBYLA}) = true
 SciMLBase.allowsbounds(opt::Union{BOBYQA, LINCOA, COBYLA}) = true
 SciMLBase.requiresconstraints(opt::COBYLA) = true
 
-function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem, opt::PRIMASolvers, data;
+function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem,
+    opt::PRIMASolvers, data;
     callback = Optimization.DEFAULT_CALLBACK,
     maxiters::Union{Number, Nothing} = nothing,
     maxtime::Union{Number, Nothing} = nothing,
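
Two Zygote idioms in the `ext/OptimizationZygoteExt.jl` hunk above deserve a note: Zygote cannot differentiate through mutation of a plain `Array`, so the in-place constraint function is evaluated into a `Zygote.Buffer` and copied back out, and `Zygote.jacobian` returns a tuple of Jacobians (one per argument), hence the `first`. Below is a minimal standalone sketch of that same pattern; the names `num_cons`, `cons`, and `θ` are illustrative stand-ins, not the extension's actual scope.

```julia
# Sketch of the pattern from the OptimizationZygoteExt.jl hunk above.
using Zygote

num_cons = 1                                    # illustrative problem size
cons = (res, x) -> res .= [x[1]^2 + x[2]^2]     # in-place constraint, as in the docs

# Out-of-place wrapper Zygote can differentiate: write into a Zygote.Buffer
# (mutation of a plain Array is not differentiable), then copy back out.
cons_oop = (x) -> (_res = Zygote.Buffer(x, num_cons); cons(_res, x); copy(_res))

θ = [0.5, 0.5]
J = zeros(num_cons, length(θ))
# Zygote.jacobian returns a tuple with one Jacobian per argument,
# so `first` extracts the Jacobian with respect to θ before assignment.
J .= first(Zygote.jacobian(cons_oop, θ))
```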
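For the PRIMA changes, here is a hedged end-to-end sketch of how the pieces fit together: the `COBYLA` type appears in the `requiresconstraints` trait in the last hunk, so a constraint like the circle constraint from rosenbrock.md is attached, and `maxiters` matches the keyword in the `OptimizationCache` signature there. The bound `0.25^2` and the `maxiters` value are borrowed for illustration, not prescribed by PRIMA.

```julia
# Assumed usage of OptimizationPRIMA's COBYLA, mirroring the docs examples above.
using Optimization, OptimizationPRIMA

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]

# COBYLA requires constraints; reuse the circle constraint from rosenbrock.md.
cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2]
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
sol = solve(prob, COBYLA(), maxiters = 1000)  # maxiters per the OptimizationCache kwargs
```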