
Commit

Merge branch 'master' into manopt
Vaibhavdixit02 authored Jun 2, 2024
2 parents 1b45e0a + f2d3e63 commit 487d277
Showing 16 changed files with 112 additions and 99 deletions.
10 changes: 5 additions & 5 deletions Project.toml
@@ -1,6 +1,6 @@
name = "Optimization"
uuid = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
version = "3.25.0"
version = "3.25.1"

[deps]
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
@@ -21,20 +21,20 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"

[compat]
ADTypes = "0.2.5, 1"
ArrayInterface = "7.6"
ADTypes = "1.2"
ArrayInterface = "7.10"
ConsoleProgressMonitor = "0.1.1"
DocStringExtensions = "0.9"
LBFGSB = "0.4.1"
LinearAlgebra = "1.10"
Logging = "1.10"
LoggingExtras = "0.4, 1"
OptimizationBase = "0.0.7"
OptimizationBase = "1"
Pkg = "1"
Printf = "1.10"
ProgressLogging = "0.1"
Reexport = "1.2"
SciMLBase = "2.30.0"
SciMLBase = "2.39.0"
SparseArrays = "1.10"
Symbolics = "5.12"
TerminalLoggers = "0.1"
4 changes: 2 additions & 2 deletions docs/Project.toml
@@ -49,7 +49,7 @@ ModelingToolkit = "9"
NLopt = "0.6, 1"
Optimization = "3"
OptimizationBBO = "0.1, 0.2"
OptimizationBase = "0.0.5, 0.0.6, 0.0.7"
OptimizationBase = "0.0.5, 0.0.6, 0.0.7, 1"
OptimizationCMAEvolutionStrategy = "0.1, 0.2"
OptimizationEvolutionary = "0.1, 0.2, 0.3"
OptimizationFlux = "0.2.1"
@@ -59,7 +59,7 @@ OptimizationMetaheuristics = "0.1, 0.2"
OptimizationMultistartOptimization = "0.1, 0.2"
OptimizationNLopt = "0.1, 0.2"
OptimizationNOMAD = "0.1, 0.2"
OptimizationOptimJL = "0.1, 0.2"
OptimizationOptimJL = "0.1, 0.2, 0.3"
OptimizationOptimisers = "0.1, 0.2"
OptimizationPRIMA = "0.0.1, 0.0.2"
OptimizationPolyalgorithms = "0.1, 0.2"
4 changes: 2 additions & 2 deletions docs/src/optimization_packages/manopt.md
@@ -1,6 +1,6 @@
# Manopt.jl

[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package with implementations of a variety of optimziation solvers on manifolds supported by
[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package with implementations of a variety of optimization solvers on manifolds supported by
[Manifolds](https://github.com/JuliaManifolds/Manifolds.jl).

## Installation: OptimizationManopt.jl
@@ -95,4 +95,4 @@ prob = OptimizationProblem(optf, U; manifold = M, maxiters = 1000)
sol = Optimization.solve(prob, opt, sub_problem = (M, q, p, X) -> closed_form_solution!(M, q, L, U, p, X))
```

This example is based on the [example](https://juliamanifolds.github.io/ManoptExamples.jl/stable/examples/Riemannian-mean/) in the Manopt and https://doi.org/10.1007/s10107-022-01840-5.
This example is based on the [example](https://juliamanifolds.github.io/ManoptExamples.jl/stable/examples/Riemannian-mean/) in the Manopt and https://doi.org/10.1007/s10107-022-01840-5.
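For readers unfamiliar with the package, a minimal usage sketch follows. It relies on the `manifold` keyword shown in the docs example above; the `GradientDescentOptimizer` solver name, AD backend, objective, and starting point are illustrative assumptions rather than code from this commit.

```julia
# Minimal sketch (assumptions noted above): minimize a diagonal quadratic over
# the unit sphere S^2 with OptimizationManopt.
using Optimization, OptimizationManopt
using Manifolds, ForwardDiff
using LinearAlgebra: normalize

M = Sphere(2)                                   # unit sphere embedded in R^3
f(x, p) = sum(p .* x .^ 2)                      # objective written in the embedding space
optf = OptimizationFunction(f, Optimization.AutoForwardDiff())
x0 = normalize([1.0, 1.0, 1.0])                 # a point on the manifold, away from the minimizer
prob = OptimizationProblem(optf, x0, [1.0, 2.0, 3.0]; manifold = M)

sol = solve(prob, OptimizationManopt.GradientDescentOptimizer())
sol.u   # should approach ±[1, 0, 0], the axis with the smallest weight
```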
4 changes: 0 additions & 4 deletions lib/OptimizationBBO/src/OptimizationBBO.jl
@@ -10,10 +10,6 @@ SciMLBase.requiresbounds(::BBO) = true
SciMLBase.allowsbounds(::BBO) = true
SciMLBase.supports_opt_cache_interface(opt::BBO) = true





for j in string.(BlackBoxOptim.SingleObjectiveMethodNames)
eval(Meta.parse("Base.@kwdef struct BBO_" * j * " <: BBO method=:" * j * " end"))
eval(Meta.parse("export BBO_" * j))
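The loop retained above builds one exported wrapper struct per BlackBoxOptim single-objective method name. As a hedged illustration (the `BBO` supertype and module here are stand-ins for the package's own definitions), one iteration of the `eval`/`Meta.parse` loop expands to roughly this:

```julia
# Roughly the code generated for a single BlackBoxOptim method name
# (illustration only; BBOExample and the local BBO type are stand-ins).
module BBOExample

abstract type BBO end

Base.@kwdef struct BBO_adaptive_de_rand_1_bin <: BBO
    method = :adaptive_de_rand_1_bin   # the BlackBoxOptim method this wrapper selects
end
export BBO_adaptive_de_rand_1_bin

end

# In user code the generated solver is passed to solve, e.g.
# solve(prob, BBO_adaptive_de_rand_1_bin(); maxiters = 1000)
```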
1 change: 0 additions & 1 deletion lib/OptimizationGCMAES/src/OptimizationGCMAES.jl
@@ -17,7 +17,6 @@ SciMLBase.requireshessian(::GCMAESOpt) = false
SciMLBase.requiresconsjac(::GCMAESOpt) = false
SciMLBase.requiresconshess(::GCMAESOpt) = false


function __map_optimizer_args(cache::OptimizationCache, opt::GCMAESOpt;
callback = nothing,
maxiters::Union{Number, Nothing} = nothing,
20 changes: 16 additions & 4 deletions lib/OptimizationMOI/src/OptimizationMOI.jl
@@ -16,10 +16,22 @@ const MOI = MathOptInterface

const DenseOrSparse{T} = Union{Matrix{T}, SparseMatrixCSC{T}}

SciMLBase.requiresgradient(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
SciMLBase.requireshessian(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
SciMLBase.requiresconsjac(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
SciMLBase.requiresconshess(opt::Union{MOI.AbstractOptimizer,MOI.OptimizerWithAttributes}) = true
function SciMLBase.requiresgradient(opt::Union{
MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
true
end
function SciMLBase.requireshessian(opt::Union{
MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
true
end
function SciMLBase.requiresconsjac(opt::Union{
MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
true
end
function SciMLBase.requiresconshess(opt::Union{
MOI.AbstractOptimizer, MOI.OptimizerWithAttributes})
true
end

function SciMLBase.allowsbounds(opt::Union{MOI.AbstractOptimizer,
MOI.OptimizerWithAttributes})
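These traits declare that any MOI-based optimizer wants gradients, Hessians, and constraint derivatives, which presumably is what prompts Optimization.jl to construct those oracles. A hedged sketch of querying them (Ipopt is only an example MOI-compatible solver, not something this commit touches):

```julia
# Hedged sketch: querying the trait methods defined above on an example MOI solver.
using SciMLBase, OptimizationMOI, Ipopt

opt = Ipopt.Optimizer()
SciMLBase.requiresgradient(opt)   # true, per the method above
SciMLBase.requireshessian(opt)    # true
SciMLBase.requiresconsjac(opt)    # true
SciMLBase.requiresconshess(opt)   # true
```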
2 changes: 1 addition & 1 deletion lib/OptimizationNLopt/Project.toml
@@ -1,7 +1,7 @@
name = "OptimizationNLopt"
uuid = "4e6fcdb7-1186-4e1f-a706-475e75c168bb"
authors = ["Vaibhav Dixit <[email protected]> and contributors"]
version = "0.2.0"
version = "0.2.2"

[deps]
NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd"
61 changes: 7 additions & 54 deletions lib/OptimizationNLopt/src/OptimizationNLopt.jl
@@ -12,77 +12,30 @@ SciMLBase.supports_opt_cache_interface(opt::Union{NLopt.Algorithm, NLopt.Opt}) =
function SciMLBase.requiresgradient(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if str_opt[2] == "D"
return true
return true
else
return false
return false
end
end

function SciMLBase.requireshessian(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if (str_opt[2] == "D" && str_opt[4] == "N")
return true
return true
else
return false
return false
end
end

function SciMLBase.requireshessian(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if str_opt[2] == "D" && str_opt[4] == "N"
return true
else
return false
end
end
function SciMLBase.requiresconsjac(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if str_opt[3] == "O" || str_opt[3] == "I" || str_opt[5] == "G"
return true
else
return false
end
end



function SciMLBase.requiresgradient(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if str_opt[2] == "D"
return true
return true
else
return false
return false
end
end

function SciMLBase.requireshessian(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if (str_opt[2] == "D" && str_opt[4] == "N")
return true
else
return false
end
end

function SciMLBase.requireshessian(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if str_opt[2] == "D" && str_opt[4] == "N"
return true
else
return false
end
end
function SciMLBase.requiresconsjac(opt::NLopt.Algorithm) #https://github.com/JuliaOpt/NLopt.jl/blob/master/src/NLopt.jl#L18C7-L18C16
str_opt = string(opt)
if str_opt[3] == "O" || str_opt[3] == "I" || str_opt[5] == "G"
return true
else
return false
end
end



function __map_optimizer_args!(cache::OptimizationCache, opt::NLopt.Opt;
callback = nothing,
maxiters::Union{Number, Nothing} = nothing,
@@ -212,7 +165,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
x = cache.f(θ, cache.p)
opt_state = Optimization.OptimizationState(u = θ, objective = x[1])
if cache.callback(opt_state, x...)
error("Optimization halted by callback.")
NLopt.force_stop!(opt_setup)
end
return x[1]
end
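The trait methods kept above read NLopt's algorithm-naming convention: the second letter of the name is `D` for derivative-based algorithms and `N` for derivative-free ones. A small illustration, assuming only that `string` on an `NLopt.Algorithm` enum value yields its name:

```julia
# Illustration of the naming convention the trait functions above parse.
using NLopt

string(NLopt.LD_LBFGS)        # "LD_LBFGS": 'D' in position 2 => gradient-based
string(NLopt.LN_NELDERMEAD)   # "LN_NELDERMEAD": 'N' in position 2 => derivative-free
```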
11 changes: 11 additions & 0 deletions lib/OptimizationNLopt/test/runtests.jl
@@ -71,4 +71,15 @@ using Test
@test sol.retcode == ReturnCode.Success
@test sol.u ≈ [2.0] atol=1e-3
end

@testset "callback" begin
cbstopping = function(state, loss)
println(state.iter, " ", state.u, " ", state.objective)
return state.objective < 0.7
end

sol = solve(prob, NLopt.LD_LBFGS())
#nlopt gives the last best not the one where callback stops
@test sol.objective < 0.8
end
end
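For context on the callback test above (a hedged sketch, not part of the diff): a stopping callback is handed to `solve` through the `callback` keyword, and with the `force_stop!` change in OptimizationNLopt.jl the solver now returns its best point so far instead of throwing.

```julia
# Hedged sketch of wiring a stopping callback into an NLopt solve.
using Optimization, OptimizationNLopt, ForwardDiff

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, zeros(2), [1.0, 100.0])

stopcb(state, loss) = loss < 0.7          # returning true requests a stop
sol = solve(prob, NLopt.LD_LBFGS(); callback = stopcb)
sol.objective                             # last best value NLopt reached before the stop
```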
2 changes: 1 addition & 1 deletion lib/OptimizationOptimJL/Project.toml
@@ -1,7 +1,7 @@
name = "OptimizationOptimJL"
uuid = "36348300-93cb-4f02-beb5-3c3902f8871e"
authors = ["Vaibhav Dixit <[email protected]> and contributors"]
version = "0.3.0"
version = "0.3.1"

[deps]
Optim = "429524aa-4258-5aef-a3af-852621145aeb"
10 changes: 6 additions & 4 deletions lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -14,7 +14,9 @@ SciMLBase.requiresbounds(opt::Optim.SAMIN) = true
SciMLBase.supports_opt_cache_interface(opt::Optim.AbstractOptimizer) = true
SciMLBase.supports_opt_cache_interface(opt::Union{Optim.Fminbox, Optim.SAMIN}) = true
SciMLBase.supports_opt_cache_interface(opt::Optim.ConstrainedOptimizer) = true
SciMLBase.requiresgradient(opt::Optim.AbstractOptimizer) = !(opt isa Optim.ZerothOrderOptimizer)
function SciMLBase.requiresgradient(opt::Optim.AbstractOptimizer)
!(opt isa Optim.ZerothOrderOptimizer)
end
SciMLBase.requiresgradient(::IPNewton) = true
SciMLBase.requireshessian(::IPNewton) = true
SciMLBase.requiresconsjac(::IPNewton) = true
@@ -90,10 +92,10 @@ function SciMLBase.__init(prob::OptimizationProblem,
opt = Optim.ParticleSwarm(; lower = prob.lb, upper = prob.ub,
n_particles = opt.n_particles)
else
if prob.f isa OptimizationFunction && !(prob.f.adtype isa NoAD)
opt = Optim.Fminbox(opt)
if prob.f isa OptimizationFunction && (!(prob.f.adtype isa SciMLBase.NoAD) || !isnothing(prob.f.grad))
opt = Optim.Fminbox(opt)
else
throw(ArgumentError("Fminbox($opt) requires gradients, since you didn't use `OptimizationFunction` with a valid AD backend https://docs.sciml.ai/Optimization/stable/API/ad/ the lower and upper bounds thus will be ignored."))
throw(ArgumentError("Fminbox($opt) requires gradients, use `OptimizationFunction` either with a valid AD backend https://docs.sciml.ai/Optimization/stable/API/ad/ or a provided 'grad' function."))
end
end
end
28 changes: 28 additions & 0 deletions lib/OptimizationOptimJL/test/runtests.jl
@@ -170,6 +170,34 @@ end
sol = solve(prob, Optim.KrylovTrustRegion())
@test 10 * sol.objective < l1

prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MaxSense, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, BFGS())
@test 10 * sol.objective < l1

function rosenbrock_grad!(dx, x, p)
dx[1] = -2*(p[1] - x[1]) -4 * p[2] * (x[2] - x[1]^2)*x[1]
dx[2]= 2*p[2]*(x[2]-x[1]^2)
return nothing
end

# https://github.com/SciML/Optimization.jl/issues/754 Optim.BFGS() with explicit gradient function
optprob = OptimizationFunction(rosenbrock; grad=rosenbrock_grad!)
prob = OptimizationProblem(optprob, x0, _p)
@test (sol = solve(prob, Optim.BFGS())) isa Any # test exception not thrown
@test 10 * sol.objective < l1

# https://github.com/SciML/Optimization.jl/issues/754 Optim.BFGS() with bounds and explicit gradient function
optprob = OptimizationFunction(rosenbrock; grad=rosenbrock_grad!)
prob = OptimizationProblem(optprob, x0, _p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
@test (sol = solve(prob, Optim.BFGS())) isa Any # test exception not thrown
@test 10 * sol.objective < l1

# test that Optim.BFGS() with bounds but no AD or user-supplied gradient fails
optprob = OptimizationFunction(rosenbrock, SciMLBase.NoAD())
prob = OptimizationProblem(optprob, x0, _p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
@test_throws ArgumentError (sol = solve(prob, Optim.BFGS())) isa Any # test exception is thrown
@test 10 * sol.objective < l1

@testset "cache" begin
objective(x, p) = (p[1] - x[1])^2
x0 = zeros(1)
5 changes: 3 additions & 2 deletions lib/OptimizationPRIMA/src/OptimizationPRIMA.jl
@@ -18,7 +18,6 @@ SciMLBase.requiresconstraints(opt::COBYLA) = true
SciMLBase.requiresgradient(opt::Union{BOBYQA, LINCOA, COBYLA}) = true
SciMLBase.requiresconsjac(opt::Union{LINCOA, COBYLA}) = true


function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem,
opt::PRIMASolvers, data;
callback = Optimization.DEFAULT_CALLBACK,
@@ -123,9 +122,11 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
P,
C
}
iter = 0
_loss = function (θ)
x = cache.f(θ, cache.p)
opt_state = Optimization.OptimizationState(u = θ, objective = x[1])
iter += 1
opt_state = Optimization.OptimizationState(u = θ, objective = x[1], iter = iter)
if cache.callback(opt_state, x...)
error("Optimization halted by callback.")
end
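The new `iter` counter threads an evaluation count into the `OptimizationState` handed to user callbacks. A hedged sketch of a callback that reads it (the `NEWUOA` solver name and `maxiters` mapping are assumptions; returning `true` would still halt via the `error` call above):

```julia
# Hedged sketch: a progress-logging callback that uses the new `iter` field.
using Optimization, OptimizationPRIMA

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
prob = OptimizationProblem(OptimizationFunction(rosenbrock), zeros(2), [1.0, 100.0])

logcb = function (state, loss)
    state.iter % 10 == 0 && @info "evaluation $(state.iter): objective = $loss"
    return false   # returning true would stop the run (via error in this backend)
end

sol = solve(prob, OptimizationPRIMA.NEWUOA(); callback = logcb, maxiters = 200)
```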
4 changes: 2 additions & 2 deletions lib/OptimizationPolyalgorithms/Project.toml
@@ -1,7 +1,7 @@
name = "OptimizationPolyalgorithms"
uuid = "500b13db-7e66-49ce-bda4-eed966be6282"
authors = ["Vaibhav Dixit <[email protected]> and contributors"]
version = "0.2.0"
version = "0.2.1"

[deps]
Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
@@ -11,7 +11,7 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69"

[compat]
Optimization = "3.21"
OptimizationOptimJL = "0.1, 0.2"
OptimizationOptimJL = "0.1, 0.2, 0.3"
OptimizationOptimisers = "0.1, 0.2"
Reexport = "1.2"
julia = "1.6"
