Merge pull request #708 from ArnoStrouwen/format
reapply formatter
ChrisRackauckas authored Feb 25, 2024
2 parents b389bc0 + a65ef30 commit 2736e2d
Showing 38 changed files with 200 additions and 167 deletions.
3 changes: 2 additions & 1 deletion .JuliaFormatter.toml
@@ -1,2 +1,3 @@
style = "sciml"
format_markdown = true
format_markdown = true
format_docstrings = true
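For context, a minimal sketch of how this configuration is consumed when the formatter is reapplied (illustrative only, not part of the commit; assumes JuliaFormatter is installed and run from the repository root):

```julia
using JuliaFormatter

# Format the repository in place. The .JuliaFormatter.toml above supplies the
# options: style = "sciml", format_markdown = true, format_docstrings = true.
format(".")
```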
10 changes: 5 additions & 5 deletions docs/pages.jl
@@ -4,18 +4,18 @@ pages = ["index.md",
"tutorials/minibatch.md",
"tutorials/symbolic.md",
"tutorials/constraints.md",
"tutorials/linearandinteger.md",
"tutorials/linearandinteger.md"
],
"Examples" => [
"examples/rosenbrock.md",
"examples/rosenbrock.md"
],
"Basics" => [
"API/optimization_problem.md",
"API/optimization_function.md",
"API/solve.md",
"API/optimization_solution.md",
"API/modelingtoolkit.md",
"API/FAQ.md",
"API/FAQ.md"
],
"Optimizer Packages" => [
"BlackBoxOptim.jl" => "optimization_packages/blackboxoptim.md",
@@ -33,6 +33,6 @@ pages = ["index.md",
"PRIMA.jl" => "optimization_packages/prima.md",
"Polyalgorithms.jl" => "optimization_packages/polyopt.md",
"QuadDIRECT.jl" => "optimization_packages/quaddirect.md",
"SpeedMapping.jl" => "optimization_packages/speedmapping.md",
],
"SpeedMapping.jl" => "optimization_packages/speedmapping.md"
]
]
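A hedged sketch of how this `pages` vector is typically consumed; the actual `makedocs` call lives in the docs build script (assumed here, not shown in this diff):

```julia
using Documenter

# `pages` is the nested Vector constructed in docs/pages.jl above.
makedocs(sitename = "Optimization.jl", pages = pages)
```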
1 change: 1 addition & 0 deletions docs/src/optimization_packages/optim.md
@@ -58,6 +58,7 @@ For a more extensive documentation of all the algorithms and options, please con
- [`Optim.IPNewton()`](https://julianlsolvers.github.io/Optim.jl/stable/#algo/ipnewton/)

+ `μ0` specifies the initial barrier penalty coefficient as either a number or `:auto`

+ `show_linesearch` is an option to turn on linesearch verbosity.
+ Defaults:

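An illustrative use of the two options documented in this hunk, as a sketch assuming the keyword names match Optim.jl's `IPNewton` constructor (toy problem, not taken from the docs):

```julia
using Optimization, OptimizationOptimJL

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
cons(res, x, p) = (res .= [x[1]^2 + x[2]^2])  # one inequality constraint
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
prob = OptimizationProblem(optf, zeros(2), [1.0, 100.0]; lcons = [-Inf], ucons = [1.0])
sol = solve(prob, IPNewton(μ0 = :auto, show_linesearch = false))
```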
10 changes: 5 additions & 5 deletions docs/src/tutorials/linearandinteger.md
@@ -44,11 +44,11 @@ using Optimization, OptimizationMOI, ModelingToolkit, HiGHS, LinearAlgebra
@variables m [bounds = (0.0, Inf)]
cons = [u[1] + v[1] - w[1] ~ 150 # January
u[2] + v[2] - w[2] - 1.01u[1] + 1.003w[1] ~ 100 # February
u[3] + v[3] - w[3] - 1.01u[2] + 1.003w[2] ~ -200 # March
u[4] - w[4] - 1.02v[1] - 1.01u[3] + 1.003w[3] ~ 200 # April
u[5] - w[5] - 1.02v[2] - 1.01u[4] + 1.003w[4] ~ -50 # May
-m - 1.02v[3] - 1.01u[5] + 1.003w[5] ~ -300]
u[2] + v[2] - w[2] - 1.01u[1] + 1.003w[1] ~ 100 # February
u[3] + v[3] - w[3] - 1.01u[2] + 1.003w[2] ~ -200 # March
u[4] - w[4] - 1.02v[1] - 1.01u[3] + 1.003w[3] ~ 200 # April
u[5] - w[5] - 1.02v[2] - 1.01u[4] + 1.003w[4] ~ -50 # May
-m - 1.02v[3] - 1.01u[5] + 1.003w[5] ~ -300]
@named optsys = OptimizationSystem(m, [u..., v..., w..., m], [], constraints = cons)
optprob = OptimizationProblem(optsys,
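The hunk above is cut off before the solve call; a hedged sketch of the step that usually follows (solver choice assumed from the tutorial's `HiGHS` import):

```julia
# Hand the MTK-generated problem to HiGHS through the OptimizationMOI bridge.
sol = solve(optprob, HiGHS.Optimizer())
```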
3 changes: 2 additions & 1 deletion docs/src/tutorials/minibatch.md
@@ -65,7 +65,8 @@ train_loader = Flux.Data.DataLoader((ode_data, t), batchsize = k)
numEpochs = 300
l1 = loss_adjoint(pp, train_loader.data[1], train_loader.data[2])[1]
optfun = OptimizationFunction((θ, p, batch, time_batch) -> loss_adjoint(θ, batch,
optfun = OptimizationFunction(
(θ, p, batch, time_batch) -> loss_adjoint(θ, batch,
time_batch),
Optimization.AutoZygote())
optprob = OptimizationProblem(optfun, pp)
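A minimal sketch of the calling convention this hunk reformats: each element of the data iterator is splatted into the objective after `(θ, p)`. The loss, data, and optimizer below are stand-ins, not the tutorial's actual model:

```julia
using Optimization, OptimizationOptimisers, Optimisers, Zygote

# Stand-in for loss_adjoint: extra positional arguments carry the current minibatch.
loss(θ, p, batch, time_batch) = sum(abs2, θ .- sum(batch))

optfun = OptimizationFunction(loss, Optimization.AutoZygote())
optprob = OptimizationProblem(optfun, rand(3))
data = [(rand(3), rand(3)) for _ in 1:10]  # (batch, time_batch) pairs
sol = solve(optprob, Optimisers.Adam(0.05), data)
```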
4 changes: 2 additions & 2 deletions docs/src/tutorials/symbolic.md
@@ -34,9 +34,9 @@ our parameter values are and the initial conditions. This looks like:

```@example modelingtoolkit
u0 = [x => 1.0
y => 2.0]
y => 2.0]
p = [a => 6.0
b => 7.0]
b => 7.0]
```

And now we solve.
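The hunk stops at "And now we solve."; a hedged sketch of that next step, with `sys` being the `OptimizationSystem` defined earlier in the tutorial and the solver choice assumed:

```julia
using OptimizationOptimJL

prob = OptimizationProblem(sys, u0, p; grad = true, hess = true)
sol = solve(prob, Newton())
```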
6 changes: 4 additions & 2 deletions ext/OptimizationFiniteDiffExt.jl
@@ -15,7 +15,8 @@ function Optimization.instantiate_function(f, x, adtype::AutoFiniteDiff, p,

if f.grad === nothing
gradcache = FD.GradientCache(x, x, adtype.fdtype)
grad = (res, θ, args...) -> FD.finite_difference_gradient!(res, x -> _f(x, args...),
grad = (res, θ, args...) -> FD.finite_difference_gradient!(
res, x -> _f(x, args...),
θ, gradcache)
else
grad = (G, θ, args...) -> f.grad(G, θ, p, args...)
@@ -123,7 +124,8 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,

if f.grad === nothing
gradcache = FD.GradientCache(cache.u0, cache.u0, adtype.fdtype)
grad = (res, θ, args...) -> FD.finite_difference_gradient!(res, x -> _f(x, args...),
grad = (res, θ, args...) -> FD.finite_difference_gradient!(
res, x -> _f(x, args...),
θ, gradcache)
else
grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...)
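A standalone sketch of the FiniteDiff cache-and-gradient pattern this hunk re-wraps, on a toy objective (mirrors the calls shown above):

```julia
import FiniteDiff as FD

f(x) = sum(abs2, x)                   # toy objective
x0 = rand(3)
gradcache = FD.GradientCache(x0, x0)  # cache sized and typed from a prototype
g = similar(x0)
FD.finite_difference_gradient!(g, f, x0, gradcache)
```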
4 changes: 2 additions & 2 deletions ext/OptimizationForwardDiffExt.jl
@@ -65,7 +65,7 @@ function Optimization.instantiate_function(f::OptimizationFunction{true}, x,
if cons !== nothing && f.cons_h === nothing
fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons]
hess_config_cache = [ForwardDiff.HessianConfig(fncs[i], x,
ForwardDiff.Chunk{chunksize}())
ForwardDiff.Chunk{chunksize}())
for i in 1:num_cons]
cons_h = function (res, θ)
for i in 1:num_cons
@@ -143,7 +143,7 @@ function Optimization.instantiate_function(f::OptimizationFunction{true},
if cons !== nothing && f.cons_h === nothing
fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons]
hess_config_cache = [ForwardDiff.HessianConfig(fncs[i], cache.u0,
ForwardDiff.Chunk{chunksize}())
ForwardDiff.Chunk{chunksize}())
for i in 1:num_cons]
cons_h = function (res, θ)
for i in 1:num_cons
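A toy sketch of the `HessianConfig`-with-`Chunk` pattern being re-indented here (chunk size 2 is an arbitrary choice for the example):

```julia
using ForwardDiff

g(x) = sum(abs2, x) + prod(x)         # toy scalar function
x0 = rand(4)
cfg = ForwardDiff.HessianConfig(g, x0, ForwardDiff.Chunk{2}())
H = zeros(4, 4)
ForwardDiff.hessian!(H, g, x0, cfg)
```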
3 changes: 2 additions & 1 deletion ext/OptimizationMTKExt.jl
@@ -56,7 +56,8 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
adtype::AutoModelingToolkit, num_cons = 0)
p = isnothing(cache.p) ? SciMLBase.NullParameters() : cache.p

sys = complete(ModelingToolkit.modelingtoolkitize(OptimizationProblem(f, cache.u0, cache.p;
sys = complete(ModelingToolkit.modelingtoolkitize(OptimizationProblem(
f, cache.u0, cache.p;
lcons = fill(0.0,
num_cons),
ucons = fill(0.0,
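A hedged sketch of the `modelingtoolkitize` call this hunk rewraps, on a toy unconstrained problem (so no `lcons`/`ucons` are needed):

```julia
using Optimization, ModelingToolkit

rosen(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
prob = OptimizationProblem(rosen, zeros(2), [1.0, 100.0])
sys = complete(ModelingToolkit.modelingtoolkitize(prob))  # symbolic OptimizationSystem
```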
16 changes: 8 additions & 8 deletions ext/OptimizationReverseDiffExt.jl
@@ -48,7 +48,7 @@ function Optimization.instantiate_function(f, x, adtype::AutoReverseDiff,
xdual = ForwardDiff.Dual{
typeof(T),
eltype(x),
chunksize,
chunksize
}.(x, Ref(ForwardDiff.Partials((ones(eltype(x), chunksize)...,))))
h_tape = ReverseDiff.GradientTape(_f, xdual)
htape = ReverseDiff.compile(h_tape)
@@ -118,9 +118,9 @@ function Optimization.instantiate_function(f, x, adtype::AutoReverseDiff,
end
gs = [x -> grad_cons(x, conshtapes[i]) for i in 1:num_cons]
jaccfgs = [ForwardDiff.JacobianConfig(gs[i],
x,
ForwardDiff.Chunk{chunksize}(),
T) for i in 1:num_cons]
x,
ForwardDiff.Chunk{chunksize}(),
T) for i in 1:num_cons]
cons_h = function (res, θ)
for i in 1:num_cons
ForwardDiff.jacobian!(res[i], gs[i], θ, jaccfgs[i], Val{false}())
@@ -180,7 +180,7 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
xdual = ForwardDiff.Dual{
typeof(T),
eltype(cache.u0),
chunksize,
chunksize
}.(cache.u0, Ref(ForwardDiff.Partials((ones(eltype(cache.u0), chunksize)...,))))
h_tape = ReverseDiff.GradientTape(_f, xdual)
htape = ReverseDiff.compile(h_tape)
@@ -253,9 +253,9 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
end
gs = [x -> grad_cons(x, conshtapes[i]) for i in 1:num_cons]
jaccfgs = [ForwardDiff.JacobianConfig(gs[i],
cache.u0,
ForwardDiff.Chunk{chunksize}(),
T) for i in 1:num_cons]
cache.u0,
ForwardDiff.Chunk{chunksize}(),
T) for i in 1:num_cons]
cons_h = function (res, θ)
for i in 1:num_cons
ForwardDiff.jacobian!(res[i], gs[i], θ, jaccfgs[i], Val{false}())
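A toy sketch of the compiled-tape pattern these hunks re-indent (plain Float64 inputs here, not the Dual-seeded tape used above):

```julia
using ReverseDiff

h(x) = sum(abs2, x) / 2               # toy objective
x0 = rand(5)
tape = ReverseDiff.GradientTape(h, x0)
ctape = ReverseDiff.compile(tape)     # compile once, reuse on every call
g = similar(x0)
ReverseDiff.gradient!(g, ctape, x0)
```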
74 changes: 39 additions & 35 deletions ext/OptimizationSparseDiffExt.jl
@@ -3,13 +3,13 @@ module OptimizationSparseDiffExt
import Optimization, Optimization.ArrayInterface
import Optimization.SciMLBase: OptimizationFunction
import Optimization.ADTypes: AutoSparseForwardDiff,
AutoSparseFiniteDiff, AutoSparseReverseDiff
AutoSparseFiniteDiff, AutoSparseReverseDiff
using Optimization.LinearAlgebra, ReverseDiff
isdefined(Base, :get_extension) ?
(using SparseDiffTools,
SparseDiffTools.ForwardDiff, SparseDiffTools.FiniteDiff, Symbolics) :
SparseDiffTools.ForwardDiff, SparseDiffTools.FiniteDiff, Symbolics) :
(using ..SparseDiffTools,
..SparseDiffTools.ForwardDiff, ..SparseDiffTools.FiniteDiff, ..Symbolics)
..SparseDiffTools.ForwardDiff, ..SparseDiffTools.FiniteDiff, ..Symbolics)

function default_chunk_size(len)
if len < ForwardDiff.DEFAULT_CHUNK_THRESHOLD
@@ -98,8 +98,8 @@ function Optimization.instantiate_function(f::OptimizationFunction{true}, x,
end

fcons = [(x) -> (_res = zeros(eltype(x), num_cons);
cons(_res, x);
_res[i]) for i in 1:num_cons]
cons(_res, x);
_res[i]) for i in 1:num_cons]
cons_hess_caches = gen_conshess_cache.(fcons, Ref(x))
cons_h = function (res, θ)
for i in 1:num_cons
@@ -205,8 +205,8 @@ function Optimization.instantiate_function(f::OptimizationFunction{true},
end

fcons = [(x) -> (_res = zeros(eltype(x), num_cons);
cons(_res, x);
_res[i]) for i in 1:num_cons]
cons(_res, x);
_res[i]) for i in 1:num_cons]
cons_hess_caches = gen_conshess_cache.(fcons, Ref(cache.u0))
cons_h = function (res, θ)
for i in 1:num_cons
@@ -246,7 +246,8 @@ function Optimization.instantiate_function(f, x, adtype::AutoSparseFiniteDiff, p

if f.grad === nothing
gradcache = FD.GradientCache(x, x)
grad = (res, θ, args...) -> FD.finite_difference_gradient!(res, x -> _f(x, args...),
grad = (res, θ, args...) -> FD.finite_difference_gradient!(
res, x -> _f(x, args...),
θ, gradcache)
else
grad = (G, θ, args...) -> f.grad(G, θ, p, args...)
@@ -314,8 +315,8 @@ function Optimization.instantiate_function(f, x, adtype::AutoSparseFiniteDiff, p
end

fcons = [(x) -> (_res = zeros(eltype(x), num_cons);
cons(_res, x);
_res[i]) for i in 1:num_cons]
cons(_res, x);
_res[i]) for i in 1:num_cons]
conshess_caches = gen_conshess_cache.(fcons, Ref(x))
cons_h = function (res, θ)
for i in 1:num_cons
@@ -370,7 +371,8 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,

if f.grad === nothing
gradcache = FD.GradientCache(cache.u0, cache.u0)
grad = (res, θ, args...) -> FD.finite_difference_gradient!(res, x -> _f(x, args...),
grad = (res, θ, args...) -> FD.finite_difference_gradient!(
res, x -> _f(x, args...),
θ, gradcache)
else
grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...)
@@ -439,8 +441,8 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
end

fcons = [(x) -> (_res = zeros(eltype(x), num_cons);
cons(_res, x);
_res[i]) for i in 1:num_cons]
cons(_res, x);
_res[i]) for i in 1:num_cons]
conshess_caches = [gen_conshess_cache(fcons[i], cache.u0) for i in 1:num_cons]
cons_h = function (res, θ)
for i in 1:num_cons
@@ -527,7 +529,7 @@ function Optimization.instantiate_function(f, x, adtype::AutoSparseReverseDiff,
xdual = ForwardDiff.Dual{
typeof(T),
eltype(x),
min(chunksize, maximum(hess_colors)),
min(chunksize, maximum(hess_colors))
}.(x,
Ref(ForwardDiff.Partials((ones(eltype(x),
min(chunksize, maximum(hess_colors)))...,))))
@@ -611,23 +613,24 @@ function Optimization.instantiate_function(f, x, adtype::AutoSparseReverseDiff,
if adtype.compile
T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(x))
xduals = [ForwardDiff.Dual{
typeof(T),
eltype(x),
min(chunksize, maximum(conshess_colors[i])),
}.(x,
Ref(ForwardDiff.Partials((ones(eltype(x),
min(chunksize, maximum(conshess_colors[i])))...,)))) for i in 1:num_cons]
typeof(T),
eltype(x),
min(chunksize, maximum(conshess_colors[i]))
}.(x,
Ref(ForwardDiff.Partials((ones(eltype(x),
min(chunksize, maximum(conshess_colors[i])))...,))))
for i in 1:num_cons]
consh_tapes = [ReverseDiff.GradientTape(fncs[i], xduals[i]) for i in 1:num_cons]
conshtapes = ReverseDiff.compile.(consh_tapes)
function grad_cons(res1, θ, htape)
ReverseDiff.gradient!(res1, htape, θ)
end
gs = [(res1, x) -> grad_cons(res1, x, conshtapes[i]) for i in 1:num_cons]
jaccfgs = [ForwardColorJacCache(gs[i],
x;
tag = typeof(T),
colorvec = conshess_colors[i],
sparsity = conshess_sparsity[i]) for i in 1:num_cons]
x;
tag = typeof(T),
colorvec = conshess_colors[i],
sparsity = conshess_sparsity[i]) for i in 1:num_cons]
cons_h = function (res, θ, args...)
for i in 1:num_cons
SparseDiffTools.forwarddiff_color_jacobian!(res[i],
@@ -701,7 +704,7 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
xdual = ForwardDiff.Dual{
typeof(T),
eltype(cache.u0),
min(chunksize, maximum(hess_colors)),
min(chunksize, maximum(hess_colors))
}.(cache.u0,
Ref(ForwardDiff.Partials((ones(eltype(cache.u0),
min(chunksize, maximum(hess_colors)))...,))))
@@ -802,12 +805,13 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
if adtype.compile
T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(cache.u0))
xduals = [ForwardDiff.Dual{
typeof(T),
eltype(cache.u0),
min(chunksize, maximum(conshess_colors[i])),
}.(cache.u0,
Ref(ForwardDiff.Partials((ones(eltype(cache.u0),
min(chunksize, maximum(conshess_colors[i])))...,)))) for i in 1:num_cons]
typeof(T),
eltype(cache.u0),
min(chunksize, maximum(conshess_colors[i]))
}.(cache.u0,
Ref(ForwardDiff.Partials((ones(eltype(cache.u0),
min(chunksize, maximum(conshess_colors[i])))...,))))
for i in 1:num_cons]
consh_tapes = [ReverseDiff.GradientTape(fncs[i], xduals[i]) for i in 1:num_cons]
conshtapes = ReverseDiff.compile.(consh_tapes)
function grad_cons(res1, θ, htape)
@@ -821,10 +825,10 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
end
end
jaccfgs = [ForwardColorJacCache(gs[i],
cache.u0;
tag = typeof(T),
colorvec = conshess_colors[i],
sparsity = conshess_sparsity[i]) for i in 1:num_cons]
cache.u0;
tag = typeof(T),
colorvec = conshess_colors[i],
sparsity = conshess_sparsity[i]) for i in 1:num_cons]
cons_h = function (res, θ)
for i in 1:num_cons
SparseDiffTools.forwarddiff_color_jacobian!(res[i],
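A small sketch of the color-compressed Jacobian machinery touched throughout this file, on a toy in-place map, with sparsity detected via Symbolics as the extension does (names and sizes here are illustrative):

```julia
using SparseDiffTools, Symbolics, SparseArrays

f!(y, x) = (y[1] = x[1]^2; y[2] = x[2] + x[3]; y[3] = x[3]^3; nothing)
x0 = rand(3)
sparsity = Symbolics.jacobian_sparsity(f!, similar(x0), x0)  # structural pattern
colors = matrix_colors(sparsity)                             # column coloring
J = Float64.(sparse(sparsity))
jaccache = ForwardColorJacCache(f!, x0; colorvec = colors, sparsity = sparsity)
forwarddiff_color_jacobian!(J, f!, x0, jaccache)
```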
6 changes: 4 additions & 2 deletions ext/OptimizationTrackerExt.jl
@@ -11,7 +11,8 @@ function Optimization.instantiate_function(f, x, adtype::AutoTracker, p,
_f = (θ, args...) -> first(f.f(θ, p, args...))

if f.grad === nothing
grad = (res, θ, args...) -> res .= Tracker.data(Tracker.gradient(x -> _f(x, args...),
grad = (res, θ, args...) -> res .= Tracker.data(Tracker.gradient(
x -> _f(x, args...),
θ)[1])
else
grad = (G, θ, args...) -> f.grad(G, θ, p, args...)
@@ -42,7 +43,8 @@ function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
_f = (θ, args...) -> first(f.f(θ, cache.p, args...))

if f.grad === nothing
grad = (res, θ, args...) -> res .= Tracker.data(Tracker.gradient(x -> _f(x, args...),
grad = (res, θ, args...) -> res .= Tracker.data(Tracker.gradient(
x -> _f(x, args...),
θ)[1])
else
grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...)
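A toy sketch of the Tracker gradient extraction re-wrapped in this file:

```julia
using Tracker

q(x) = sum(abs2, x)
x0 = rand(3)
g = Tracker.data(Tracker.gradient(q, x0)[1])  # strip tracking from the result
```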
4 changes: 2 additions & 2 deletions lib/OptimizationBBO/src/OptimizationBBO.jl
@@ -88,7 +88,7 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
O,
D,
P,
C,
C
}) where {
F,
RC,
@@ -101,7 +101,7 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
BBO,
D,
P,
C,
C
}
local x, cur, state

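For reference, a hedged sketch of invoking this BBO solve path; box bounds are required by BlackBoxOptim, and the solver constructor name is assumed from the package docs:

```julia
using Optimization, OptimizationBBO

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
prob = OptimizationProblem(rosenbrock, zeros(2), [1.0, 100.0];
    lb = [-1.0, -1.0], ub = [1.5, 1.5])
sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited(); maxiters = 1000)
```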