Fix sparse jacobians and add sparsereversediff backend
Vaibhavdixit02 committed Sep 7, 2023
1 parent d52c45a commit 9ac5a8f
Showing 3 changed files with 184 additions and 102 deletions.
6 changes: 4 additions & 2 deletions ext/OptimizationForwardDiffExt.jl
@@ -107,8 +107,8 @@ function Optimization.instantiate_function(f::OptimizationFunction{true},

if f.hess === nothing
hesscfg = ForwardDiff.HessianConfig(_f, cache.u0, ForwardDiff.Chunk{chunksize}())
hess = (res, θ, args...) -> ForwardDiff.hessian!(res, x -> _f(x, args...), θ,
hesscfg, Val{false}())
hess = (res, θ, args...) -> (ForwardDiff.hessian!(res, x -> _f(x, args...), θ,
hesscfg, Val{false}()))
else
hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...)
end
@@ -135,6 +135,7 @@ function Optimization.instantiate_function(f::OptimizationFunction{true},
ForwardDiff.Chunk{chunksize}())
cons_j = function (J, θ)
ForwardDiff.jacobian!(J, cons_oop, θ, cjconfig)
println(J)

end
else
cons_j = (J, θ) -> f.cons_j(J, θ, cache.p)
@@ -149,6 +150,7 @@ function Optimization.instantiate_function(f::OptimizationFunction{true},
for i in 1:num_cons
ForwardDiff.hessian!(res[i], fncs[i], θ, hess_config_cache[i], Val{true}())
end
# println(res)
end
else
cons_h = (res, θ) -> f.cons_h(res, θ, cache.p)
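For reference, the constraint-Jacobian path touched above (the `cons_j` closure where the temporary `println(J)` was added) is a plain in-place `ForwardDiff.jacobian!` with a preallocated `JacobianConfig`. A minimal standalone sketch of that pattern; the two-constraint `cons_oop` below is a made-up stand-in, not code from this commit:

```julia
# Sketch only (not from the commit): evaluate a dense constraint Jacobian the way
# the cons_j closure above does, with a reusable JacobianConfig.
using ForwardDiff

cons_oop(x) = [x[1]^2 + x[2]^2, x[1] * x[2]]   # made-up out-of-place constraints

θ = [1.0, 2.0]
J = zeros(2, 2)                                # num_cons × length(θ)
cjconfig = ForwardDiff.JacobianConfig(cons_oop, θ, ForwardDiff.Chunk{2}())
ForwardDiff.jacobian!(J, cons_oop, θ, cjconfig)  # fills J in place
# J ≈ [2.0 4.0; 2.0 1.0]
```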
164 changes: 80 additions & 84 deletions ext/OptimizationReverseDiffExt.jl
@@ -20,11 +20,9 @@ function Optimization.instantiate_function(f, x, adtype::AutoReverseDiff,
end

if f.hess === nothing

hess = function (res, θ, args...)

res .= SparseDiffTools.forwarddiff_color_jacobian(θ, colorvec = hess_colors, sparsity = hess_sparsity) do θ
ReverseDiff.gradient(x -> _f(x, args...), θ)
end
ReverseDiff.hessian!(res, x -> _f(x, args...), θ)

end
else
hess = (H, θ, args...) -> f.hess(H, θ, p, args...)
@@ -61,9 +59,7 @@ function Optimization.instantiate_function(f, x, adtype::AutoReverseDiff,

cons_h = function (res, θ)
for i in 1:num_cons
res[i] .= SparseDiffTools.forwarddiff_color_jacobian(θ, ) do θ
ReverseDiff.gradient(fncs[i], θ)
end
ReverseDiff.gradient(res[i], fncs[i], θ)

end
end
else
@@ -83,82 +79,82 @@ function Optimization.instantiate_function(f, x, adtype::AutoReverseDiff,
lag_h, f.lag_hess_prototype)
end

# function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
# adtype::AutoReverseDiff, num_cons = 0)
# _f = (θ, args...) -> first(f.f(θ, cache.p, args...))

# if f.grad === nothing
# grad = (res, θ, args...) -> ReverseDiff.gradient!(res, x -> _f(x, args...), θ)
# else
# grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...)
# end

# if f.hess === nothing
# hess_sparsity = Symbolics.hessian_sparsity(_f, cache.u0)
# hess_colors = SparseDiffTools.matrix_colors(tril(hess_sparsity))
# hess = function (res, θ, args...)
# res .= SparseDiffTools.forwarddiff_color_jacobian(θ, colorvec = hess_colors, sparsity = hess_sparsity) do θ
# ReverseDiff.gradient(x -> _f(x, args...), θ)
# end
# end
# else
# hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...)
# end

# if f.hv === nothing
# hv = function (H, θ, v, args...)
# _θ = ForwardDiff.Dual.(θ, v)
# res = similar(_θ)
# grad(res, _θ, args...)
# H .= getindex.(ForwardDiff.partials.(res), 1)
# end
# else
# hv = f.hv
# end

# if f.cons === nothing
# cons = nothing
# else
# cons = (res, θ) -> f.cons(res, θ, cache.p)
# cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res)
# end

# if cons !== nothing && f.cons_j === nothing
# cjconfig = ReverseDiff.JacobianConfig(cache.u0)
# cons_j = function (J, θ)
# ReverseDiff.jacobian!(J, cons_oop, θ, cjconfig)
# end
# else
# cons_j = (J, θ) -> f.cons_j(J, θ, cache.p)
# end

# if cons !== nothing && f.cons_h === nothing
# fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons]
# conshess_sparsity = Symbolics.hessian_sparsity.(fncs, Ref(cache.u0))
# conshess_colors = SparseDiffTools.matrix_colors.(conshess_sparsity)
# cons_h = function (res, θ)
# for i in 1:num_cons
# res[i] .= SparseDiffTools.forwarddiff_color_jacobian(θ, colorvec = conshess_colors[i], sparsity = conshess_sparsity[i]) do θ
# ReverseDiff.gradient(fncs[i], θ)
# end
# end
# end
# else
# cons_h = (res, θ) -> f.cons_h(res, θ, cache.p)
# end

# if f.lag_h === nothing
# lag_h = nothing # Consider implementing this
# else
# lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p)
# end

# return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv,
# cons = cons, cons_j = cons_j, cons_h = cons_h,
# hess_prototype = f.hess_prototype,
# cons_jac_prototype = f.cons_jac_prototype,
# cons_hess_prototype = f.cons_hess_prototype,
# lag_h, f.lag_hess_prototype)
# end
function Optimization.instantiate_function(f, cache::Optimization.ReInitCache,
adtype::AutoReverseDiff, num_cons = 0)
_f = (θ, args...) -> first(f.f(θ, cache.p, args...))

if f.grad === nothing
grad = (res, θ, args...) -> ReverseDiff.gradient!(res, x -> _f(x, args...), θ)
else
grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...)
end

if f.hess === nothing
hess_sparsity = Symbolics.hessian_sparsity(_f, cache.u0)
hess_colors = SparseDiffTools.matrix_colors(tril(hess_sparsity))

hess = function (res, θ, args...)
res .= SparseDiffTools.forwarddiff_color_jacobian(θ, colorvec = hess_colors, sparsity = hess_sparsity) do θ

ReverseDiff.gradient(x -> _f(x, args...), θ)
end
end
else
hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...)
end

if f.hv === nothing
hv = function (H, θ, v, args...)
_θ = ForwardDiff.Dual.(θ, v)
res = similar(_θ)
grad(res, _θ, args...)
H .= getindex.(ForwardDiff.partials.(res), 1)
end
else
hv = f.hv
end

if f.cons === nothing
cons = nothing
else
cons = (res, θ) -> f.cons(res, θ, cache.p)
cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res)
end

if cons !== nothing && f.cons_j === nothing
cjconfig = ReverseDiff.JacobianConfig(cache.u0)
cons_j = function (J, θ)
ReverseDiff.jacobian!(J, cons_oop, θ, cjconfig)
end
else
cons_j = (J, θ) -> f.cons_j(J, θ, cache.p)
end

if cons !== nothing && f.cons_h === nothing
fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons]
conshess_sparsity = Symbolics.hessian_sparsity.(fncs, Ref(cache.u0))
conshess_colors = SparseDiffTools.matrix_colors.(conshess_sparsity)

cons_h = function (res, θ)
for i in 1:num_cons
res[i] .= SparseDiffTools.forwarddiff_color_jacobian(θ, colorvec = conshess_colors[i], sparsity = conshess_sparsity[i]) do θ

ReverseDiff.gradient(fncs[i], θ)
end
end
end
else
cons_h = (res, θ) -> f.cons_h(res, θ, cache.p)
end

if f.lag_h === nothing
lag_h = nothing # Consider implementing this
else
lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p)
end

return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv,
cons = cons, cons_j = cons_j, cons_h = cons_h,
hess_prototype = f.hess_prototype,
cons_jac_prototype = f.cons_jac_prototype,
cons_hess_prototype = f.cons_hess_prototype,
lag_h, f.lag_hess_prototype)
end

end
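Two AD tricks used in the re-enabled `ReInitCache` method above may be easier to follow in isolation. First, when the default ReverseDiff gradient is in play, the `hv` closure builds a Hessian-vector product forward-over-reverse: it seeds `ForwardDiff.Dual` numbers with the direction `v`, evaluates the gradient on those duals, and reads off the partials. A minimal sketch with a made-up quadratic objective (illustrative only, not part of the commit):

```julia
# Sketch only (not from the commit): forward-over-reverse Hessian-vector product,
# mirroring the hv closure above. The objective is a made-up quadratic.
using ForwardDiff, ReverseDiff

obj(θ) = sum(abs2, θ)              # stand-in objective; its Hessian is 2I

θ = [1.0, 2.0, 3.0]
v = [1.0, 0.0, 0.0]

_θ = ForwardDiff.Dual.(θ, v)       # dual numbers carrying the direction v
res = similar(_θ)
ReverseDiff.gradient!(res, obj, _θ)  # reverse-mode gradient evaluated on dual inputs
Hv = getindex.(ForwardDiff.partials.(res), 1)
# Hv ≈ [2.0, 0.0, 0.0]  (== 2I * v)
```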
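Second, the sparse Hessian is recovered as the Jacobian of the ReverseDiff gradient: `Symbolics.hessian_sparsity` supplies the pattern, its lower triangle is colored, and `SparseDiffTools.forwarddiff_color_jacobian` compresses the evaluation, exactly the call pattern in the `hess` and `cons_h` closures above. A hedged standalone sketch with a made-up objective (not code from this commit):

```julia
# Sketch only (not from the commit): sparse Hessian via coloring, mirroring the
# ReInitCache method above. The objective and point are made-up examples.
using Symbolics, SparseDiffTools, ReverseDiff, LinearAlgebra

obj(θ) = θ[1]^2 + θ[2]^2 + θ[1] * θ[2] + θ[3]^4   # stand-in objective

u0 = ones(3)
hess_sparsity = Symbolics.hessian_sparsity(obj, u0)
hess_colors = SparseDiffTools.matrix_colors(tril(hess_sparsity))

H = SparseDiffTools.forwarddiff_color_jacobian(u0, colorvec = hess_colors,
                                               sparsity = hess_sparsity) do θ
    ReverseDiff.gradient(obj, θ)   # Hessian = Jacobian of the gradient
end
# H ≈ [2 1 0; 1 2 0; 0 0 12]
```

Note that, as in the diff, the color vector is computed from `tril(hess_sparsity)` while the full pattern is passed as `sparsity`; getting this sparsity/coloring pairing right is what the commit title refers to as fixing the sparse Jacobians.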
