From de79ae8bbc5b1fb2068793e51859fba4a81de7b2 Mon Sep 17 00:00:00 2001 From: Chris Rackauckas Date: Mon, 30 May 2022 17:58:27 -0400 Subject: [PATCH 1/4] Rename to Optimization.jl Is the naming scheme for the sub libraries good? --- .github/workflows/CI.yml | 32 +++++----- Project.toml | 4 +- README.md | 56 ++++++++--------- docs/make.jl | 12 ++-- docs/src/API/modelingtoolkit.md | 4 +- docs/src/index.md | 16 ++--- .../optimization_packages/blackboxoptim.md | 8 +-- .../cmaevolutionstrategy.md | 8 +-- .../src/optimization_packages/evolutionary.md | 8 +-- docs/src/optimization_packages/flux.md | 6 +- docs/src/optimization_packages/gcmaes.md | 12 ++-- .../optimization_packages/mathoptinterface.md | 36 +++++------ .../optimization_packages/metaheuristics.md | 16 ++--- .../multistartoptimization.md | 14 ++--- docs/src/optimization_packages/nlopt.md | 20 +++--- docs/src/optimization_packages/nomad.md | 12 ++-- docs/src/optimization_packages/nonconvex.md | 12 ++-- docs/src/optimization_packages/optim.md | 32 +++++----- docs/src/optimization_packages/quaddirect.md | 8 +-- .../src/optimization_packages/speedmapping.md | 8 +-- docs/src/tutorials/intro.md | 14 ++--- docs/src/tutorials/minibatch.md | 8 +-- docs/src/tutorials/rosenbrock.md | 28 ++++----- docs/src/tutorials/symbolic.md | 4 +- lib/GalacticMOI/test/runtests.jl | 33 ---------- .../test/runtests.jl | 14 ----- lib/GalacticPolyalgorithms/Project.toml | 22 ------- lib/GalacticPolyalgorithms/test/runtests.jl | 14 ----- lib/{GalacticBBO => OptimizationBBO}/LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationBBO.jl} | 12 ++-- .../test/runtests.jl | 6 +- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationCMAEvolutionStrategy.jl} | 12 ++-- .../test/runtests.jl | 4 +- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationEvolutionary.jl} | 12 ++-- .../test/runtests.jl | 6 +- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationFlux.jl} | 16 ++--- .../test/runtests.jl | 8 +-- .../LICENSE | 0 
.../Project.toml | 8 +-- .../src/OptimizationGCMAES.jl} | 12 ++-- .../test/runtests.jl | 10 +-- lib/{GalacticMOI => OptimizationMOI}/LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationMOI.jl} | 12 ++-- lib/OptimizationMOI/test/runtests.jl | 33 ++++++++++ .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationMetaheuristics.jl} | 14 ++--- .../test/runtests.jl | 6 +- .../LICENSE | 0 .../Project.toml | 8 +-- .../OptimizationMultistartOptimization.jl} | 4 +- .../test/runtests.jl | 14 +++++ .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationNLopt.jl} | 16 ++--- .../test/runtests.jl | 10 +-- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationNOMAD.jl} | 8 +-- .../test/runtests.jl | 8 +-- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationNonconvex.jl} | 16 ++--- .../src/nonconvex_bayesian.jl | 0 .../src/nonconvex_ipopt.jl | 0 .../src/nonconvex_juniper.jl | 0 .../src/nonconvex_mma.jl | 0 .../src/nonconvex_multistart.jl | 4 +- .../src/nonconvex_nlopt.jl | 0 .../src/nonconvex_pavito.jl | 0 .../src/nonconvex_percival.jl | 0 .../src/nonconvex_search.jl | 0 .../test/runtests.jl | 26 ++++---- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationOptimJL.jl} | 62 +++++++++---------- .../test/runtests.jl | 26 ++++---- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationOptimisers.jl} | 16 ++--- .../test/runtests.jl | 8 +-- .../LICENSE | 0 lib/OptimizationPolyalgorithms/Project.toml | 22 +++++++ .../src/OptimizationPolyalgorithms.jl} | 14 ++--- .../test/runtests.jl | 14 +++++ .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationQuadDIRECT.jl} | 6 +- .../test/runtests.jl | 4 +- .../LICENSE | 0 .../Project.toml | 8 +-- .../src/OptimizationSpeedMapping.jl} | 10 +-- .../test/runtests.jl | 6 +- src/{GalacticOptim.jl => Optimization.jl} | 2 +- src/function/function.jl | 2 +- test/ADtests.jl | 34 +++++----- test/diffeqfluxtests.jl | 18 +++--- test/minibatch.jl | 6 +- test/runtests.jl | 8 +-- 107 files changed, 554 
insertions(+), 554 deletions(-) delete mode 100644 lib/GalacticMOI/test/runtests.jl delete mode 100644 lib/GalacticMultistartOptimization/test/runtests.jl delete mode 100644 lib/GalacticPolyalgorithms/Project.toml delete mode 100644 lib/GalacticPolyalgorithms/test/runtests.jl rename lib/{GalacticBBO => OptimizationBBO}/LICENSE (100%) rename lib/{GalacticBBO => OptimizationBBO}/Project.toml (66%) rename lib/{GalacticBBO/src/GalacticBBO.jl => OptimizationBBO/src/OptimizationBBO.jl} (90%) rename lib/{GalacticBBO => OptimizationBBO}/test/runtests.jl (64%) rename lib/{GalacticCMAEvolutionStrategy => OptimizationCMAEvolutionStrategy}/LICENSE (100%) rename lib/{GalacticCMAEvolutionStrategy => OptimizationCMAEvolutionStrategy}/Project.toml (64%) rename lib/{GalacticCMAEvolutionStrategy/src/GalacticCMAEvolutionStrategy.jl => OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl} (86%) rename lib/{GalacticCMAEvolutionStrategy => OptimizationCMAEvolutionStrategy}/test/runtests.jl (76%) rename lib/{GalacticEvolutionary => OptimizationEvolutionary}/LICENSE (100%) rename lib/{GalacticEvolutionary => OptimizationEvolutionary}/Project.toml (69%) rename lib/{GalacticEvolutionary/src/GalacticEvolutionary.jl => OptimizationEvolutionary/src/OptimizationEvolutionary.jl} (89%) rename lib/{GalacticEvolutionary => OptimizationEvolutionary}/test/runtests.jl (65%) rename lib/{GalacticFlux => OptimizationFlux}/LICENSE (100%) rename lib/{GalacticFlux => OptimizationFlux}/Project.toml (78%) rename lib/{GalacticFlux/src/GalacticFlux.jl => OptimizationFlux/src/OptimizationFlux.jl} (78%) rename lib/{GalacticFlux => OptimizationFlux}/test/runtests.jl (62%) rename lib/{GalacticGCMAES => OptimizationGCMAES}/LICENSE (100%) rename lib/{GalacticGCMAES => OptimizationGCMAES}/Project.toml (69%) rename lib/{GalacticGCMAES/src/GalacticGCMAES.jl => OptimizationGCMAES/src/OptimizationGCMAES.jl} (85%) rename lib/{GalacticGCMAES => OptimizationGCMAES}/test/runtests.jl (50%) rename 
lib/{GalacticMOI => OptimizationMOI}/LICENSE (100%) rename lib/{GalacticMOI => OptimizationMOI}/Project.toml (76%) rename lib/{GalacticMOI/src/GalacticMOI.jl => OptimizationMOI/src/OptimizationMOI.jl} (94%) create mode 100644 lib/OptimizationMOI/test/runtests.jl rename lib/{GalacticMetaheuristics => OptimizationMetaheuristics}/LICENSE (100%) rename lib/{GalacticMetaheuristics => OptimizationMetaheuristics}/Project.toml (69%) rename lib/{GalacticMetaheuristics/src/GalacticMetaheuristics.jl => OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl} (92%) rename lib/{GalacticMetaheuristics => OptimizationMetaheuristics}/test/runtests.jl (86%) rename lib/{GalacticMultistartOptimization => OptimizationMultistartOptimization}/LICENSE (100%) rename lib/{GalacticMultistartOptimization => OptimizationMultistartOptimization}/Project.toml (75%) rename lib/{GalacticMultistartOptimization/src/GalacticMultistartOptimization.jl => OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl} (91%) create mode 100644 lib/OptimizationMultistartOptimization/test/runtests.jl rename lib/{GalacticNLopt => OptimizationNLopt}/LICENSE (100%) rename lib/{GalacticNLopt => OptimizationNLopt}/Project.toml (72%) rename lib/{GalacticNLopt/src/GalacticNLopt.jl => OptimizationNLopt/src/OptimizationNLopt.jl} (86%) rename lib/{GalacticNLopt => OptimizationNLopt}/test/runtests.jl (83%) rename lib/{GalacticNOMAD => OptimizationNOMAD}/LICENSE (100%) rename lib/{GalacticNOMAD => OptimizationNOMAD}/Project.toml (64%) rename lib/{GalacticNOMAD/src/GalacticNOMAD.jl => OptimizationNOMAD/src/OptimizationNOMAD.jl} (91%) rename lib/{GalacticNOMAD => OptimizationNOMAD}/test/runtests.jl (68%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/LICENSE (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/Project.toml (88%) rename lib/{GalacticNonconvex/src/GalacticNonconvex.jl => OptimizationNonconvex/src/OptimizationNonconvex.jl} (91%) rename lib/{GalacticNonconvex => 
OptimizationNonconvex}/src/nonconvex_bayesian.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_ipopt.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_juniper.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_mma.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_multistart.jl (95%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_nlopt.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_pavito.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_percival.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/src/nonconvex_search.jl (100%) rename lib/{GalacticNonconvex => OptimizationNonconvex}/test/runtests.jl (90%) rename lib/{GalacticOptimJL => OptimizationOptimJL}/LICENSE (100%) rename lib/{GalacticOptimJL => OptimizationOptimJL}/Project.toml (79%) rename lib/{GalacticOptimJL/src/GalacticOptimJL.jl => OptimizationOptimJL/src/OptimizationOptimJL.jl} (82%) rename lib/{GalacticOptimJL => OptimizationOptimJL}/test/runtests.jl (74%) rename lib/{GalacticOptimisers => OptimizationOptimisers}/LICENSE (100%) rename lib/{GalacticOptimisers => OptimizationOptimisers}/Project.toml (78%) rename lib/{GalacticOptimisers/src/GalacticOptimisers.jl => OptimizationOptimisers/src/OptimizationOptimisers.jl} (80%) rename lib/{GalacticOptimisers => OptimizationOptimisers}/test/runtests.jl (60%) rename lib/{GalacticPolyalgorithms => OptimizationPolyalgorithms}/LICENSE (100%) create mode 100644 lib/OptimizationPolyalgorithms/Project.toml rename lib/{GalacticPolyalgorithms/src/GalacticPolyalgorithms.jl => OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl} (68%) create mode 100644 lib/OptimizationPolyalgorithms/test/runtests.jl rename lib/{GalacticQuadDIRECT => OptimizationQuadDIRECT}/LICENSE (100%) rename lib/{GalacticQuadDIRECT => OptimizationQuadDIRECT}/Project.toml 
(62%) rename lib/{GalacticQuadDIRECT/src/GalacticQuadDIRECT.jl => OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl} (93%) rename lib/{GalacticQuadDIRECT => OptimizationQuadDIRECT}/test/runtests.jl (84%) rename lib/{GalacticSpeedMapping => OptimizationSpeedMapping}/LICENSE (100%) rename lib/{GalacticSpeedMapping => OptimizationSpeedMapping}/Project.toml (69%) rename lib/{GalacticSpeedMapping/src/GalacticSpeedMapping.jl => OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl} (86%) rename lib/{GalacticSpeedMapping => OptimizationSpeedMapping}/test/runtests.jl (80%) rename src/{GalacticOptim.jl => Optimization.jl} (98%) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 8076b0ec0..8bd57519a 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -15,22 +15,22 @@ jobs: matrix: group: - Core - - GalacticBBO - - GalacticCMAEvolutionStrategy - - GalacticEvolutionary - - GalacticFlux - - GalacticGCMAES - - GalacticMetaheuristics - - GalacticMOI - - GalacticMultistartOptimization - - GalacticNLopt - #- GalacticNonconvex - - GalacticNOMAD - - GalacticOptimJL - - GalacticOptimisers - - GalacticQuadDIRECT - - GalacticSpeedMapping - - GalacticPolyalgorithms + - OptimizationBBO + - OptimizationCMAEvolutionStrategy + - OptimizationEvolutionary + - OptimizationFlux + - OptimizationGCMAES + - OptimizationMetaheuristics + - OptimizationMOI + - OptimizationMultistartOptimization + - OptimizationNLopt + #- OptimizationNonconvex + - OptimizationNOMAD + - OptimizationOptimJL + - OptimizationOptimisers + - OptimizationQuadDIRECT + - OptimizationSpeedMapping + - OptimizationPolyalgorithms version: - '1' - '1.6' diff --git a/Project.toml b/Project.toml index 75455a2d4..eb4f83c33 100644 --- a/Project.toml +++ b/Project.toml @@ -1,5 +1,5 @@ -name = "GalacticOptim" -uuid = "a75be94c-b780-496d-a8a9-0878b188d577" +name = "Optimization" +uuid = "7f7a1694-90dd-40f0-9382-eb1efda571ba" authors = ["Vaibhavdixit02 "] version = "3.5.0" diff --git a/README.md 
b/README.md index e82444aea..1967b1822 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,13 @@ -# GalacticOptim.jl +# Optimization.jl -[![Build Status](https://github.com/SciML/GalacticOptim.jl/workflows/CI/badge.svg)](https://github.com/SciML/GalacticOptim.jl/actions?query=workflow%3ACI) -[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](http://galacticoptim.sciml.ai/stable/) -[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](http://galacticoptim.sciml.ai/dev/) +[![Build Status](https://github.com/SciML/Optimization.jl/workflows/CI/badge.svg)](https://github.com/SciML/Optimization.jl/actions?query=workflow%3ACI) +[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](http://Optimization.sciml.ai/stable/) +[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](http://Optimization.sciml.ai/dev/) -GalacticOptim.jl is a package with a scope that is beyond your normal global optimization -package. GalacticOptim.jl seeks to bring together all of the optimization packages +Optimization.jl is a package with a scope that is beyond your normal global optimization +package. Optimization.jl seeks to bring together all of the optimization packages it can find, local and global, into one unified Julia interface. This means, you -learn one package and you learn them all! GalacticOptim.jl adds a few high-level +learn one package and you learn them all! Optimization.jl adds a few high-level features, such as integrating with automatic differentiation, to make its usage fairly simple for most cases, while allowing all of the options in a single unified interface. @@ -15,51 +15,51 @@ unified interface. 
## Installation Assuming that you already have Julia correctly installed, it suffices to import -GalacticOptim.jl in the standard way: +Optimization.jl in the standard way: ```julia -import Pkg; Pkg.add("GalacticOptim") +import Pkg; Pkg.add("Optimization") ``` -The packages relevant to the core functionality of GalacticOptim.jl will be imported +The packages relevant to the core functionality of Optimization.jl will be imported accordingly and, in most cases, you do not have to worry about the manual installation of dependencies. Below is the list of packages that need to be installed explicitly if you intend to use the specific optimization algorithms offered by them: -- GalacticBBO for [BlackBoxOptim.jl](https://github.com/robertfeldt/BlackBoxOptim.jl) -- GalacticEvolutionary for [Evolutionary.jl](https://github.com/wildart/Evolutionary.jl) (see also [this documentation](https://wildart.github.io/Evolutionary.jl/dev/)) -- GalacticGCMAES for [GCMAES.jl](https://github.com/AStupidBear/GCMAES.jl) -- GalacticMOI for [MathOptInterface.jl](https://github.com/jump-dev/MathOptInterface.jl) (usage of algorithm via MathOptInterface API; see also the API [documentation](https://jump.dev/MathOptInterface.jl/stable/)) -- GalacticMetaheuristics for [Metaheuristics.jl](https://github.com/jmejia8/Metaheuristics.jl) (see also [this documentation](https://jmejia8.github.io/Metaheuristics.jl/stable/)) -- GalacticMultistartOptimization for [MultistartOptimization.jl](https://github.com/tpapp/MultistartOptimization.jl) (see also [this documentation](https://juliahub.com/docs/MultistartOptimization/cVZvi/0.1.0/)) -- GalacticNLopt for [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) (usage via the NLopt API; see also the available [algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/)) -- GalacticNOMAD for [NOMAD.jl](https://github.com/bbopt/NOMAD.jl) (see also [this documentation](https://bbopt.github.io/NOMAD.jl/stable/)) -- GalacticNonconvex for 
[Nonconvex.jl](https://github.com/JuliaNonconvex/Nonconvex.jl) (see also [this documentation](https://julianonconvex.github.io/Nonconvex.jl/stable/)) -- GalacticQuadDIRECT for [QuadDIRECT.jl](https://github.com/timholy/QuadDIRECT.jl) -- GalacticSpeedMapping for [SpeedMapping.jl](https://github.com/NicolasL-S/SpeedMapping.jl) (see also [this documentation](https://nicolasl-s.github.io/SpeedMapping.jl/stable/)) +- OptimizationBBO for [BlackBoxOptim.jl](https://github.com/robertfeldt/BlackBoxOptim.jl) +- OptimizationEvolutionary for [Evolutionary.jl](https://github.com/wildart/Evolutionary.jl) (see also [this documentation](https://wildart.github.io/Evolutionary.jl/dev/)) +- OptimizationGCMAES for [GCMAES.jl](https://github.com/AStupidBear/GCMAES.jl) +- OptimizationMOI for [MathOptInterface.jl](https://github.com/jump-dev/MathOptInterface.jl) (usage of algorithm via MathOptInterface API; see also the API [documentation](https://jump.dev/MathOptInterface.jl/stable/)) +- OptimizationMetaheuristics for [Metaheuristics.jl](https://github.com/jmejia8/Metaheuristics.jl) (see also [this documentation](https://jmejia8.github.io/Metaheuristics.jl/stable/)) +- OptimizationMultistartOptimization for [MultistartOptimization.jl](https://github.com/tpapp/MultistartOptimization.jl) (see also [this documentation](https://juliahub.com/docs/MultistartOptimization/cVZvi/0.1.0/)) +- OptimizationNLopt for [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) (usage via the NLopt API; see also the available [algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/)) +- OptimizationNOMAD for [NOMAD.jl](https://github.com/bbopt/NOMAD.jl) (see also [this documentation](https://bbopt.github.io/NOMAD.jl/stable/)) +- OptimizationNonconvex for [Nonconvex.jl](https://github.com/JuliaNonconvex/Nonconvex.jl) (see also [this documentation](https://julianonconvex.github.io/Nonconvex.jl/stable/)) +- OptimizationQuadDIRECT for [QuadDIRECT.jl](https://github.com/timholy/QuadDIRECT.jl) +- 
OptimizationSpeedMapping for [SpeedMapping.jl](https://github.com/NicolasL-S/SpeedMapping.jl) (see also [this documentation](https://nicolasl-s.github.io/SpeedMapping.jl/stable/)) ## Tutorials and Documentation For information on using the package, -[see the stable documentation](https://galacticoptim.sciml.ai/stable/). Use the -[in-development documentation](https://galacticoptim.sciml.ai/dev/) for the version of +[see the stable documentation](https://Optimization.sciml.ai/stable/). Use the +[in-development documentation](https://Optimization.sciml.ai/dev/) for the version of the documentation, which contains the unreleased features. ## Examples ```julia -using GalacticOptim +using Optimization rosenbrock(x,p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0,100.0] prob = OptimizationProblem(rosenbrock,x0,p) -using GalacticOptimJL +using OptimizationOptimJL sol = solve(prob,NelderMead()) -using GalacticBBO +using OptimizationBBO prob = OptimizationProblem(rosenbrock, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob,BBO_adaptive_de_rand_1_bin_radiuslimited()) ``` @@ -99,7 +99,7 @@ We can also explore other methods in a similar way: ```julia using ForwardDiff -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(f, x0, p) sol = solve(prob,BFGS()) ``` @@ -132,6 +132,6 @@ For instance, the above optimization task produces the following output: prob = OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, Fminbox(GradientDescent())) ``` -The examples clearly demonstrate that GalacticOptim.jl provides an intuitive +The examples clearly demonstrate that Optimization.jl provides an intuitive way of specifying optimization tasks and offers a relatively easy access to a wide range of optimization algorithms. 
diff --git a/docs/make.jl b/docs/make.jl index f3502aceb..8553a0269 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,20 +1,20 @@ -using Documenter, GalacticOptim +using Documenter, Optimization using FiniteDiff, ForwardDiff, ModelingToolkit, ReverseDiff, Tracker, Zygote makedocs( - sitename="GalacticOptim.jl", + sitename="Optimization.jl", authors="Chris Rackauckas, Vaibhav Kumar Dixit et al.", clean=true, doctest=false, - modules=[GalacticOptim, GalacticOptim.SciMLBase, FiniteDiff, + modules=[Optimization, Optimization.SciMLBase, FiniteDiff, ForwardDiff, ModelingToolkit, ReverseDiff, Tracker, Zygote], format=Documenter.HTML(analytics = "UA-90474609-3", assets=["assets/favicon.ico"], - canonical="https://galacticoptim.sciml.ai/stable/"), + canonical="https://Optimization.sciml.ai/stable/"), pages=[ - "GalacticOptim.jl: Unified Global Optimization Package" => "index.md", + "Optimization.jl: Unified Global Optimization Package" => "index.md", "Tutorials" => [ "Basic usage" => "tutorials/intro.md", @@ -48,6 +48,6 @@ makedocs( ) deploydocs( - repo="github.com/SciML/GalacticOptim.jl"; + repo="github.com/SciML/Optimization.jl"; push_preview=true ) diff --git a/docs/src/API/modelingtoolkit.md b/docs/src/API/modelingtoolkit.md index 0393adb67..ef11871df 100644 --- a/docs/src/API/modelingtoolkit.md +++ b/docs/src/API/modelingtoolkit.md @@ -1,6 +1,6 @@ # ModelingToolkit Integration -GalacticOptim.jl is heavily integrated with the ModelingToolkit.jl +Optimization.jl is heavily integrated with the ModelingToolkit.jl symbolic system for symbolic-numeric optimizations. It provides a front-end for automating the construction, parallelization, and optimization of code. Optimizers can better interface with the extra @@ -13,7 +13,7 @@ the [OptimizationFunction documentation](@id optfunction) for more details. Secondly, one can generate `OptimizationProblem`s for use in -GalacticOptim.jl from purely a symbolic front-end. 
This is the form +Optimization.jl from purely a symbolic front-end. This is the form users will encounter when using ModelingToolkit.jl directly, and its also the form supplied by domain-specific languages. For more information, see the [OptimizationSystem documentation](https://mtk.sciml.ai/dev/systems/OptimizationSystem/). diff --git a/docs/src/index.md b/docs/src/index.md index ccd5f755b..24569489b 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,9 +1,9 @@ -# GalacticOptim.jl +# Optimization.jl -GalacticOptim.jl is a package with a scope that is beyond your normal global optimization -package. GalacticOptim.jl seeks to bring together all of the optimization packages +Optimization.jl is a package with a scope that is beyond your normal global optimization +package. Optimization.jl seeks to bring together all of the optimization packages it can find, local and global, into one unified Julia interface. This means, you -learn one package and you learn them all! GalacticOptim.jl adds a few high-level +learn one package and you learn them all! Optimization.jl adds a few high-level features, such as integrating with automatic differentiation, to make its usage fairly simple for most cases, while allowing all of the options in a single unified interface. @@ -11,12 +11,12 @@ unified interface. ## Installation Assuming that you already have Julia correctly installed, it suffices to import -GalacticOptim.jl in the standard way: +Optimization.jl in the standard way: ```julia -import Pkg; Pkg.add("GalacticOptim") +import Pkg; Pkg.add("Optimization") ``` -The packages relevant to the core functionality of GalacticOptim.jl will be imported +The packages relevant to the core functionality of Optimization.jl will be imported accordingly and, in most cases, you do not have to worry about the manual installation of dependencies. However, you will need to add the specific optimizer packages. @@ -41,6 +41,6 @@ packages. 
✅ = supported -🟡 = supported in downstream library but not yet implemented in `GalacticOptim`; PR to add this functionality are welcome +🟡 = supported in downstream library but not yet implemented in `Optimization`; PR to add this functionality are welcome ❌ = not supported diff --git a/docs/src/optimization_packages/blackboxoptim.md b/docs/src/optimization_packages/blackboxoptim.md index 9872b01ee..77e0dd319 100644 --- a/docs/src/optimization_packages/blackboxoptim.md +++ b/docs/src/optimization_packages/blackboxoptim.md @@ -1,12 +1,12 @@ # BlackBoxOptim.jl [`BlackBoxOptim`](https://github.com/robertfeldt/BlackBoxOptim.jl) is a is a Julia package implementing **(Meta-)heuristic/stochastic algorithms** that do not require for the optimized function to be differentiable. -## Installation: GalacticBBO.jl +## Installation: OptimizationBBO.jl -To use this package, install the GalacticBBO package: +To use this package, install the OptimizationBBO package: ```julia -import Pkg; Pkg.add("GalacticBBO") +import Pkg; Pkg.add("OptimizationBBO") ``` ## Global Optimizers @@ -53,7 +53,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters=100000, maxtime=1000.0) ``` diff --git a/docs/src/optimization_packages/cmaevolutionstrategy.md b/docs/src/optimization_packages/cmaevolutionstrategy.md index 26c90690b..5e3c775c5 100644 --- a/docs/src/optimization_packages/cmaevolutionstrategy.md +++ b/docs/src/optimization_packages/cmaevolutionstrategy.md @@ -3,12 +3,12 @@ The CMAEvolutionStrategy algorithm is called by `CMAEvolutionStrategyOpt()` -## Installation: GalacticCMAEvolutionStrategy.jl +## Installation: OptimizationCMAEvolutionStrategy.jl -To use this package, 
install the GalacticCMAEvolutionStrategy package: +To use this package, install the OptimizationCMAEvolutionStrategy package: ```julia -import Pkg; Pkg.add("GalacticCMAEvolutionStrategy") +import Pkg; Pkg.add("OptimizationCMAEvolutionStrategy") ``` ## Global Optimizer @@ -26,6 +26,6 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, CMAEvolutionStrategyOpt()) ``` \ No newline at end of file diff --git a/docs/src/optimization_packages/evolutionary.md b/docs/src/optimization_packages/evolutionary.md index 8c8730777..59d4d862f 100644 --- a/docs/src/optimization_packages/evolutionary.md +++ b/docs/src/optimization_packages/evolutionary.md @@ -1,12 +1,12 @@ # Evolutionary.jl [`Evolutionary`](https://github.com/wildart/Evolutionary.jl) is a Julia package implementing various evolutionary and genetic algorithm. 
-## Installation: GalacticCMAEvolutionStrategy.jl +## Installation: OptimizationCMAEvolutionStrategy.jl -To use this package, install the GalacticCMAEvolutionStrategy package: +To use this package, install the OptimizationCMAEvolutionStrategy package: ```julia -import Pkg; Pkg.add("GalacticCMAEvolutionStrategy") +import Pkg; Pkg.add("OptimizationCMAEvolutionStrategy") ``` ## Global Optimizer @@ -36,6 +36,6 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, Evolutionary.CMAES(μ =40 , λ = 100)) ``` \ No newline at end of file diff --git a/docs/src/optimization_packages/flux.md b/docs/src/optimization_packages/flux.md index b69971c57..d2919f150 100644 --- a/docs/src/optimization_packages/flux.md +++ b/docs/src/optimization_packages/flux.md @@ -1,11 +1,11 @@ # Flux.jl -## Installation: GalacticFlux.jl +## Installation: OptimizationFlux.jl -To use this package, install the GalacticFlux package: +To use this package, install the OptimizationFlux package: ```julia -import Pkg; Pkg.add("GalacticFlux") +import Pkg; Pkg.add("OptimizationFlux") ``` ## Local Unconstrained Optimizers diff --git a/docs/src/optimization_packages/gcmaes.md b/docs/src/optimization_packages/gcmaes.md index edc13ca8c..1feb35992 100644 --- a/docs/src/optimization_packages/gcmaes.md +++ b/docs/src/optimization_packages/gcmaes.md @@ -1,12 +1,12 @@ # GCMAES.jl [`GCMAES`](https://github.com/AStupidBear/GCMAES.jl) is a Julia package implementing the **Gradient-based Covariance Matrix Adaptation Evolutionary Strategy** which can utilize the gradient information to speed up the optimization process. 
-## Installation: GalacticGCMAES.jl +## Installation: OptimizationGCMAES.jl -To use this package, install the GalacticGCMAES package: +To use this package, install the OptimizationGCMAES package: ```julia -import Pkg; Pkg.add("GalacticGCMAES") +import Pkg; Pkg.add("OptimizationGCMAES") ``` ## Global Optimizer @@ -26,7 +26,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, GCMAESOpt()) ``` @@ -36,7 +36,7 @@ We can also utilise the gradient information of the optimization problem to aid rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] -f = OptimizationFunction(rosenbrock, GalacticOptim.ForwardDiff) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +f = OptimizationFunction(rosenbrock, Optimization.ForwardDiff) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, GCMAESOpt()) ``` \ No newline at end of file diff --git a/docs/src/optimization_packages/mathoptinterface.md b/docs/src/optimization_packages/mathoptinterface.md index 8da827246..6323093a0 100644 --- a/docs/src/optimization_packages/mathoptinterface.md +++ b/docs/src/optimization_packages/mathoptinterface.md @@ -2,26 +2,26 @@ [MathOptInterface](https://github.com/jump-dev/MathOptInterface.jl) is Julia abstration layer to interface with variety of mathematical optimization solvers. 
-## Installation: GalacticMOI.jl +## Installation: OptimizationMOI.jl -To use this package, install the GalacticMOI package: +To use this package, install the OptimizationMOI package: ```julia -import Pkg; Pkg.add("GalacticMOI") +import Pkg; Pkg.add("OptimizationMOI") ``` ## Details -As of now the `GalacticOptim` interface to `MathOptInterface` implents only the `maxtime` common keyword arguments. An optimizer which is implemented in the `MathOptInterface` is can be called be called directly if no optimizer options have to be defined. For example using the `Ipopt.jl` optimizer: +As of now the `Optimization` interface to `MathOptInterface` implents only the `maxtime` common keyword arguments. An optimizer which is implemented in the `MathOptInterface` is can be called be called directly if no optimizer options have to be defined. For example using the `Ipopt.jl` optimizer: ```julia sol = solve(prob, Ipopt.Optimizer()) ``` -The optimizer options are handled in one of two ways. They can either be set via `GalacticOptim.MOI.OptimizerWithAttributes()` or as keyword argument to `solve`. For example using the `Ipopt.jl` optimizer: +The optimizer options are handled in one of two ways. They can either be set via `Optimization.MOI.OptimizerWithAttributes()` or as keyword argument to `solve`. For example using the `Ipopt.jl` optimizer: ```julia -opt = GalacticOptim.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...) +opt = Optimization.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...) sol = solve(prob, opt) sol = solve(prob, Ipopt.Optimizer(); option_name = option_value, ...) @@ -36,21 +36,21 @@ sol = solve(prob, Ipopt.Optimizer(); option_name = option_value, ...) 
- [`Ipopt.Optimizer`](https://juliahub.com/docs/Ipopt/yMQMo/0.7.0/) - Ipopt is a MathOptInterface optimizer, and thus its options are handled via - `GalacticOptim.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...)` + `Optimization.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...)` - The full list of optimizer options can be found in the [Ipopt Documentation](https://coin-or.github.io/Ipopt/OPTIONS.html#OPTIONS_REF) #### KNITRO.jl (MathOptInterface) - [`KNITRO.Optimizer`](https://github.com/jump-dev/KNITRO.jl) - KNITRO is a MathOptInterface optimizer, and thus its options are handled via - `GalacticOptim.MOI.OptimizerWithAttributes(KNITRO.Optimizer, "option_name" => option_value, ...)` + `Optimization.MOI.OptimizerWithAttributes(KNITRO.Optimizer, "option_name" => option_value, ...)` - The full list of optimizer options can be found in the [KNITRO Documentation](https://www.artelys.com/docs/knitro//3_referenceManual/callableLibraryAPI.html) #### AmplNLWriter.jl (MathOptInterface) - [`AmplNLWriter.Optimizer`](https://github.com/jump-dev/AmplNLWriter.jl) - AmplNLWriter is a MathOptInterface optimizer, and thus its options are handled via - `GalacticOptim.MOI.OptimizerWithAttributes(AmplNLWriter.Optimizer(algname), "option_name" => option_value, ...)` + `Optimization.MOI.OptimizerWithAttributes(AmplNLWriter.Optimizer(algname), "option_name" => option_value, ...)` - Possible `algname`s are: * `Bonmin_jll.amplexe` * `Couenne_jll.amplexe` @@ -63,25 +63,25 @@ To use one of the JLLs, they must be added first. 
For example: `Pkg.add("Bonmin_ - [`Juniper.Optimizer`](https://github.com/lanl-ansi/Juniper.jl) - Juniper is a MathOptInterface optimizer, and thus its options are handled via - `GalacticOptim.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...)` + `Optimization.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...)` - Juniper requires the choice of a relaxation method `nl_solver` which must be a MathOptInterface-based optimizer ```julia -using GalacticOptim, ForwardDiff +using Optimization, ForwardDiff rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) -prob = GalacticOptim.OptimizationProblem(f, x0, _p) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) +prob = Optimization.OptimizationProblem(f, x0, _p) using Juniper, Ipopt optimizer = Juniper.Optimizer # Choose a relaxation method -nl_solver = GalacticOptim.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "print_level"=>0) +nl_solver = Optimization.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "print_level"=>0) -opt = GalacticOptim.MOI.OptimizerWithAttributes(optimizer, "nl_solver"=>nl_solver) +opt = Optimization.MOI.OptimizerWithAttributes(optimizer, "nl_solver"=>nl_solver) sol = solve(prob, opt) ``` @@ -90,7 +90,7 @@ sol = solve(prob, opt) - [`BARON.Optimizer`](https://github.com/joehuchette/BARON.jl) - BARON is a MathOptInterface optimizer, and thus its options are handled via - `GalacticOptim.MOI.OptimizerWithAttributes(BARON.Optimizer, "option_name" => option_value, ...)` + `Optimization.MOI.OptimizerWithAttributes(BARON.Optimizer, "option_name" => option_value, ...)` - The full list of optimizer options can be found in the [BARON Documentation](https://minlp.com/baron-solver) @@ -99,7 +99,7 @@ sol = solve(prob, opt) - [`Ipopt.Optimizer`](https://juliahub.com/docs/Ipopt/yMQMo/0.7.0/) - Ipopt is a MathOptInterface optimizer, and 
thus its options are handled via - `GalacticOptim.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...)` + `Optimization.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "option_name" => option_value, ...)` - The full list of optimizer options can be found in the [Ipopt Documentation](https://coin-or.github.io/Ipopt/OPTIONS.html#OPTIONS_REF) @@ -110,5 +110,5 @@ sol = solve(prob, opt) - [`Alpine.Optimizer`](https://github.com/lanl-ansi/Alpine.jl) - Alpine is a MathOptInterface optimizer, and thus its options are handled via - `GalacticOptim.MOI.OptimizerWithAttributes(Alpine.Optimizer, "option_name" => option_value, ...)` + `Optimization.MOI.OptimizerWithAttributes(Alpine.Optimizer, "option_name" => option_value, ...)` - The full list of optimizer options can be found in the [Alpine Documentation](https://github.com/lanl-ansi/Alpine.jl) diff --git a/docs/src/optimization_packages/metaheuristics.md b/docs/src/optimization_packages/metaheuristics.md index d40152cee..35621e861 100644 --- a/docs/src/optimization_packages/metaheuristics.md +++ b/docs/src/optimization_packages/metaheuristics.md @@ -1,12 +1,12 @@ # Metaheuristics.jl [`Metaheuristics`](https://github.com/jmejia8/Metaheuristics.jl) is a is a Julia package implementing **metaheuristic algorithms** for global optiimization that do not require for the optimized function to be differentiable. 
-## Installation: GalacticMetaheuristics.jl +## Installation: OptimizationMetaheuristics.jl -To use this package, install the GalacticMetaheuristics package: +To use this package, install the OptimizationMetaheuristics package: ```julia -import Pkg; Pkg.add("GalacticMetaheuristics") +import Pkg; Pkg.add("OptimizationMetaheuristics") ``` ## Global Optimizer @@ -27,7 +27,7 @@ A `Metaheuristics` Single-Objective algorithm is called using one of the followi * Simulated Annealing: `SA()` * Whale Optimization Algorithm: `WOA()` -`Metaheuristics` also performs [`Multiobjective optimization`](https://jmejia8.github.io/Metaheuristics.jl/stable/examples/#Multiobjective-Optimization) but this is not yet supported by `GalacticOptim`. +`Metaheuristics` also performs [`Multiobjective optimization`](https://jmejia8.github.io/Metaheuristics.jl/stable/examples/#Multiobjective-Optimization) but this is not yet supported by `Optimization`. Each optimizer sets default settings based on the optimization problem but specific parameters can be set as shown in the original [`Documentation`](https://jmejia8.github.io/Metaheuristics.jl/stable/algorithms/) @@ -53,18 +53,18 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, ECA(), maxiters=100000, maxtime=1000.0) ``` -Per default `Metaheuristics` ignores the initial values `x0` set in the `OptimizationProblem`. In order to for `GalacticOptim` to use `x0` we have to set `use_initial=true`: +Per default `Metaheuristics` ignores the initial values `x0` set in the `OptimizationProblem`. 
In order for `Optimization` to use `x0` we have to set `use_initial=true`: ```julia rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, ECA(), use_initial=true, maxiters=100000, maxtime=1000.0) ``` @@ -73,7 +73,7 @@ sol = solve(prob, ECA(), use_initial=true, maxiters=100000, maxtime=1000.0) ### With Constraint Equations -While `Metaheuristics.jl` supports such constraints, `GalacticOptim.jl` currently does not relay these constraints. +While `Metaheuristics.jl` supports such constraints, `Optimization.jl` currently does not relay these constraints. diff --git a/docs/src/optimization_packages/multistartoptimization.md b/docs/src/optimization_packages/multistartoptimization.md index 9a8633efa..38ed03c10 100644 --- a/docs/src/optimization_packages/multistartoptimization.md +++ b/docs/src/optimization_packages/multistartoptimization.md @@ -5,16 +5,16 @@ Currently, only one global method (`TikTak`) is implemented and called by `MultiStartOptimization.TikTak(n)` where `n` is the number of initial Sobol points. -## Installation: GalacticMultiStartOptimization.jl +## Installation: OptimizationMultistartOptimization.jl -To use this package, install the GalacticMultiStartOptimization package: +To use this package, install the OptimizationMultistartOptimization package: ```julia -import Pkg; Pkg.add("GalacticMultiStartOptimization") +import Pkg; Pkg.add("OptimizationMultistartOptimization") ``` !!! note -You also need to load the relevant subpackage for the local method of you choice, for example if you plan to use one of the NLopt.jl's optimizers, you'd install and load GalacticNLopt as described in the [NLopt.jl](@ref)'s section. 
+You also need to load the relevant subpackage for the local method of your choice, for example if you plan to use one of the NLopt.jl's optimizers, you'd install and load OptimizationNLopt as described in the [NLopt.jl](@ref)'s section. ## Global Optimizer ### Without Constraint Equations @@ -31,18 +31,18 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, MultistartOptimization.TikTak(100), NLopt.LD_LBFGS()) ``` -You can use any `GalacticOptim` optimizers you like. The global method of the `MultiStartOptimization` is a positional argument and followed by the local method. This for example means we can perform a multistartoptimization with LBFGS as the optimizer using either the `NLopt.jl` or `Optim.jl` implementation as follows. Moreover, this interface allows you access and adjust all the optimizer settings as you normally would: +You can use any `Optimization` optimizers you like. The global method of the `MultiStartOptimization` is a positional argument and followed by the local method. This for example means we can perform a multistartoptimization with LBFGS as the optimizer using either the `NLopt.jl` or `Optim.jl` implementation as follows. 
Moreover, this interface allows you to access and adjust all the optimizer settings as you normally would: ```julia rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, MultistartOptimization.TikTak(100), NLopt.LD_LBFGS()) sol = solve(prob, MultistartOptimization.TikTak(100), LBFGS()) ``` diff --git a/docs/src/optimization_packages/nlopt.md b/docs/src/optimization_packages/nlopt.md index 712ba2ade..e584997df 100644 --- a/docs/src/optimization_packages/nlopt.md +++ b/docs/src/optimization_packages/nlopt.md @@ -1,12 +1,12 @@ # NLopt.jl [`NLopt`](https://github.com/JuliaOpt/NLopt.jl) is Julia package interfacing to the free/open-source [`NLopt library`](http://ab-initio.mit.edu/nlopt) which implements many optimization methods both global and local [`NLopt Documentation`](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/). 
-## Installation: GalacticNLopt.jl +## Installation: OptimizationNLopt.jl -To use this package, install the GalacticNLopt package: +To use this package, install the OptimizationNLopt package: ```julia -import Pkg; Pkg.add("GalacticNLopt") +import Pkg; Pkg.add("OptimizationNLopt") ``` ## Methods @@ -95,7 +95,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, NLopt.LN_NELDERMEAD()) ``` @@ -126,8 +126,8 @@ The Rosenbrock function can optimized using `NLopt.LD_LBFGS()` as follows: rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, NLopt.LD_LBFGS()) ``` @@ -165,7 +165,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, NLopt.GN_DIRECT()) ``` @@ -177,8 +177,8 @@ The Rosenbrock function can optimized using `NLopt.G_MLSL_LDS()` with `NLopt.LN_ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) +prob = 
Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, NLopt.G_MLSL_LDS(), local_method = NLopt.LD_LBFGS(), local_maxiters=10000) ``` @@ -187,7 +187,7 @@ The following algorithms in [`NLopt`](https://github.com/JuliaOpt/NLopt.jl) are constraint equations. However, lower and upper constraints set by `lb` and `ub` in the `OptimizationProblem` are required. !!! note "Constraints with NLopt" - Equality and inequality equation support for `NLopt` via `GalacticOptim` is not supported directly. However, you can use the MOI wrapper to use constraints with NLopt optimisers. + Equality and inequality equation support for `NLopt` via `Optimization` is not supported directly. However, you can use the MOI wrapper to use constraints with NLopt optimisers. `NLopt` global optimizers which fall into this category are: diff --git a/docs/src/optimization_packages/nomad.md b/docs/src/optimization_packages/nomad.md index 0387272c3..b9c07bdcb 100644 --- a/docs/src/optimization_packages/nomad.md +++ b/docs/src/optimization_packages/nomad.md @@ -3,19 +3,19 @@ The NOMAD algorithm is called by `NOMADOpt()` -## Installation: GalacticNOMAD.jl +## Installation: OptimizationNOMAD.jl -To use this package, install the GalacticNOMAD package: +To use this package, install the OptimizationNOMAD package: ```julia -import Pkg; Pkg.add("GalacticNOMAD") +import Pkg; Pkg.add("OptimizationNOMAD") ``` ## Global Optimizer ### Without Constraint Equations The method in [`NOMAD`](https://github.com/bbopt/NOMAD.jl) is performing global optimization on problems both with and without -constraint equations. Currently however, linear and nonlinear constraints defined in `GalacticOPtim` are not passed. +constraint equations. Currently however, linear and nonlinear constraints defined in `Optimization` are not passed. NOMAD works both with and without lower and upper boxconstraints set by `lb` and `ub` in the `OptimizationProblem`. 
@@ -30,8 +30,8 @@ p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) prob = OptimizationProblem(f, x0, _p) -sol = GalacticOptim.solve(prob,NOMADOpt()) +sol = Optimization.solve(prob,NOMADOpt()) prob = OptimizationProblem(f, x0, _p, lb = [-1.0,-1.0], ub = [1.5,1.5]) -sol = GalacticOptim.solve(prob,NOMADOpt()) +sol = Optimization.solve(prob,NOMADOpt()) ``` \ No newline at end of file diff --git a/docs/src/optimization_packages/nonconvex.md b/docs/src/optimization_packages/nonconvex.md index e0a4ed61e..abb8d39b8 100644 --- a/docs/src/optimization_packages/nonconvex.md +++ b/docs/src/optimization_packages/nonconvex.md @@ -1,12 +1,12 @@ # Nonconvex.jl [`Nonconvex`](https://github.com/JuliaNonconvex/Nonconvex.jl) is a is a Julia package implementing and wrapping nonconvex constrained optimization algorithms. -## Installation: GalacticNonconvex.jl +## Installation: OptimizationNonconvex.jl -To use this package, install the GalacticNonconvex package: +To use this package, install the OptimizationNonconvex package: ```julia -import Pkg; Pkg.add("GalacticNonconvex") +import Pkg; Pkg.add("OptimizationNonconvex") ``` ## Global Optimizer @@ -53,7 +53,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, MMA02(), maxiters=100000, maxtime=1000.0) ``` @@ -64,13 +64,13 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob, HyperoptAlg(IpoptAlg()), sub_options=(;max_iter=100)) ``` ### With Constraint Equations -While `Nonconvex.jl` supports such 
constraints, `GalacticOptim.jl` currently does not relay these constraints. +While `Nonconvex.jl` supports such constraints, `Optimization.jl` currently does not relay these constraints. diff --git a/docs/src/optimization_packages/optim.md b/docs/src/optimization_packages/optim.md index fbe8990fa..796873bf3 100644 --- a/docs/src/optimization_packages/optim.md +++ b/docs/src/optimization_packages/optim.md @@ -1,12 +1,12 @@ # [Optim.jl](@id optim) [`Optim`](https://github.com/JuliaNLSolvers/Optim.jl) is Julia package implementing various algorithm to perform univariate and multivariate optimization. -## Installation: GalacticOptimJL.jl +## Installation: OptimizationOptimJL.jl -To use this package, install the GalacticOptimJL package: +To use this package, install the OptimizationOptimJL package: ```julia -import Pkg; Pkg.add("GalacticOptimJL") +import Pkg; Pkg.add("OptimizationOptimJL") ``` ## Methods @@ -76,14 +76,14 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 cons= (x,p) -> [x[1]^2 + x[2]^2] x0 = zeros(2) p = [1.0,100.0] -prob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff();cons= cons) -prob = GalacticOptim.OptimizationProblem(prob, x0, p, lcons = [-5.0], ucons = [10.0]) +prob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons) +prob = Optimization.OptimizationProblem(prob, x0, p, lcons = [-5.0], ucons = [10.0]) sol = solve(prob, IPNewton()) ``` ### Derivative-Free -Derivative-free optimizers are optimizers that can be used even in cases where no derivatives or automatic differentiation is specified. While they tend to be less efficient than derivative-based optimizers, they can be easily applied to cases where defining derivatives is difficult. Note that while these methods do not support general constraints, all support bounds constraints via `lb` and `ub` in the `GalacticOptim.OptimizationProblem`. 
+Derivative-free optimizers are optimizers that can be used even in cases where no derivatives or automatic differentiation is specified. While they tend to be less efficient than derivative-based optimizers, they can be easily applied to cases where defining derivatives is difficult. Note that while these methods do not support general constraints, all support bounds constraints via `lb` and `ub` in the `Optimization.OptimizationProblem`. `Optim.jl` implements the following derivative-free algorithms: @@ -116,7 +116,7 @@ The Rosenbrock function can optimized using the `Optim.NelderMead()` as follows: rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0,100.0] -prob = GalacticOptim.OptimizationProblem(rosenbrock, x0, p) +prob = Optimization.OptimizationProblem(rosenbrock, x0, p) sol = solve(prob, Optim.NelderMead()) ``` @@ -261,8 +261,8 @@ The Rosenbrock function can optimized using the `Optim.LD_LBFGS()` as follows: rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0,100.0] -optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) -prob = GalacticOptim.OptimizationProblem(optprob, x0, p, lb=[-1.0, -1.0], ub=[0.8, 0.8]) +optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) +prob = Optimization.OptimizationProblem(optprob, x0, p, lb=[-1.0, -1.0], ub=[0.8, 0.8]) sol = solve(prob, NLopt.LD_LBFGS()) ``` @@ -314,7 +314,7 @@ rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0,100.0] f = OptimizationFunction(rosenbrock,ModelingToolkit.AutoModelingToolkit(),x0,p,grad=true,hess=true) -prob = GalacticOptim.OptimizationProblem(f,x0,p) +prob = Optimization.OptimizationProblem(f,x0,p) sol = solve(prob,Optim.Newton()) ``` @@ -348,8 +348,8 @@ rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 cons= (x,p) -> [x[1]^2 + x[2]^2] x0 = zeros(2) p = [1.0,100.0] -optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff();cons= cons) -prob = 
GalacticOptim.OptimizationProblem(optprob, x0, p) +optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons) +prob = Optimization.OptimizationProblem(optprob, x0, p) sol = solve(prob, Optim.KrylovTrustRegion()) ``` @@ -358,7 +358,7 @@ sol = solve(prob, Optim.KrylovTrustRegion()) ### Without Constraint Equations The following method in [`Optim`](https://github.com/JuliaNLSolvers/Optim.jl) is performing global optimization on problems without -constraint equations. It works both with and without lower and upper constraints set by `lb` and `ub` in the `GalacticOptim.OptimizationProblem`. +constraint equations. It works both with and without lower and upper constraints set by `lb` and `ub` in the `Optimization.OptimizationProblem`. - [`Optim.ParticleSwarm()`](https://julianlsolvers.github.io/Optim.jl/stable/#algo/particle_swarm/): **Particle Swarm Optimization** @@ -375,7 +375,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0,100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) sol = solve(prob, Optim.ParticleSwarm(lower=prob.lb, upper= prob.ub, n_particles=100)) ``` @@ -402,7 +402,7 @@ The Rosenbrock function can optimized using the `Optim.SAMIN()` as follows: rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0,100.0] -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) +prob = Optimization.OptimizationProblem(f, x0, p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) sol = solve(prob, Optim.SAMIN()) ``` diff --git a/docs/src/optimization_packages/quaddirect.md b/docs/src/optimization_packages/quaddirect.md index 4f74ff4ff..f5dc85cdf 100644 --- 
a/docs/src/optimization_packages/quaddirect.md +++ b/docs/src/optimization_packages/quaddirect.md @@ -3,12 +3,12 @@ The QuadDIRECT algorithm is called using `QuadDirect()`. -## Installation: GalacticQuadDIRECT.jl +## Installation: OptimizationQuadDIRECT.jl -To use this package, install the GalacticQuadDIRECT package: +To use this package, install the OptimizationQuadDIRECT package: ```julia -import Pkg; Pkg.add("GalacticQuadDIRECT") +import Pkg; Pkg.add("OptimizationQuadDIRECT") ``` Also note that `QuadDIRECT` should (for now) be installed by doing: @@ -32,7 +32,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] f = OptimizationFunction(rosenbrock) -prob = GalacticOptim.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) +prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) solve(prob, QuadDirect(), splits = ([-0.9, 0, 0.9], [-0.8, 0, 0.8])) ``` diff --git a/docs/src/optimization_packages/speedmapping.md b/docs/src/optimization_packages/speedmapping.md index 37aecb60a..acb50da89 100644 --- a/docs/src/optimization_packages/speedmapping.md +++ b/docs/src/optimization_packages/speedmapping.md @@ -3,12 +3,12 @@ The SpeedMapping algorithm is called by `SpeedMappingOpt()` -## Installation: GalacticSpeedMapping.jl +## Installation: OptimizationSpeedMapping.jl -To use this package, install the GalacticSpeedMapping package: +To use this package, install the OptimizationSpeedMapping package: ```julia -import Pkg; Pkg.add("GalacticSpeedMapping") +import Pkg; Pkg.add("OptimizationSpeedMapping") ``` ## Global Optimizer @@ -25,7 +25,7 @@ The Rosenbrock function can be optimized using the `SpeedMappingOpt()` with and rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(f, x0, _p) sol = 
solve(prob,SpeedMappingOpt()) diff --git a/docs/src/tutorials/intro.md b/docs/src/tutorials/intro.md index 6f81e5fad..0412371db 100644 --- a/docs/src/tutorials/intro.md +++ b/docs/src/tutorials/intro.md @@ -6,25 +6,25 @@ from BlackBoxOptim.jl on the Rosenbrock equation. The simplest copy-pasteable code to get started is the following: ```julia -using GalacticOptim +using Optimization rosenbrock(x,p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0,100.0] prob = OptimizationProblem(rosenbrock,x0,p) -using GalacticOptimJL +using OptimizationOptimJL sol = solve(prob,NelderMead()) -using GalacticBBO +using OptimizationBBO prob = OptimizationProblem(rosenbrock, x0, p, lb = [-1.0,-1.0], ub = [1.0,1.0]) sol = solve(prob,BBO_adaptive_de_rand_1_bin_radiuslimited()) ``` -Notice that GalacticOptim.jl is the core glue package that holds all of the common +Notice that Optimization.jl is the core glue package that holds all of the common -pieces, but to solve the equations we need to use a solver package. Here, GalcticOptimJL +pieces, but to solve the equations we need to use a solver package. Here, OptimizationOptimJL -is for [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) and GalacticBBO is for +is for [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) and OptimizationBBO is for [BlackBoxOptim.jl](https://github.com/robertfeldt/BlackBoxOptim.jl). 
The output of the first optimization task (with the `NelderMead()` algorithm) @@ -62,7 +62,7 @@ We can also explore other methods in a similar way: ```julia using ForwardDiff -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(f, x0, p) sol = solve(prob,BFGS()) ``` @@ -96,6 +96,6 @@ For instance, the above optimization task produces the following output: sol = solve(prob, Fminbox(GradientDescent())) ``` -The examples clearly demonstrate that GalacticOptim.jl provides an intuitive +The examples clearly demonstrate that Optimization.jl provides an intuitive way of specifying optimization tasks and offers a relatively easy access to a wide range of optimization algorithms. diff --git a/docs/src/tutorials/minibatch.md b/docs/src/tutorials/minibatch.md index b1eff7325..43a0d7804 100644 --- a/docs/src/tutorials/minibatch.md +++ b/docs/src/tutorials/minibatch.md @@ -2,11 +2,11 @@ !!! note - This example uses the GalacticOptimJL.jl package. See the [Optim.jl page](@ref optim) + This example uses the OptimizationOptimJL.jl package. See the [Optim.jl page](@ref optim) for details on the installation and usage. 
```julia -using DiffEqFlux, GalacticOptim, GalacticOptimJL, OrdinaryDiffEq +using DiffEqFlux, Optimization, OptimizationOptimJL, OrdinaryDiffEq function newtons_cooling(du, u, p, t) temp = u[1] @@ -62,9 +62,9 @@ train_loader = Flux.Data.DataLoader((ode_data, t), batchsize = k) numEpochs = 300 l1 = loss_adjoint(pp, train_loader.data[1], train_loader.data[2])[1] -optfun = OptimizationFunction((θ, p, batch, time_batch) -> loss_adjoint(θ, batch, time_batch), GalacticOptim.AutoZygote()) +optfun = OptimizationFunction((θ, p, batch, time_batch) -> loss_adjoint(θ, batch, time_batch), Optimization.AutoZygote()) optprob = OptimizationProblem(optfun, pp) using IterTools: ncycle -res1 = GalacticOptim.solve(optprob, ADAM(0.05), ncycle(train_loader, numEpochs), callback = callback) +res1 = Optimization.solve(optprob, ADAM(0.05), ncycle(train_loader, numEpochs), callback = callback) @test 10res1.minimum < l1 ``` diff --git a/docs/src/tutorials/rosenbrock.md b/docs/src/tutorials/rosenbrock.md index 65257e5e7..59369bfbe 100644 --- a/docs/src/tutorials/rosenbrock.md +++ b/docs/src/tutorials/rosenbrock.md @@ -2,25 +2,25 @@ !!! note - This example uses many different solvers of GalacticOptim.jl. Each solver + This example uses many different solvers of Optimization.jl. Each solver subpackage needs to be installed separate. For example, for the details on - the installation and usage of GalacticOptimJL.jl package, see the + the installation and usage of OptimizationOptimJL.jl package, see the [Optim.jl page](@ref optim). 
```julia -using GalacticOptim, Optim, ForwardDiff, Zygote, Test, Random +using Optimization, Optim, ForwardDiff, Zygote, Test, Random rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] -f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) +f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) l1 = rosenbrock(x0, _p) prob = OptimizationProblem(f, x0, _p) ## Optim.jl Solvers -using GalacticOptimJL +using OptimizationOptimJL sol = solve(prob, SimulatedAnnealing()) @test 10*sol.minimum < l1 @@ -36,7 +36,7 @@ sol = solve(prob, NelderMead()) @test 10*sol.minimum < l1 cons= (x,p) -> [x[1]^2 + x[2]^2] -optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff();cons= cons) +optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons) prob = OptimizationProblem(optprob, x0) @@ -68,18 +68,18 @@ function con2_c(x,p) [x[1]^2 + x[2]^2, x[2]*sin(x[1])-x[1]] end -optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff();cons= con2_c) +optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= con2_c) prob = OptimizationProblem(optprob, x0, lcons = [-Inf,-Inf], ucons = [Inf,Inf]) sol = solve(prob, IPNewton()) @test 10*sol.minimum < l1 cons_circ = (x,p) -> [x[1]^2 + x[2]^2] -optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff();cons= cons_circ) +optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons_circ) prob = OptimizationProblem(optprob, x0, lcons = [-Inf], ucons = [0.25^2]) sol = solve(prob, IPNewton()) @test sqrt(cons(sol.minimizer,nothing)[1]) ≈ 0.25 rtol = 1e-6 -optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoZygote()) +optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) prob = OptimizationProblem(optprob, x0) sol = solve(prob, ADAM(), maxiters = 1000, progress = false) @test 10*sol.minimum < l1 @@ -94,7 +94,7 @@ prob = 
OptimizationProblem(optprob, x0, lb=[-1.0, -1.0], ub=[0.8, 0.8]) ## CMAEvolutionStrategy.jl solvers -using GalacticCMAEvolutionStrategy +using OptimizationCMAEvolutionStrategy sol = solve(prob, CMAEvolutionStrategyOpt()) @test 10*sol.minimum < l1 @@ -102,7 +102,7 @@ rosenbrock(x, p=nothing) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 ## NLopt.jl solvers -using GalacticNLopt +using OptimizationNLopt prob = OptimizationProblem(optprob, x0) sol = solve(prob, Opt(:LN_BOBYQA, 2)) @test 10*sol.minimum < l1 @@ -119,14 +119,14 @@ sol = solve(prob, Opt(:G_MLSL_LDS, 2), nstart=2, local_method = Opt(:LD_LBFGS, 2 ## Evolutionary.jl Solvers -using GalacticEvolutionary +using OptimizationEvolutionary sol = solve(prob, CMAES(μ =40 , λ = 100),abstol=1e-15) @test 10*sol.minimum < l1 ## BlackBoxOptim.jl Solvers -using GalacticBBO -prob = GalacticOptim.OptimizationProblem(optprob, x0, lb=[-1.0, -1.0], ub=[0.8, 0.8]) +using OptimizationBBO +prob = Optimization.OptimizationProblem(optprob, x0, lb=[-1.0, -1.0], ub=[0.8, 0.8]) sol = solve(prob, BBO()) @test 10*sol.minimum < l1 ``` diff --git a/docs/src/tutorials/symbolic.md b/docs/src/tutorials/symbolic.md index 1ce501067..7917ecb18 100644 --- a/docs/src/tutorials/symbolic.md +++ b/docs/src/tutorials/symbolic.md @@ -2,11 +2,11 @@ !!! note - This example uses the GalacticOptimJL.jl package. See the [Optim.jl page](@ref optim) + This example uses the OptimizationOptimJL.jl package. See the [Optim.jl page](@ref optim) for details on the installation and usage. 
```julia -using ModelingToolkit, GalacticOptim, GalacticOptimJL +using ModelingToolkit, Optimization, OptimizationOptimJL @variables x y @parameters a b diff --git a/lib/GalacticMOI/test/runtests.jl b/lib/GalacticMOI/test/runtests.jl deleted file mode 100644 index 17a2e4d5f..000000000 --- a/lib/GalacticMOI/test/runtests.jl +++ /dev/null @@ -1,33 +0,0 @@ -using GalacticMOI, GalacticOptim, Ipopt, NLopt, Zygote -using Test - -@testset "GalacticMOI.jl" begin - rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 - x0 = zeros(2) - _p = [1.0, 100.0] - l1 = rosenbrock(x0, _p) - - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), GalacticOptim.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense=GalacticOptim.MaxSense) - - sol = solve(prob, Ipopt.Optimizer()) - @test 10 * sol.minimum < l1 - - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense=GalacticOptim.MinSense) - - sol = solve(prob, Ipopt.Optimizer()) - @test 10 * sol.minimum < l1 - - sol = solve(prob, GalacticMOI.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "max_cpu_time" => 60.0)) - @test 10 * sol.minimum < l1 - - sol = solve(prob, GalacticMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer, "algorithm" => :LN_BOBYQA)) - @test 10 * sol.minimum < l1 - - sol = solve(prob, GalacticMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer, "algorithm" => :LD_LBFGS)) - @test 10 * sol.minimum < l1 - - sol = solve(prob, GalacticMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer, "algorithm" => :LD_LBFGS)) - @test 10 * sol.minimum < l1 -end diff --git a/lib/GalacticMultistartOptimization/test/runtests.jl b/lib/GalacticMultistartOptimization/test/runtests.jl deleted file mode 100644 index 71588d804..000000000 --- a/lib/GalacticMultistartOptimization/test/runtests.jl +++ /dev/null @@ -1,14 +0,0 @@ -using Pkg; Pkg.develop(path=joinpath(@__DIR__,"../../","GalacticNLopt")); -using GalacticMultistartOptimization, GalacticOptim, ForwardDiff, 
GalacticNLopt -using Test - -@testset "GalacticMultistartOptimization.jl" begin - rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 - x0 = zeros(2) - _p = [1.0, 100.0] - l1 = rosenbrock(x0, _p) - f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) - prob = GalacticOptim.OptimizationProblem(f, x0, _p, lb=[-1.0, -1.0], ub=[1.5, 1.5]) - sol = solve(prob, GalacticMultistartOptimization.TikTak(100), GalacticNLopt.Opt(:LD_LBFGS, 2)) - @test 10 * sol.minimum < l1 -end diff --git a/lib/GalacticPolyalgorithms/Project.toml b/lib/GalacticPolyalgorithms/Project.toml deleted file mode 100644 index fd17d3fe6..000000000 --- a/lib/GalacticPolyalgorithms/Project.toml +++ /dev/null @@ -1,22 +0,0 @@ -name = "GalacticPolyalgorithms" -uuid = "8202cac9-28d3-4ced-94fb-69829a1553b4" -authors = ["Vaibhav Dixit and contributors"] -version = "0.1.0" - -[deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" -GalacticOptimJL = "9d3c5eb1-403b-401b-8c0f-c11105342e6b" -GalacticOptimisers = "86b7a833-eb4b-49e2-87ed-89357ad7afa2" - -[compat] -GalacticOptim = "3" -GalacticOptimJL = "0.1" -GalacticOptimisers = "0.1" -julia = "1.6" - -[extras] -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[targets] -test = ["ForwardDiff", "Test"] diff --git a/lib/GalacticPolyalgorithms/test/runtests.jl b/lib/GalacticPolyalgorithms/test/runtests.jl deleted file mode 100644 index 054834015..000000000 --- a/lib/GalacticPolyalgorithms/test/runtests.jl +++ /dev/null @@ -1,14 +0,0 @@ -using GalacticPolyalgorithms, GalacticOptim, ForwardDiff -using Test - -@testset "GalacticPolyalgorithms.jl" begin - rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 - x0 = zeros(2) - _p = [1.0, 100.0] - l1 = rosenbrock(x0, _p) - - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) - prob = OptimizationProblem(optprob, x0, _p) - sol = GalacticOptim.solve(prob, PolyOpt(), maxiters=1000) - @test 10 * sol.minimum < 
l1 -end diff --git a/lib/GalacticBBO/LICENSE b/lib/OptimizationBBO/LICENSE similarity index 100% rename from lib/GalacticBBO/LICENSE rename to lib/OptimizationBBO/LICENSE diff --git a/lib/GalacticBBO/Project.toml b/lib/OptimizationBBO/Project.toml similarity index 66% rename from lib/GalacticBBO/Project.toml rename to lib/OptimizationBBO/Project.toml index 6827beefc..b0a6841ab 100644 --- a/lib/GalacticBBO/Project.toml +++ b/lib/OptimizationBBO/Project.toml @@ -1,16 +1,16 @@ -name = "GalacticBBO" -uuid = "80c49c3a-6557-47d9-8f5b-13d0a2920315" +name = "OptimizationBBO" +uuid = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209" -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" [compat] julia = "1" BlackBoxOptim = "0.6" -GalacticOptim = "3" +Optimization = "3" [extras] Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/lib/GalacticBBO/src/GalacticBBO.jl b/lib/OptimizationBBO/src/OptimizationBBO.jl similarity index 90% rename from lib/GalacticBBO/src/GalacticBBO.jl rename to lib/OptimizationBBO/src/OptimizationBBO.jl index 341b14e7a..70213de60 100644 --- a/lib/GalacticBBO/src/GalacticBBO.jl +++ b/lib/OptimizationBBO/src/OptimizationBBO.jl @@ -1,6 +1,6 @@ -module GalacticBBO +module OptimizationBBO -using BlackBoxOptim, GalacticOptim, GalacticOptim.SciMLBase +using BlackBoxOptim, Optimization, Optimization.SciMLBase abstract type BBO end @@ -47,7 +47,7 @@ function __map_optimizer_args(prob::SciMLBase.OptimizationProblem, opt::BBO; return mapped_args end -function SciMLBase.__solve(prob::SciMLBase.OptimizationProblem, opt::BBO, data=GalacticOptim.DEFAULT_DATA; +function SciMLBase.__solve(prob::SciMLBase.OptimizationProblem, opt::BBO, data=Optimization.DEFAULT_DATA; callback=(args...) 
-> (false), maxiters::Union{Number,Nothing}=nothing, maxtime::Union{Number,Nothing}=nothing, @@ -57,7 +57,7 @@ function SciMLBase.__solve(prob::SciMLBase.OptimizationProblem, opt::BBO, data=G local x, cur, state - if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) end @@ -75,8 +75,8 @@ function SciMLBase.__solve(prob::SciMLBase.OptimizationProblem, opt::BBO, data=G cb_call end - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) _loss = function (θ) diff --git a/lib/GalacticBBO/test/runtests.jl b/lib/OptimizationBBO/test/runtests.jl similarity index 64% rename from lib/GalacticBBO/test/runtests.jl rename to lib/OptimizationBBO/test/runtests.jl index b7f548117..6edfc591d 100644 --- a/lib/GalacticBBO/test/runtests.jl +++ b/lib/OptimizationBBO/test/runtests.jl @@ -1,14 +1,14 @@ -using GalacticBBO, GalacticOptim +using OptimizationBBO, Optimization using Test -@testset "GalacticBBO.jl" begin +@testset "OptimizationBBO.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) optprob = OptimizationFunction(rosenbrock) - prob = GalacticOptim.OptimizationProblem(optprob, x0, _p, lb=[-1.0, -1.0], ub=[0.8, 0.8]) + prob = Optimization.OptimizationProblem(optprob, x0, _p, lb=[-1.0, -1.0], ub=[0.8, 0.8]) sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited()) @test 10 * sol.minimum < l1 end diff --git a/lib/GalacticCMAEvolutionStrategy/LICENSE b/lib/OptimizationCMAEvolutionStrategy/LICENSE similarity index 100% rename from lib/GalacticCMAEvolutionStrategy/LICENSE rename to lib/OptimizationCMAEvolutionStrategy/LICENSE diff --git a/lib/GalacticCMAEvolutionStrategy/Project.toml b/lib/OptimizationCMAEvolutionStrategy/Project.toml similarity index 64% rename from 
lib/GalacticCMAEvolutionStrategy/Project.toml rename to lib/OptimizationCMAEvolutionStrategy/Project.toml index 1e0a81cf3..8f5d976e7 100644 --- a/lib/GalacticCMAEvolutionStrategy/Project.toml +++ b/lib/OptimizationCMAEvolutionStrategy/Project.toml @@ -1,16 +1,16 @@ -name = "GalacticCMAEvolutionStrategy" -uuid = "12fbe61e-72e2-4b68-be42-232f99b30434" +name = "OptimizationCMAEvolutionStrategy" +uuid = "bd407f91-200f-4536-9381-e4ba712f53f8" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] CMAEvolutionStrategy = "8d3b24bd-414e-49e0-94fb-163cc3a3e411" -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" [compat] julia = "1" CMAEvolutionStrategy = "0.2" -GalacticOptim = "3" +Optimization = "3" [extras] Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/lib/GalacticCMAEvolutionStrategy/src/GalacticCMAEvolutionStrategy.jl b/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl similarity index 86% rename from lib/GalacticCMAEvolutionStrategy/src/GalacticCMAEvolutionStrategy.jl rename to lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl index 2939bb7f1..d39bde95f 100644 --- a/lib/GalacticCMAEvolutionStrategy/src/GalacticCMAEvolutionStrategy.jl +++ b/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl @@ -1,6 +1,6 @@ -module GalacticCMAEvolutionStrategy +module OptimizationCMAEvolutionStrategy -using CMAEvolutionStrategy, GalacticOptim, GalacticOptim.SciMLBase +using CMAEvolutionStrategy, Optimization, Optimization.SciMLBase export CMAEvolutionStrategyOpt @@ -38,7 +38,7 @@ function __map_optimizer_args(prob::OptimizationProblem, opt::CMAEvolutionStrate end -function SciMLBase.__solve(prob::OptimizationProblem, opt::CMAEvolutionStrategyOpt, data=GalacticOptim.DEFAULT_DATA; +function SciMLBase.__solve(prob::OptimizationProblem, opt::CMAEvolutionStrategyOpt, data=Optimization.DEFAULT_DATA; callback=(args...) 
-> (false), maxiters::Union{Number,Nothing}=nothing, maxtime::Union{Number,Nothing}=nothing, @@ -47,7 +47,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::CMAEvolutionStrategyO kwargs...) local x, cur, state - if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) end @@ -62,8 +62,8 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::CMAEvolutionStrategyO cb_call end - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) _loss = function (θ) x = prob.f(θ, prob.p, cur...) diff --git a/lib/GalacticCMAEvolutionStrategy/test/runtests.jl b/lib/OptimizationCMAEvolutionStrategy/test/runtests.jl similarity index 76% rename from lib/GalacticCMAEvolutionStrategy/test/runtests.jl rename to lib/OptimizationCMAEvolutionStrategy/test/runtests.jl index c57052c5e..4612bea59 100644 --- a/lib/GalacticCMAEvolutionStrategy/test/runtests.jl +++ b/lib/OptimizationCMAEvolutionStrategy/test/runtests.jl @@ -1,7 +1,7 @@ -using GalacticCMAEvolutionStrategy, GalacticOptim +using OptimizationCMAEvolutionStrategy, Optimization using Test -@testset "GalacticCMAEvolutionStrategy.jl" begin +@testset "OptimizationCMAEvolutionStrategy.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] diff --git a/lib/GalacticEvolutionary/LICENSE b/lib/OptimizationEvolutionary/LICENSE similarity index 100% rename from lib/GalacticEvolutionary/LICENSE rename to lib/OptimizationEvolutionary/LICENSE diff --git a/lib/GalacticEvolutionary/Project.toml b/lib/OptimizationEvolutionary/Project.toml similarity index 69% rename from lib/GalacticEvolutionary/Project.toml rename to lib/OptimizationEvolutionary/Project.toml index cb761eb1a..e215717e9 100644 --- a/lib/GalacticEvolutionary/Project.toml +++ 
b/lib/OptimizationEvolutionary/Project.toml @@ -1,17 +1,17 @@ -name = "GalacticEvolutionary" -uuid = "e82bed00-89d9-4bf2-a298-ae594e22fac7" +name = "OptimizationEvolutionary" +uuid = "cb963754-43f6-435e-8d4b-99009ff27753" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] Evolutionary = "86b6b26d-c046-49b6-aa0b-5f0f74682bd6" -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" [compat] julia = "1" Evolutionary = "0.11" -GalacticOptim = "3" +Optimization = "3" Reexport = "1.2" [extras] diff --git a/lib/GalacticEvolutionary/src/GalacticEvolutionary.jl b/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl similarity index 89% rename from lib/GalacticEvolutionary/src/GalacticEvolutionary.jl rename to lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl index 5d804bd7e..4f20104a8 100644 --- a/lib/GalacticEvolutionary/src/GalacticEvolutionary.jl +++ b/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl @@ -1,6 +1,6 @@ -module GalacticEvolutionary +module OptimizationEvolutionary -using Reexport, GalacticOptim, GalacticOptim.SciMLBase +using Reexport, Optimization, Optimization.SciMLBase @reexport using Evolutionary decompose_trace(trace::Evolutionary.OptimizationTrace) = last(trace) @@ -45,7 +45,7 @@ function __map_optimizer_args(prob::OptimizationProblem, opt::Evolutionary.Abstr return Evolutionary.Options(; mapped_args...) end -function SciMLBase.__solve(prob::OptimizationProblem, opt::Evolutionary.AbstractOptimizer, data=GalacticOptim.DEFAULT_DATA; +function SciMLBase.__solve(prob::OptimizationProblem, opt::Evolutionary.AbstractOptimizer, data=Optimization.DEFAULT_DATA; callback=(args...) -> (false), maxiters::Union{Number,Nothing}=nothing, maxtime::Union{Number,Nothing}=nothing, @@ -54,7 +54,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Evolutionary.Abstract progress=false, kwargs...) 
local x, cur, state - if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) end @@ -69,8 +69,8 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Evolutionary.Abstract cb_call end - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) _loss = function (θ) x = prob.f(θ, prob.p, cur...) diff --git a/lib/GalacticEvolutionary/test/runtests.jl b/lib/OptimizationEvolutionary/test/runtests.jl similarity index 65% rename from lib/GalacticEvolutionary/test/runtests.jl rename to lib/OptimizationEvolutionary/test/runtests.jl index 40d6667e1..24b425eb6 100644 --- a/lib/GalacticEvolutionary/test/runtests.jl +++ b/lib/OptimizationEvolutionary/test/runtests.jl @@ -1,13 +1,13 @@ -using GalacticEvolutionary, GalacticOptim +using OptimizationEvolutionary, Optimization using Test -@testset "GalacticEvolutionary.jl" begin +@testset "OptimizationEvolutionary.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) optprob = OptimizationFunction(rosenbrock) - prob = GalacticOptim.OptimizationProblem(optprob, x0, _p) + prob = Optimization.OptimizationProblem(optprob, x0, _p) sol = solve(prob, CMAES(μ=40, λ=100), abstol=1e-15) @test 10 * sol.minimum < l1 end diff --git a/lib/GalacticFlux/LICENSE b/lib/OptimizationFlux/LICENSE similarity index 100% rename from lib/GalacticFlux/LICENSE rename to lib/OptimizationFlux/LICENSE diff --git a/lib/GalacticFlux/Project.toml b/lib/OptimizationFlux/Project.toml similarity index 78% rename from lib/GalacticFlux/Project.toml rename to lib/OptimizationFlux/Project.toml index 36f09afde..f18151697 100644 --- a/lib/GalacticFlux/Project.toml +++ b/lib/OptimizationFlux/Project.toml @@ -1,11 +1,11 @@ -name = "GalacticFlux" -uuid = 
"08766a1d-66fd-4ddf-8f8a-8b0fccaded3b" +name = "OptimizationFlux" +uuid = "253f991c-a7b2-45f8-8852-8b9a9df78a86" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" @@ -15,7 +15,7 @@ julia = "1" Flux = "0.13" ProgressLogging = "0.1" Reexport = "1.2" -GalacticOptim = "3" +Optimization = "3" [extras] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" diff --git a/lib/GalacticFlux/src/GalacticFlux.jl b/lib/OptimizationFlux/src/OptimizationFlux.jl similarity index 78% rename from lib/GalacticFlux/src/GalacticFlux.jl rename to lib/OptimizationFlux/src/OptimizationFlux.jl index c3dcad74a..a8d570008 100644 --- a/lib/GalacticFlux/src/GalacticFlux.jl +++ b/lib/OptimizationFlux/src/OptimizationFlux.jl @@ -1,17 +1,17 @@ -module GalacticFlux +module OptimizationFlux -using GalacticOptim, Reexport, Printf, ProgressLogging, GalacticOptim.SciMLBase +using Optimization, Reexport, Printf, ProgressLogging, Optimization.SciMLBase @reexport using Flux -function SciMLBase.__solve(prob::OptimizationProblem, opt::Flux.Optimise.AbstractOptimiser, data = GalacticOptim.DEFAULT_DATA; +function SciMLBase.__solve(prob::OptimizationProblem, opt::Flux.Optimise.AbstractOptimiser, data = Optimization.DEFAULT_DATA; maxiters::Number = 0, callback = (args...) -> (false), progress = false, save_best = true, kwargs...) 
- if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) else - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - data = GalacticOptim.take(data, maxiters) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + data = Optimization.take(data, maxiters) end # Flux is silly and doesn't have an abstract type on its optimizers, so assume @@ -24,10 +24,10 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Flux.Optimise.Abstrac min_opt = 1 min_θ = prob.u0 - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) t0 = time() - GalacticOptim.@withprogress progress name = "Training" begin + Optimization.@withprogress progress name = "Training" begin for (i, d) in enumerate(data) f.grad(G, θ, d...) x = f.f(θ, prob.p, d...) diff --git a/lib/GalacticFlux/test/runtests.jl b/lib/OptimizationFlux/test/runtests.jl similarity index 62% rename from lib/GalacticFlux/test/runtests.jl rename to lib/OptimizationFlux/test/runtests.jl index ad6cf34f0..1c1b75778 100644 --- a/lib/GalacticFlux/test/runtests.jl +++ b/lib/OptimizationFlux/test/runtests.jl @@ -1,17 +1,17 @@ -using GalacticFlux, GalacticOptim, ForwardDiff +using OptimizationFlux, Optimization, ForwardDiff using Test -@testset "GalacticFlux.jl" begin +@testset "OptimizationFlux.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, _p) - sol = GalacticOptim.solve(prob, Flux.ADAM(0.1), maxiters = 1000) + sol = Optimization.solve(prob, Flux.ADAM(0.1), maxiters = 1000) @test 10 * sol.minimum < l1 prob = OptimizationProblem(optprob, x0, _p) diff --git a/lib/GalacticGCMAES/LICENSE 
b/lib/OptimizationGCMAES/LICENSE similarity index 100% rename from lib/GalacticGCMAES/LICENSE rename to lib/OptimizationGCMAES/LICENSE diff --git a/lib/GalacticGCMAES/Project.toml b/lib/OptimizationGCMAES/Project.toml similarity index 69% rename from lib/GalacticGCMAES/Project.toml rename to lib/OptimizationGCMAES/Project.toml index 563ac02c4..6fc773edd 100644 --- a/lib/GalacticGCMAES/Project.toml +++ b/lib/OptimizationGCMAES/Project.toml @@ -1,15 +1,15 @@ -name = "GalacticGCMAES" -uuid = "8aeb8c08-03e2-48af-80d6-b4e9d05954a5" +name = "OptimizationGCMAES" +uuid = "6f0a0517-dbc2-4a7a-8a20-99ae7f27e911" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] GCMAES = "4aa9d100-eb0f-11e8-15f1-25748831eb3b" -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" [compat] julia = "1" -GalacticOptim = "3" +Optimization = "3" GCMAES = "0.1" [extras] diff --git a/lib/GalacticGCMAES/src/GalacticGCMAES.jl b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl similarity index 85% rename from lib/GalacticGCMAES/src/GalacticGCMAES.jl rename to lib/OptimizationGCMAES/src/OptimizationGCMAES.jl index 7086ec98a..524102268 100644 --- a/lib/GalacticGCMAES/src/GalacticGCMAES.jl +++ b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl @@ -1,6 +1,6 @@ -module GalacticGCMAES +module OptimizationGCMAES -using GCMAES, GalacticOptim, GalacticOptim.SciMLBase +using GCMAES, Optimization, Optimization.SciMLBase export GCMAESOpt @@ -49,10 +49,10 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::GCMAESOpt; local G = similar(prob.u0) - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, 
prob.f.adtype, prob.p) _loss = function(θ) x = f.f(θ, prob.p) @@ -69,7 +69,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::GCMAESOpt; opt_args = __map_optimizer_args(prob,opt, maxiters=maxiters, maxtime=maxtime, abstol=abstol, reltol=reltol; kwargs...) t0 = time() - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense opt_xmin, opt_fmin, opt_ret = GCMAES.maximize(isnothing(f.grad) ? _loss : (_loss,g), prob.u0, σ0, prob.lb, prob.ub; opt_args...) else opt_xmin, opt_fmin, opt_ret = GCMAES.minimize(isnothing(f.grad) ? _loss : (_loss,g), prob.u0, σ0, prob.lb, prob.ub; opt_args...) diff --git a/lib/GalacticGCMAES/test/runtests.jl b/lib/OptimizationGCMAES/test/runtests.jl similarity index 50% rename from lib/GalacticGCMAES/test/runtests.jl rename to lib/OptimizationGCMAES/test/runtests.jl index b1479a433..415bc2a37 100644 --- a/lib/GalacticGCMAES/test/runtests.jl +++ b/lib/OptimizationGCMAES/test/runtests.jl @@ -1,19 +1,19 @@ -using GalacticGCMAES, GalacticOptim, ForwardDiff +using OptimizationGCMAES, Optimization, ForwardDiff using Test -@testset "GalacticGCMAES.jl" begin +@testset "OptimizationGCMAES.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - f_ad = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) + f_ad = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) f_noad = OptimizationFunction(rosenbrock) - prob = GalacticOptim.OptimizationProblem(f_ad, x0, _p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) + prob = Optimization.OptimizationProblem(f_ad, x0, _p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) sol = solve(prob, GCMAESOpt(), maxiters=1000) @test 10 * sol.minimum < l1 - prob = GalacticOptim.OptimizationProblem(f_noad, x0, _p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) + prob = Optimization.OptimizationProblem(f_noad, x0, _p, lb=[-1.0, -1.0], ub=[1.0, 1.0]) sol = solve(prob, GCMAESOpt(), maxiters=1000) @test 10 * sol.minimum < l1 end diff --git 
a/lib/GalacticMOI/LICENSE b/lib/OptimizationMOI/LICENSE similarity index 100% rename from lib/GalacticMOI/LICENSE rename to lib/OptimizationMOI/LICENSE diff --git a/lib/GalacticMOI/Project.toml b/lib/OptimizationMOI/Project.toml similarity index 76% rename from lib/GalacticMOI/Project.toml rename to lib/OptimizationMOI/Project.toml index 38a1b6a24..044772f87 100644 --- a/lib/GalacticMOI/Project.toml +++ b/lib/OptimizationMOI/Project.toml @@ -1,16 +1,16 @@ -name = "GalacticMOI" -uuid = "103943a6-a0e8-4329-a18d-015edd6a0466" +name = "OptimizationMOI" +uuid = "fd9f6733-72f4-499f-8506-86b2bdd0dea1" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" [compat] julia = "1" MathOptInterface = "1" -GalacticOptim = "3" +Optimization = "3" [extras] Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" diff --git a/lib/GalacticMOI/src/GalacticMOI.jl b/lib/OptimizationMOI/src/OptimizationMOI.jl similarity index 94% rename from lib/GalacticMOI/src/GalacticMOI.jl rename to lib/OptimizationMOI/src/OptimizationMOI.jl index 17250b6cb..2a216d156 100644 --- a/lib/GalacticMOI/src/GalacticMOI.jl +++ b/lib/OptimizationMOI/src/OptimizationMOI.jl @@ -1,6 +1,6 @@ -module GalacticMOI +module OptimizationMOI -using MathOptInterface, GalacticOptim, GalacticOptim.SciMLBase +using MathOptInterface, Optimization, Optimization.SciMLBase const MOI = MathOptInterface struct MOIOptimizationProblem{T,F<:OptimizationFunction,uType,P} <: MOI.AbstractNLPEvaluator @@ -14,7 +14,7 @@ end function MOIOptimizationProblem(prob::OptimizationProblem) num_cons = prob.ucons === nothing ? 
0 : length(prob.ucons) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p, num_cons) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p, num_cons) T = eltype(prob.u0) n = length(prob.u0) return MOIOptimizationProblem( @@ -162,8 +162,8 @@ function SciMLBase.__solve( reltol::Union{Number,Nothing}=nothing, kwargs... ) - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) opt_setup = __map_optimizer_args( prob, opt; @@ -200,7 +200,7 @@ function SciMLBase.__solve( MOI.set( opt_setup, MOI.ObjectiveSense(), - prob.sense === GalacticOptim.MaxSense ? MOI.MAX_SENSE : MOI.MIN_SENSE, + prob.sense === Optimization.MaxSense ? MOI.MAX_SENSE : MOI.MIN_SENSE, ) if prob.lcons === nothing @assert prob.ucons === nothing diff --git a/lib/OptimizationMOI/test/runtests.jl b/lib/OptimizationMOI/test/runtests.jl new file mode 100644 index 000000000..7c7b3a192 --- /dev/null +++ b/lib/OptimizationMOI/test/runtests.jl @@ -0,0 +1,33 @@ +using OptimizationMOI, Optimization, Ipopt, NLopt, Zygote +using Test + +@testset "OptimizationMOI.jl" begin + rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 + x0 = zeros(2) + _p = [1.0, 100.0] + l1 = rosenbrock(x0, _p) + + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense=Optimization.MaxSense) + + sol = solve(prob, Ipopt.Optimizer()) + @test 10 * sol.minimum < l1 + + optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense=Optimization.MinSense) + + sol = solve(prob, Ipopt.Optimizer()) + @test 10 * sol.minimum < l1 + + sol = solve(prob, OptimizationMOI.MOI.OptimizerWithAttributes(Ipopt.Optimizer, "max_cpu_time" => 60.0)) + @test 10 * 
sol.minimum < l1 + + sol = solve(prob, OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer, "algorithm" => :LN_BOBYQA)) + @test 10 * sol.minimum < l1 + + sol = solve(prob, OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer, "algorithm" => :LD_LBFGS)) + @test 10 * sol.minimum < l1 + + sol = solve(prob, OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer, "algorithm" => :LD_LBFGS)) + @test 10 * sol.minimum < l1 +end diff --git a/lib/GalacticMetaheuristics/LICENSE b/lib/OptimizationMetaheuristics/LICENSE similarity index 100% rename from lib/GalacticMetaheuristics/LICENSE rename to lib/OptimizationMetaheuristics/LICENSE diff --git a/lib/GalacticMetaheuristics/Project.toml b/lib/OptimizationMetaheuristics/Project.toml similarity index 69% rename from lib/GalacticMetaheuristics/Project.toml rename to lib/OptimizationMetaheuristics/Project.toml index 647f668d0..8bd832d31 100644 --- a/lib/GalacticMetaheuristics/Project.toml +++ b/lib/OptimizationMetaheuristics/Project.toml @@ -1,10 +1,10 @@ -name = "GalacticMetaheuristics" -uuid = "ea53b4ef-d776-41ba-b3a2-ff4f1ca97632" +name = "OptimizationMetaheuristics" +uuid = "3aafef2f-86ae-4776-b337-85a36adf0b55" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" @@ -12,7 +12,7 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69" julia = "1" Metaheuristics = "3" Reexport = "1.2" -GalacticOptim = "3" +Optimization = "3" [extras] Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/lib/GalacticMetaheuristics/src/GalacticMetaheuristics.jl b/lib/OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl similarity index 92% rename from lib/GalacticMetaheuristics/src/GalacticMetaheuristics.jl rename to lib/OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl index 
42b25d219..6def0302c 100644 --- a/lib/GalacticMetaheuristics/src/GalacticMetaheuristics.jl +++ b/lib/OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl @@ -1,6 +1,6 @@ -module GalacticMetaheuristics +module OptimizationMetaheuristics -using Reexport, GalacticOptim, GalacticOptim.SciMLBase +using Reexport, Optimization, Optimization.SciMLBase @reexport using Metaheuristics function initial_population!(opt, prob, bounds, f) @@ -70,8 +70,8 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Metaheuristics.Abstra local x - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) _loss = function (θ) @@ -86,15 +86,15 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Metaheuristics.Abstra end if !isnothing(prob.f.cons) - @warn "Equality constraints are current not passed on by GalacticOptim" + @warn "Equality constraints are current not passed on by Optimization" end if !isnothing(prob.lcons) - @warn "Inequality constraints are current not passed on by GalacticOptim" + @warn "Inequality constraints are current not passed on by Optimization" end if !isnothing(prob.ucons) - @warn "Inequality constraints are current not passed on by GalacticOptim" + @warn "Inequality constraints are current not passed on by Optimization" end __map_optimizer_args!(prob, opt, callback=callback, maxiters=maxiters, maxtime=maxtime, abstol=abstol, reltol=reltol; kwargs...) 
diff --git a/lib/GalacticMetaheuristics/test/runtests.jl b/lib/OptimizationMetaheuristics/test/runtests.jl similarity index 86% rename from lib/GalacticMetaheuristics/test/runtests.jl rename to lib/OptimizationMetaheuristics/test/runtests.jl index 5c3a23400..c126899c0 100644 --- a/lib/GalacticMetaheuristics/test/runtests.jl +++ b/lib/OptimizationMetaheuristics/test/runtests.jl @@ -1,13 +1,13 @@ -using GalacticMetaheuristics, GalacticOptim +using OptimizationMetaheuristics, Optimization using Test -@testset "GalacticMetaheuristics.jl" begin +@testset "OptimizationMetaheuristics.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) optprob = OptimizationFunction(rosenbrock) - prob = GalacticOptim.OptimizationProblem(optprob, x0, _p, lb=[-1.0, -1.0], ub=[1.5, 1.5]) + prob = Optimization.OptimizationProblem(optprob, x0, _p, lb=[-1.0, -1.0], ub=[1.5, 1.5]) sol = solve(prob, ECA()) @test 10 * sol.minimum < l1 diff --git a/lib/GalacticMultistartOptimization/LICENSE b/lib/OptimizationMultistartOptimization/LICENSE similarity index 100% rename from lib/GalacticMultistartOptimization/LICENSE rename to lib/OptimizationMultistartOptimization/LICENSE diff --git a/lib/GalacticMultistartOptimization/Project.toml b/lib/OptimizationMultistartOptimization/Project.toml similarity index 75% rename from lib/GalacticMultistartOptimization/Project.toml rename to lib/OptimizationMultistartOptimization/Project.toml index a390bc727..1ff9cbd39 100644 --- a/lib/GalacticMultistartOptimization/Project.toml +++ b/lib/OptimizationMultistartOptimization/Project.toml @@ -1,17 +1,17 @@ -name = "GalacticMultistartOptimization" -uuid = "969b6d83-0cb9-4e98-85e3-a6a04e06aec6" +name = "OptimizationMultistartOptimization" +uuid = "e4316d97-8bbb-4fd3-a7d8-3851d2a72823" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = 
"7f7a1694-90dd-40f0-9382-eb1efda571ba" MultistartOptimization = "3933049c-43be-478e-a8bb-6e0f7fd53575" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" [compat] julia = "1" MultistartOptimization = "0.1.3" -GalacticOptim = "3" +Optimization = "3" Reexport = "1.2" [extras] diff --git a/lib/GalacticMultistartOptimization/src/GalacticMultistartOptimization.jl b/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl similarity index 91% rename from lib/GalacticMultistartOptimization/src/GalacticMultistartOptimization.jl rename to lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl index 8f64c8a57..a5a41a0ef 100644 --- a/lib/GalacticMultistartOptimization/src/GalacticMultistartOptimization.jl +++ b/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl @@ -1,6 +1,6 @@ -module GalacticMultistartOptimization +module OptimizationMultistartOptimization -using Reexport, GalacticOptim, GalacticOptim.SciMLBase +using Reexport, Optimization, Optimization.SciMLBase @reexport using MultistartOptimization function SciMLBase.__solve(prob::OptimizationProblem, multiopt::MultistartOptimization.TikTak, opt; diff --git a/lib/OptimizationMultistartOptimization/test/runtests.jl b/lib/OptimizationMultistartOptimization/test/runtests.jl new file mode 100644 index 000000000..2d744c203 --- /dev/null +++ b/lib/OptimizationMultistartOptimization/test/runtests.jl @@ -0,0 +1,14 @@ +using Pkg; Pkg.develop(path=joinpath(@__DIR__,"../../","OptimizationNLopt")); +using OptimizationMultistartOptimization, Optimization, ForwardDiff, OptimizationNLopt +using Test + +@testset "OptimizationMultistartOptimization.jl" begin + rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 + x0 = zeros(2) + _p = [1.0, 100.0] + l1 = rosenbrock(x0, _p) + f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + prob = Optimization.OptimizationProblem(f, x0, _p, lb=[-1.0, -1.0], ub=[1.5, 1.5]) + sol = solve(prob, 
OptimizationMultistartOptimization.TikTak(100), OptimizationNLopt.Opt(:LD_LBFGS, 2)) + @test 10 * sol.minimum < l1 +end diff --git a/lib/GalacticNLopt/LICENSE b/lib/OptimizationNLopt/LICENSE similarity index 100% rename from lib/GalacticNLopt/LICENSE rename to lib/OptimizationNLopt/LICENSE diff --git a/lib/GalacticNLopt/Project.toml b/lib/OptimizationNLopt/Project.toml similarity index 72% rename from lib/GalacticNLopt/Project.toml rename to lib/OptimizationNLopt/Project.toml index 2aa8e1699..9f3b72a73 100644 --- a/lib/GalacticNLopt/Project.toml +++ b/lib/OptimizationNLopt/Project.toml @@ -1,17 +1,17 @@ -name = "GalacticNLopt" -uuid = "792012fa-ebfb-4b3a-ab44-69908522effd" +name = "OptimizationNLopt" +uuid = "4e6fcdb7-1186-4e1f-a706-475e75c168bb" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" [compat] julia = "1" NLopt = "0.6" -GalacticOptim = "3" +Optimization = "3" Reexport = "1.2" [extras] diff --git a/lib/GalacticNLopt/src/GalacticNLopt.jl b/lib/OptimizationNLopt/src/OptimizationNLopt.jl similarity index 86% rename from lib/GalacticNLopt/src/GalacticNLopt.jl rename to lib/OptimizationNLopt/src/OptimizationNLopt.jl index b95f5b2af..4d9ba53b1 100644 --- a/lib/GalacticNLopt/src/GalacticNLopt.jl +++ b/lib/OptimizationNLopt/src/OptimizationNLopt.jl @@ -1,6 +1,6 @@ -module GalacticNLopt +module OptimizationNLopt -using Reexport, GalacticOptim, GalacticOptim.SciMLBase +using Reexport, Optimization, Optimization.SciMLBase @reexport using NLopt (f::NLopt.Algorithm)() = f @@ -90,13 +90,13 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Union{NLopt.Algorithm kwargs...) 
local x - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) - local_maxiters = GalacticOptim._check_and_convert_maxiters(local_maxiters) - local_maxtime = GalacticOptim._check_and_convert_maxtime(local_maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) + local_maxiters = Optimization._check_and_convert_maxiters(local_maxiters) + local_maxtime = Optimization._check_and_convert_maxtime(local_maxtime) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) _loss = function (θ) x = f.f(θ, prob.p) @@ -121,7 +121,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Union{NLopt.Algorithm opt_setup = NLopt.Opt(opt, length(prob.u0)) end - prob.sense === GalacticOptim.MaxSense ? NLopt.max_objective!(opt_setup, fg!) : NLopt.min_objective!(opt_setup, fg!) + prob.sense === Optimization.MaxSense ? NLopt.max_objective!(opt_setup, fg!) : NLopt.min_objective!(opt_setup, fg!) __map_optimizer_args!(prob, opt_setup, maxiters=maxiters, maxtime=maxtime, abstol=abstol, reltol=reltol, local_method=local_method, local_maxiters=local_maxiters, local_options=local_options; kwargs...) 
diff --git a/lib/GalacticNLopt/test/runtests.jl b/lib/OptimizationNLopt/test/runtests.jl similarity index 83% rename from lib/GalacticNLopt/test/runtests.jl rename to lib/OptimizationNLopt/test/runtests.jl index ba5c9d6cd..c2af620a1 100644 --- a/lib/GalacticNLopt/test/runtests.jl +++ b/lib/OptimizationNLopt/test/runtests.jl @@ -1,18 +1,18 @@ -using GalacticNLopt, GalacticOptim, Zygote +using OptimizationNLopt, Optimization, Zygote using Test -@testset "GalacticNLopt.jl" begin +@testset "OptimizationNLopt.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), GalacticOptim.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense=GalacticOptim.MaxSense) + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense=Optimization.MaxSense) sol = solve(prob, NLopt.Opt(:LN_BOBYQA, 2)) @test 10 * sol.minimum < l1 - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoZygote()) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) prob = OptimizationProblem(optprob, x0, _p) sol = solve(prob, NLopt.Opt(:LN_BOBYQA, 2)) diff --git a/lib/GalacticNOMAD/LICENSE b/lib/OptimizationNOMAD/LICENSE similarity index 100% rename from lib/GalacticNOMAD/LICENSE rename to lib/OptimizationNOMAD/LICENSE diff --git a/lib/GalacticNOMAD/Project.toml b/lib/OptimizationNOMAD/Project.toml similarity index 64% rename from lib/GalacticNOMAD/Project.toml rename to lib/OptimizationNOMAD/Project.toml index dec4e0895..1a900030e 100644 --- a/lib/GalacticNOMAD/Project.toml +++ b/lib/OptimizationNOMAD/Project.toml @@ -1,16 +1,16 @@ -name = "GalacticNOMAD" -uuid = "6c1b17f4-5d83-47e4-91dc-4b94a71b6df7" +name = "OptimizationNOMAD" +uuid = "2cab0595-8222-4775-b714-9828e6a9e01b" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] -GalacticOptim = 
"a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" NOMAD = "02130f1c-4665-5b79-af82-ff1385104aa0" [compat] julia = "1" NOMAD = "2" -GalacticOptim = "3" +Optimization = "3" [extras] Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/lib/GalacticNOMAD/src/GalacticNOMAD.jl b/lib/OptimizationNOMAD/src/OptimizationNOMAD.jl similarity index 91% rename from lib/GalacticNOMAD/src/GalacticNOMAD.jl rename to lib/OptimizationNOMAD/src/OptimizationNOMAD.jl index 32786cffe..aa26578c5 100644 --- a/lib/GalacticNOMAD/src/GalacticNOMAD.jl +++ b/lib/OptimizationNOMAD/src/OptimizationNOMAD.jl @@ -1,6 +1,6 @@ -module GalacticNOMAD +module OptimizationNOMAD -using NOMAD, GalacticOptim, GalacticOptim.SciMLBase +using NOMAD, Optimization, Optimization.SciMLBase export NOMADOpt struct NOMADOpt end @@ -47,8 +47,8 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::NOMADOpt; local x - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) _loss = function (θ) diff --git a/lib/GalacticNOMAD/test/runtests.jl b/lib/OptimizationNOMAD/test/runtests.jl similarity index 68% rename from lib/GalacticNOMAD/test/runtests.jl rename to lib/OptimizationNOMAD/test/runtests.jl index 6bacdbcf4..a8944a766 100644 --- a/lib/GalacticNOMAD/test/runtests.jl +++ b/lib/OptimizationNOMAD/test/runtests.jl @@ -1,7 +1,7 @@ -using GalacticNOMAD, GalacticOptim +using OptimizationNOMAD, Optimization using Test -@testset "GalacticNOMAD.jl" begin +@testset "OptimizationNOMAD.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] @@ -10,10 +10,10 @@ using Test f = OptimizationFunction(rosenbrock) prob = OptimizationProblem(f, x0, _p) - sol = GalacticOptim.solve(prob, NOMADOpt()) + sol = Optimization.solve(prob, NOMADOpt()) 
@test 10 * sol.minimum < l1 prob = OptimizationProblem(f, x0, _p; lb=[-1.0, -1.0], ub=[1.5, 1.5]) - sol = GalacticOptim.solve(prob, NOMADOpt()) + sol = Optimization.solve(prob, NOMADOpt()) @test 10 * sol.minimum < l1 end diff --git a/lib/GalacticNonconvex/LICENSE b/lib/OptimizationNonconvex/LICENSE similarity index 100% rename from lib/GalacticNonconvex/LICENSE rename to lib/OptimizationNonconvex/LICENSE diff --git a/lib/GalacticNonconvex/Project.toml b/lib/OptimizationNonconvex/Project.toml similarity index 88% rename from lib/GalacticNonconvex/Project.toml rename to lib/OptimizationNonconvex/Project.toml index 7869cb25b..4419c7f0b 100644 --- a/lib/GalacticNonconvex/Project.toml +++ b/lib/OptimizationNonconvex/Project.toml @@ -1,11 +1,11 @@ -name = "GalacticNonconvex" -uuid = "5bfdff76-3000-42fd-af80-1c590d2c75e2" +name = "OptimizationNonconvex" +uuid = "10d56f2b-e510-4d53-b540-70dbdae0400e" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" Nonconvex = "01bcebdf-4d21-426d-b5c4-6132c1619978" NonconvexBayesian = "fb352abc-de7b-48de-9ebd-665b54b5d9b3" NonconvexIpopt = "bf347577-a06d-49ad-a669-8c0e005493b8" @@ -19,7 +19,7 @@ NonconvexSearch = "75732972-a7cd-4375-b200-958e0814350d" [compat] julia = "1" ChainRulesCore = "1" -GalacticOptim = "3" +Optimization = "3" Nonconvex = "1" NonconvexBayesian = "0.1" NonconvexIpopt = "0.1.4" diff --git a/lib/GalacticNonconvex/src/GalacticNonconvex.jl b/lib/OptimizationNonconvex/src/OptimizationNonconvex.jl similarity index 91% rename from lib/GalacticNonconvex/src/GalacticNonconvex.jl rename to lib/OptimizationNonconvex/src/OptimizationNonconvex.jl index 5b8c0198f..71798ab66 100644 --- a/lib/GalacticNonconvex/src/GalacticNonconvex.jl +++ b/lib/OptimizationNonconvex/src/OptimizationNonconvex.jl @@ -1,6 +1,6 @@ -module GalacticNonconvex +module 
OptimizationNonconvex -using Nonconvex, GalacticOptim, GalacticOptim.SciMLBase, ChainRulesCore +using Nonconvex, Optimization, Optimization.SciMLBase, ChainRulesCore include("nonconvex_bayesian.jl") include("nonconvex_pavito.jl") @@ -72,10 +72,10 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Nonconvex.NonconvexCo # local x - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) _loss = NonconvexADWrapper(f, prob) @@ -95,17 +95,17 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::Nonconvex.NonconvexCo Nonconvex.addvar!(opt_set, prob.lb, prob.ub, integer=integer) if !isnothing(prob.f.cons) - @warn "Equality constraints are current not passed on by GalacticOptim" + @warn "Equality constraints are current not passed on by Optimization" #add_ineq_constraint!(opt_set, f) end if !isnothing(prob.lcons) - @warn "Inequality constraints are current not passed on by GalacticOptim" + @warn "Inequality constraints are current not passed on by Optimization" #add_ineq_constraint!(opt_set, f) end if !isnothing(prob.ucons) - @warn "Inequality constraints are current not passed on by GalacticOptim" + @warn "Inequality constraints are current not passed on by Optimization" #add_ineq_constraint!(opt_set, f) end diff --git a/lib/GalacticNonconvex/src/nonconvex_bayesian.jl b/lib/OptimizationNonconvex/src/nonconvex_bayesian.jl similarity index 100% rename from lib/GalacticNonconvex/src/nonconvex_bayesian.jl rename to lib/OptimizationNonconvex/src/nonconvex_bayesian.jl diff --git a/lib/GalacticNonconvex/src/nonconvex_ipopt.jl b/lib/OptimizationNonconvex/src/nonconvex_ipopt.jl similarity index 100% rename from 
lib/GalacticNonconvex/src/nonconvex_ipopt.jl rename to lib/OptimizationNonconvex/src/nonconvex_ipopt.jl diff --git a/lib/GalacticNonconvex/src/nonconvex_juniper.jl b/lib/OptimizationNonconvex/src/nonconvex_juniper.jl similarity index 100% rename from lib/GalacticNonconvex/src/nonconvex_juniper.jl rename to lib/OptimizationNonconvex/src/nonconvex_juniper.jl diff --git a/lib/GalacticNonconvex/src/nonconvex_mma.jl b/lib/OptimizationNonconvex/src/nonconvex_mma.jl similarity index 100% rename from lib/GalacticNonconvex/src/nonconvex_mma.jl rename to lib/OptimizationNonconvex/src/nonconvex_mma.jl diff --git a/lib/GalacticNonconvex/src/nonconvex_multistart.jl b/lib/OptimizationNonconvex/src/nonconvex_multistart.jl similarity index 95% rename from lib/GalacticNonconvex/src/nonconvex_multistart.jl rename to lib/OptimizationNonconvex/src/nonconvex_multistart.jl index 41b9e7edc..0122fc2bd 100644 --- a/lib/GalacticNonconvex/src/nonconvex_multistart.jl +++ b/lib/OptimizationNonconvex/src/nonconvex_multistart.jl @@ -38,7 +38,7 @@ function __create_options(opt::NonconvexMultistart.HyperoptAlg; options = !isnothing(opt_kwargs) ? NonconvexMultistart.HyperoptOptions(;opt_kwargs...) : NonconvexMultistart.HyperoptOptions() if isa(options.sampler, NonconvexMultistart.Hyperopt.Hyperband) - error("$(options.sampler) is currently not support by GalacticOptim") + error("$(options.sampler) is currently not support by Optimization") end return options @@ -52,7 +52,7 @@ function _create_options(opt::NonconvexMultistart.HyperoptAlg; options = (; options = !isnothing(opt_kwargs) ? NonconvexMultistart.HyperoptOptions(;sub_options= __create_options(opt.sub_alg, opt_kwargs=sub_options) ,opt_kwargs...) 
: NonconvexMultistart.HyperoptOptions(;sub_options= __create_options(opt.sub_alg, opt_kwargs=sub_options))) if isa(options.options.sampler, NonconvexMultistart.Hyperopt.Hyperband) - error("$(options.options.sampler) is currently not support by GalacticOptim") + error("$(options.options.sampler) is currently not support by Optimization") end return options diff --git a/lib/GalacticNonconvex/src/nonconvex_nlopt.jl b/lib/OptimizationNonconvex/src/nonconvex_nlopt.jl similarity index 100% rename from lib/GalacticNonconvex/src/nonconvex_nlopt.jl rename to lib/OptimizationNonconvex/src/nonconvex_nlopt.jl diff --git a/lib/GalacticNonconvex/src/nonconvex_pavito.jl b/lib/OptimizationNonconvex/src/nonconvex_pavito.jl similarity index 100% rename from lib/GalacticNonconvex/src/nonconvex_pavito.jl rename to lib/OptimizationNonconvex/src/nonconvex_pavito.jl diff --git a/lib/GalacticNonconvex/src/nonconvex_percival.jl b/lib/OptimizationNonconvex/src/nonconvex_percival.jl similarity index 100% rename from lib/GalacticNonconvex/src/nonconvex_percival.jl rename to lib/OptimizationNonconvex/src/nonconvex_percival.jl diff --git a/lib/GalacticNonconvex/src/nonconvex_search.jl b/lib/OptimizationNonconvex/src/nonconvex_search.jl similarity index 100% rename from lib/GalacticNonconvex/src/nonconvex_search.jl rename to lib/OptimizationNonconvex/src/nonconvex_search.jl diff --git a/lib/GalacticNonconvex/test/runtests.jl b/lib/OptimizationNonconvex/test/runtests.jl similarity index 90% rename from lib/GalacticNonconvex/test/runtests.jl rename to lib/OptimizationNonconvex/test/runtests.jl index 9c1c78700..42bea6b61 100644 --- a/lib/GalacticNonconvex/test/runtests.jl +++ b/lib/OptimizationNonconvex/test/runtests.jl @@ -1,22 +1,22 @@ -using GalacticNonconvex, GalacticOptim, Zygote, Pkg +using OptimizationNonconvex, Optimization, Zygote, Pkg using Test -@testset "GalacticNonconvex.jl" begin +@testset "OptimizationNonconvex.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 
x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoZygote()) - - GalacticNonconvex.Nonconvex.@load MMA - GalacticNonconvex.Nonconvex.@load Ipopt - GalacticNonconvex.Nonconvex.@load NLopt - GalacticNonconvex.Nonconvex.@load BayesOpt - GalacticNonconvex.Nonconvex.@load Juniper - GalacticNonconvex.Nonconvex.@load Pavito - GalacticNonconvex.Nonconvex.@load Hyperopt - GalacticNonconvex.Nonconvex.@load MTS - prob = GalacticOptim.OptimizationProblem(optprob, x0, _p, lb=[-1.0, -1.0], ub=[1.5, 1.5]) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + + OptimizationNonconvex.Nonconvex.@load MMA + OptimizationNonconvex.Nonconvex.@load Ipopt + OptimizationNonconvex.Nonconvex.@load NLopt + OptimizationNonconvex.Nonconvex.@load BayesOpt + OptimizationNonconvex.Nonconvex.@load Juniper + OptimizationNonconvex.Nonconvex.@load Pavito + OptimizationNonconvex.Nonconvex.@load Hyperopt + OptimizationNonconvex.Nonconvex.@load MTS + prob = Optimization.OptimizationProblem(optprob, x0, _p, lb=[-1.0, -1.0], ub=[1.5, 1.5]) sol = solve(prob, MMA02()) @test 10 * sol.minimum < l1 diff --git a/lib/GalacticOptimJL/LICENSE b/lib/OptimizationOptimJL/LICENSE similarity index 100% rename from lib/GalacticOptimJL/LICENSE rename to lib/OptimizationOptimJL/LICENSE diff --git a/lib/GalacticOptimJL/Project.toml b/lib/OptimizationOptimJL/Project.toml similarity index 79% rename from lib/GalacticOptimJL/Project.toml rename to lib/OptimizationOptimJL/Project.toml index 92c8f5ed1..7b066b1b9 100644 --- a/lib/GalacticOptimJL/Project.toml +++ b/lib/OptimizationOptimJL/Project.toml @@ -1,15 +1,15 @@ -name = "GalacticOptimJL" -uuid = "9d3c5eb1-403b-401b-8c0f-c11105342e6b" +name = "OptimizationOptimJL" +uuid = "36348300-93cb-4f02-beb5-3c3902f8871e" authors = ["Vaibhav Dixit and contributors"] version = "0.1.1" [deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" 
Optim = "429524aa-4258-5aef-a3af-852621145aeb" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" [compat] -GalacticOptim = "3" +Optimization = "3" Optim = "1" Reexport = "1.2" julia = "1" diff --git a/lib/GalacticOptimJL/src/GalacticOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl similarity index 82% rename from lib/GalacticOptimJL/src/GalacticOptimJL.jl rename to lib/OptimizationOptimJL/src/OptimizationOptimJL.jl index 4d8824d08..f19aebf45 100644 --- a/lib/GalacticOptimJL/src/GalacticOptimJL.jl +++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl @@ -1,6 +1,6 @@ -module GalacticOptimJL +module OptimizationOptimJL -using Reexport, GalacticOptim, GalacticOptim.SciMLBase +using Reexport, Optimization, Optimization.SciMLBase @reexport using Optim decompose_trace(trace::Optim.OptimizationTrace) = last(trace) decompose_trace(trace::Optim.OptimizationState) = trace @@ -39,7 +39,7 @@ end function SciMLBase.__solve(prob::OptimizationProblem, opt::Optim.AbstractOptimizer, - data=GalacticOptim.DEFAULT_DATA; + data=Optimization.DEFAULT_DATA; kwargs...) if !isnothing(prob.lb) | !isnothing(prob.ub) if !(opt isa Union{Optim.Fminbox,Optim.SAMIN,Optim.AbstractConstrainedOptimizer}) @@ -57,7 +57,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, end function ___solve(prob::OptimizationProblem, opt::Optim.AbstractOptimizer, - data=GalacticOptim.DEFAULT_DATA; + data=Optimization.DEFAULT_DATA; callback=(args...) 
-> (false), maxiters::Union{Number,Nothing}=nothing, maxtime::Union{Number,Nothing}=nothing, @@ -68,7 +68,7 @@ function ___solve(prob::OptimizationProblem, opt::Optim.AbstractOptimizer, local x, cur, state - if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) end @@ -88,23 +88,23 @@ function ___solve(prob::OptimizationProblem, opt::Optim.AbstractOptimizer, end end - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) !(opt isa Optim.ZerothOrderOptimizer) && f.grad === nothing && error("Use OptimizationFunction to pass the derivatives or automatically generate them with one of the autodiff backends") _loss = function (θ) x = f.f(θ, prob.p, cur...) __x = first(x) - return prob.sense === GalacticOptim.MaxSense ? -__x : __x + return prob.sense === Optimization.MaxSense ? -__x : __x end fg! = function (G, θ) if G !== nothing f.grad(G, θ, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense G .*= false end end @@ -114,7 +114,7 @@ function ___solve(prob::OptimizationProblem, opt::Optim.AbstractOptimizer, if opt isa Optim.KrylovTrustRegion hv = function (H, θ, v) f.hv(H, θ, v, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense H .*= false end end @@ -122,14 +122,14 @@ function ___solve(prob::OptimizationProblem, opt::Optim.AbstractOptimizer, else gg = function (G, θ) f.grad(G, θ, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense G .*= false end end hh = function (H, θ) f.hess(H, θ, cur...) 
- if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense H .*= false end end @@ -143,11 +143,11 @@ function ___solve(prob::OptimizationProblem, opt::Optim.AbstractOptimizer, t1 = time() opt_ret = Symbol(Optim.converged(opt_res)) - SciMLBase.build_solution(prob, opt, opt_res.minimizer, prob.sense === GalacticOptim.MaxSense ? -opt_res.minimum : opt_res.minimum; original=opt_res, retcode=opt_ret) + SciMLBase.build_solution(prob, opt, opt_res.minimizer, prob.sense === Optimization.MaxSense ? -opt_res.minimum : opt_res.minimum; original=opt_res, retcode=opt_ret) end function ___solve(prob::OptimizationProblem, opt::Union{Optim.Fminbox,Optim.SAMIN}, - data=GalacticOptim.DEFAULT_DATA; + data=Optimization.DEFAULT_DATA; callback=(args...) -> (false), maxiters::Union{Number,Nothing}=nothing, maxtime::Union{Number,Nothing}=nothing, @@ -158,7 +158,7 @@ function ___solve(prob::OptimizationProblem, opt::Union{Optim.Fminbox,Optim.SAMI local x, cur, state - if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) end @@ -178,22 +178,22 @@ function ___solve(prob::OptimizationProblem, opt::Union{Optim.Fminbox,Optim.SAMI end end - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) !(opt isa Optim.ZerothOrderOptimizer) && f.grad === nothing && error("Use OptimizationFunction to pass the derivatives or automatically generate them with one of the autodiff backends") _loss = function (θ) x = f.f(θ, prob.p, cur...) __x = first(x) - return prob.sense === GalacticOptim.MaxSense ? -__x : __x + return prob.sense === Optimization.MaxSense ? -__x : __x end fg! 
= function (G, θ) if G !== nothing f.grad(G, θ, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense G .*= false end end @@ -202,7 +202,7 @@ function ___solve(prob::OptimizationProblem, opt::Union{Optim.Fminbox,Optim.SAMI gg = function (G, θ) f.grad(G, θ, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense G .*= false end end @@ -220,7 +220,7 @@ end function ___solve(prob::OptimizationProblem, opt::Optim.ConstrainedOptimizer, - data=GalacticOptim.DEFAULT_DATA; + data=Optimization.DEFAULT_DATA; callback=(args...) -> (false), maxiters::Union{Number,Nothing}=nothing, maxtime::Union{Number,Nothing}=nothing, @@ -231,7 +231,7 @@ function ___solve(prob::OptimizationProblem, opt::Optim.ConstrainedOptimizer, local x, cur, state - if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) end @@ -251,22 +251,22 @@ function ___solve(prob::OptimizationProblem, opt::Optim.ConstrainedOptimizer, end end - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p, prob.ucons === nothing ? 0 : length(prob.ucons)) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p, prob.ucons === nothing ? 0 : length(prob.ucons)) f.cons_j === nothing && error("This optimizer requires derivative definitions for nonlinear constraints. If the problem does not have nonlinear constraints, choose a different optimizer. Otherwise define the derivative for cons using OptimizationFunction either directly or automatically generate them with one of the autodiff backends") _loss = function (θ) x = f.f(θ, prob.p, cur...) __x = first(x) - return prob.sense === GalacticOptim.MaxSense ? 
-__x : __x + return prob.sense === Optimization.MaxSense ? -__x : __x end fg! = function (G, θ) if G !== nothing f.grad(G, θ, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense G .*= false end end @@ -274,14 +274,14 @@ function ___solve(prob::OptimizationProblem, opt::Optim.ConstrainedOptimizer, end gg = function (G, θ) f.grad(G, θ, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense G .*= false end end hh = function (H, θ) f.hess(H, θ, cur...) - if prob.sense === GalacticOptim.MaxSense + if prob.sense === Optimization.MaxSense H .*= false end end diff --git a/lib/GalacticOptimJL/test/runtests.jl b/lib/OptimizationOptimJL/test/runtests.jl similarity index 74% rename from lib/GalacticOptimJL/test/runtests.jl rename to lib/OptimizationOptimJL/test/runtests.jl index 3fcc9aaeb..156a20428 100644 --- a/lib/GalacticOptimJL/test/runtests.jl +++ b/lib/OptimizationOptimJL/test/runtests.jl @@ -1,12 +1,12 @@ -using GalacticOptimJL, GalacticOptimJL.Optim, GalacticOptim, ForwardDiff, Zygote, Random, ModelingToolkit +using OptimizationOptimJL, OptimizationOptimJL.Optim, Optimization, ForwardDiff, Zygote, Random, ModelingToolkit using Test -@testset "GalacticOptimJL.jl" begin +@testset "OptimizationOptimJL.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) + f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(f, x0, _p) Random.seed!(1234) sol = solve(prob, SimulatedAnnealing()) @@ -22,8 +22,8 @@ using Test @test 10 * sol.minimum < l1 cons = (x, p) -> [x[1]^2 + x[2]^2] - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff(); cons=cons) - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoModelingToolkit(); cons=cons) + optprob = OptimizationFunction(rosenbrock, 
Optimization.AutoForwardDiff(); cons=cons) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit(); cons=cons) prob = OptimizationProblem(optprob, x0, _p) @@ -52,18 +52,18 @@ using Test [x[1]^2 + x[2]^2, x[2] * sin(x[1]) - x[1]] end - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff(); cons=con2_c) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons=con2_c) prob = OptimizationProblem(optprob, x0, _p, lcons=[-Inf, -Inf], ucons=[Inf, Inf]) sol = solve(prob, IPNewton()) @test 10 * sol.minimum < l1 cons_circ = (x, p) -> [x[1]^2 + x[2]^2] - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff(); cons=cons_circ) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons=cons_circ) prob = OptimizationProblem(optprob, x0, _p, lcons=[-Inf], ucons=[0.25^2]) sol = solve(prob, IPNewton()) @test sqrt(cons(sol.u, nothing)[1]) ≈ 0.25 rtol = 1e-6 - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoZygote()) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) prob = OptimizationProblem(optprob, x0, _p, lb=[-1.0, -1.0], ub=[0.8, 0.8]) sol = solve(prob, Optim.Fminbox()) @@ -74,8 +74,8 @@ using Test sol = solve(prob, Optim.SAMIN()) @test 10 * sol.minimum < l1 - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), GalacticOptim.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense=GalacticOptim.MaxSense) + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense=Optimization.MaxSense) sol = solve(prob, NelderMead()) @test 10 * sol.minimum < l1 @@ -87,12 +87,12 @@ using Test G[1] = -2.0 * (1.0 - x[1]) - 400.0 * (x[2] - x[1]^2) * x[1] G[2] = 200.0 * (x[2] - x[1]^2) end - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), GalacticOptim.AutoZygote(), grad=g!) 
- prob = OptimizationProblem(optprob, x0, _p; sense=GalacticOptim.MaxSense) + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote(), grad=g!) + prob = OptimizationProblem(optprob, x0, _p; sense=Optimization.MaxSense) sol = solve(prob, BFGS()) @test 10 * sol.minimum < l1 - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoModelingToolkit()) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit()) prob = OptimizationProblem(optprob, x0, _p) sol = solve(prob, Optim.BFGS()) @test 10 * sol.minimum < l1 diff --git a/lib/GalacticOptimisers/LICENSE b/lib/OptimizationOptimisers/LICENSE similarity index 100% rename from lib/GalacticOptimisers/LICENSE rename to lib/OptimizationOptimisers/LICENSE diff --git a/lib/GalacticOptimisers/Project.toml b/lib/OptimizationOptimisers/Project.toml similarity index 78% rename from lib/GalacticOptimisers/Project.toml rename to lib/OptimizationOptimisers/Project.toml index d8fd175d8..838cd476e 100644 --- a/lib/GalacticOptimisers/Project.toml +++ b/lib/OptimizationOptimisers/Project.toml @@ -1,11 +1,11 @@ -name = "GalacticOptimisers" -uuid = "86b7a833-eb4b-49e2-87ed-89357ad7afa2" +name = "OptimizationOptimisers" +uuid = "42dfb2eb-d2b4-4451-abcd-913932933ac1" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" @@ -15,7 +15,7 @@ julia = "1" Optimisers = "0.2" ProgressLogging = "0.1" Reexport = "1.2" -GalacticOptim = "3" +Optimization = "3" [extras] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" diff --git a/lib/GalacticOptimisers/src/GalacticOptimisers.jl b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl similarity index 80% rename 
from lib/GalacticOptimisers/src/GalacticOptimisers.jl rename to lib/OptimizationOptimisers/src/OptimizationOptimisers.jl index cad39cada..632e6b2d9 100644 --- a/lib/GalacticOptimisers/src/GalacticOptimisers.jl +++ b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl @@ -1,21 +1,21 @@ -module GalacticOptimisers +module OptimizationOptimisers -using GalacticOptim, Reexport, Printf, ProgressLogging, GalacticOptim.SciMLBase +using Optimization, Reexport, Printf, ProgressLogging, Optimization.SciMLBase @reexport using Optimisers const OptimisersOptimizers = Union{Descent, Adam, Momentum, Nesterov, RMSProp, AdaGrad, AdaMax, AdaDelta, AMSGrad, NAdam, RAdam, OAdam, AdaBelief, WeightDecay, ClipGrad, ClipNorm, OptimiserChain} -function SciMLBase.__solve(prob::OptimizationProblem, opt::OptimisersOptimizers, data = GalacticOptim.DEFAULT_DATA; +function SciMLBase.__solve(prob::OptimizationProblem, opt::OptimisersOptimizers, data = Optimization.DEFAULT_DATA; maxiters::Number = 0, callback = (args...) -> (false), progress = false, save_best = true, kwargs...) - if data != GalacticOptim.DEFAULT_DATA + if data != Optimization.DEFAULT_DATA maxiters = length(data) else - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - data = GalacticOptim.take(data, maxiters) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + data = Optimization.take(data, maxiters) end θ = copy(prob.u0) @@ -26,11 +26,11 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::OptimisersOptimizers, min_opt = 1 min_θ = prob.u0 - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) state = Optimisers.setup(opt, θ) t0 = time() - GalacticOptim.@withprogress progress name = "Training" begin + Optimization.@withprogress progress name = "Training" begin for (i, d) in enumerate(data) f.grad(G, θ, d...) x = f.f(θ, prob.p, d...) 
diff --git a/lib/GalacticOptimisers/test/runtests.jl b/lib/OptimizationOptimisers/test/runtests.jl similarity index 60% rename from lib/GalacticOptimisers/test/runtests.jl rename to lib/OptimizationOptimisers/test/runtests.jl index 174742073..f83ea5837 100644 --- a/lib/GalacticOptimisers/test/runtests.jl +++ b/lib/OptimizationOptimisers/test/runtests.jl @@ -1,17 +1,17 @@ -using GalacticOptimisers, GalacticOptim, ForwardDiff +using OptimizationOptimisers, Optimization, ForwardDiff using Test -@testset "GalacticOptimisers.jl" begin +@testset "OptimizationOptimisers.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) + optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, _p) - sol = GalacticOptim.solve(prob, Optimisers.ADAM(0.1), maxiters=1000) + sol = Optimization.solve(prob, Optimisers.ADAM(0.1), maxiters=1000) @test 10 * sol.minimum < l1 prob = OptimizationProblem(optprob, x0, _p) diff --git a/lib/GalacticPolyalgorithms/LICENSE b/lib/OptimizationPolyalgorithms/LICENSE similarity index 100% rename from lib/GalacticPolyalgorithms/LICENSE rename to lib/OptimizationPolyalgorithms/LICENSE diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml new file mode 100644 index 000000000..e68759dcb --- /dev/null +++ b/lib/OptimizationPolyalgorithms/Project.toml @@ -0,0 +1,22 @@ +name = "OptimizationPolyalgorithms" +uuid = "869ac0a3-20d1-4dac-a63a-5f8d7406e689" +authors = ["Vaibhav Dixit and contributors"] +version = "0.1.0" + +[deps] +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" +OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1" + +[compat] +Optimization = "3" +OptimizationOptimJL = "0.1" +OptimizationOptimisers = "0.1" +julia = "1.6" + 
+[extras] +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[targets] +test = ["ForwardDiff", "Test"] diff --git a/lib/GalacticPolyalgorithms/src/GalacticPolyalgorithms.jl b/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl similarity index 68% rename from lib/GalacticPolyalgorithms/src/GalacticPolyalgorithms.jl rename to lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl index 19460e0d7..c70f753bb 100644 --- a/lib/GalacticPolyalgorithms/src/GalacticPolyalgorithms.jl +++ b/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl @@ -1,6 +1,6 @@ -module GalacticPolyalgorithms +module OptimizationPolyalgorithms -using GalacticOptim, GalacticOptim.SciMLBase, GalacticOptimJL, GalacticOptimisers +using Optimization, Optimization.SciMLBase, OptimizationOptimJL, OptimizationOptimisers struct PolyOpt end @@ -20,19 +20,19 @@ function SciMLBase.__solve(prob::OptimizationProblem, if isempty(args) && deterministic && prob.lb === nothing && prob.ub === nothing # If determinsitic then ADAM -> finish with BFGS if maxiters === nothing - res1 = GalacticOptim.solve(prob, Optimisers.ADAM(0.01), args...; maxiters=300, kwargs...) + res1 = Optimization.solve(prob, Optimisers.ADAM(0.01), args...; maxiters=300, kwargs...) else - res1 = GalacticOptim.solve(prob, Optimisers.ADAM(0.01), args...; maxiters, kwargs...) + res1 = Optimization.solve(prob, Optimisers.ADAM(0.01), args...; maxiters, kwargs...) end optprob2 = remake(prob,u0 = res1.u) - res1 = GalacticOptim.solve( + res1 = Optimization.solve( optprob2, BFGS(initial_stepnorm=0.01), args...; maxiters, kwargs...) elseif isempty(args) && deterministic - res1 = GalacticOptim.solve( + res1 = Optimization.solve( prob, BFGS(initial_stepnorm=0.01), args...; maxiters, kwargs...) else - res1 = GalacticOptim.solve(prob, Optimisers.ADAM(0.1), args...; maxiters, kwargs...) + res1 = Optimization.solve(prob, Optimisers.ADAM(0.1), args...; maxiters, kwargs...) 
end end diff --git a/lib/OptimizationPolyalgorithms/test/runtests.jl b/lib/OptimizationPolyalgorithms/test/runtests.jl new file mode 100644 index 000000000..67bac12f0 --- /dev/null +++ b/lib/OptimizationPolyalgorithms/test/runtests.jl @@ -0,0 +1,14 @@ +using OptimizationPolyalgorithms, Optimization, ForwardDiff +using Test + +@testset "OptimizationPolyalgorithms.jl" begin + rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 + x0 = zeros(2) + _p = [1.0, 100.0] + l1 = rosenbrock(x0, _p) + + optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + prob = OptimizationProblem(optprob, x0, _p) + sol = Optimization.solve(prob, PolyOpt(), maxiters=1000) + @test 10 * sol.minimum < l1 +end diff --git a/lib/GalacticQuadDIRECT/LICENSE b/lib/OptimizationQuadDIRECT/LICENSE similarity index 100% rename from lib/GalacticQuadDIRECT/LICENSE rename to lib/OptimizationQuadDIRECT/LICENSE diff --git a/lib/GalacticQuadDIRECT/Project.toml b/lib/OptimizationQuadDIRECT/Project.toml similarity index 62% rename from lib/GalacticQuadDIRECT/Project.toml rename to lib/OptimizationQuadDIRECT/Project.toml index 6b5a02b92..29e743abe 100644 --- a/lib/GalacticQuadDIRECT/Project.toml +++ b/lib/OptimizationQuadDIRECT/Project.toml @@ -1,14 +1,14 @@ -name = "GalacticQuadDIRECT" -uuid = "4bf6b49a-7500-4e3e-a673-0c4a2031f894" +name = "OptimizationQuadDIRECT" +uuid = "842ac81e-713d-465f-80f7-84eddaced298" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" [compat] julia = "1" -GalacticOptim = "3" +Optimization = "3" [extras] Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" diff --git a/lib/GalacticQuadDIRECT/src/GalacticQuadDIRECT.jl b/lib/OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl similarity index 93% rename from lib/GalacticQuadDIRECT/src/GalacticQuadDIRECT.jl rename to lib/OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl index 
453d9420d..faa8a7850 100644 --- a/lib/GalacticQuadDIRECT/src/GalacticQuadDIRECT.jl +++ b/lib/OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl @@ -1,6 +1,6 @@ -module GalacticQuadDIRECT +module OptimizationQuadDIRECT -using QuadDIRECT, GalacticOptim, GalacticOptim.SciMLBase +using QuadDIRECT, Optimization, Optimization.SciMLBase export QuadDirect @@ -48,7 +48,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::QuadDirect; local x, _loss - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) + maxiters = Optimization._check_and_convert_maxiters(maxiters) if splits === nothing error("You must provide the initial locations at which to evaluate the function in `splits` (a list of 3-vectors with values in strictly increasing order and within the specified bounds).") diff --git a/lib/GalacticQuadDIRECT/test/runtests.jl b/lib/OptimizationQuadDIRECT/test/runtests.jl similarity index 84% rename from lib/GalacticQuadDIRECT/test/runtests.jl rename to lib/OptimizationQuadDIRECT/test/runtests.jl index f9e2ed684..818728c2d 100644 --- a/lib/GalacticQuadDIRECT/test/runtests.jl +++ b/lib/OptimizationQuadDIRECT/test/runtests.jl @@ -1,8 +1,8 @@ using Pkg; Pkg.develop(url="https://github.com/timholy/QuadDIRECT.jl.git"); -using GalacticQuadDIRECT, GalacticOptim +using OptimizationQuadDIRECT, Optimization using Test -@testset "GalacticQuadDIRECT.jl" begin +@testset "OptimizationQuadDIRECT.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] diff --git a/lib/GalacticSpeedMapping/LICENSE b/lib/OptimizationSpeedMapping/LICENSE similarity index 100% rename from lib/GalacticSpeedMapping/LICENSE rename to lib/OptimizationSpeedMapping/LICENSE diff --git a/lib/GalacticSpeedMapping/Project.toml b/lib/OptimizationSpeedMapping/Project.toml similarity index 69% rename from lib/GalacticSpeedMapping/Project.toml rename to lib/OptimizationSpeedMapping/Project.toml index 117b68a08..fc3aefc0a 100644 --- 
a/lib/GalacticSpeedMapping/Project.toml +++ b/lib/OptimizationSpeedMapping/Project.toml @@ -1,15 +1,15 @@ -name = "GalacticSpeedMapping" -uuid = "27412f8d-bbde-4e3b-aff6-6d61babb7c0e" +name = "OptimizationSpeedMapping" +uuid = "3d669222-0d7d-4eb9-8a9f-d8528b0d9b91" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" [deps] -GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412" [compat] julia = "1" -GalacticOptim = "3" +Optimization = "3" SpeedMapping = "0.3" [extras] diff --git a/lib/GalacticSpeedMapping/src/GalacticSpeedMapping.jl b/lib/OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl similarity index 86% rename from lib/GalacticSpeedMapping/src/GalacticSpeedMapping.jl rename to lib/OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl index 2ee32016a..427a5a1db 100644 --- a/lib/GalacticSpeedMapping/src/GalacticSpeedMapping.jl +++ b/lib/OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl @@ -1,6 +1,6 @@ -module GalacticSpeedMapping +module OptimizationSpeedMapping -using SpeedMapping, GalacticOptim, GalacticOptim.SciMLBase +using SpeedMapping, Optimization, Optimization.SciMLBase export SpeedMappingOpt @@ -47,10 +47,10 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::SpeedMappingOpt; kwargs...) 
local x - maxiters = GalacticOptim._check_and_convert_maxiters(maxiters) - maxtime = GalacticOptim._check_and_convert_maxtime(maxtime) + maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxtime = Optimization._check_and_convert_maxtime(maxtime) - f = GalacticOptim.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) + f = Optimization.instantiate_function(prob.f, prob.u0, prob.f.adtype, prob.p) _loss = function (θ) x = f.f(θ, prob.p) diff --git a/lib/GalacticSpeedMapping/test/runtests.jl b/lib/OptimizationSpeedMapping/test/runtests.jl similarity index 80% rename from lib/GalacticSpeedMapping/test/runtests.jl rename to lib/OptimizationSpeedMapping/test/runtests.jl index 2a60a9fc2..4b72d18aa 100644 --- a/lib/GalacticSpeedMapping/test/runtests.jl +++ b/lib/OptimizationSpeedMapping/test/runtests.jl @@ -1,12 +1,12 @@ -using GalacticSpeedMapping, GalacticOptim, ForwardDiff +using OptimizationSpeedMapping, Optimization, ForwardDiff using Test -@testset "GalacticSpeedMapping.jl" begin +@testset "OptimizationSpeedMapping.jl" begin rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) + f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(f, x0, _p) sol = solve(prob, SpeedMappingOpt()) @test 10 * sol.minimum < l1 diff --git a/src/GalacticOptim.jl b/src/Optimization.jl similarity index 98% rename from src/GalacticOptim.jl rename to src/Optimization.jl index c7550a7c5..d61872ea1 100644 --- a/src/GalacticOptim.jl +++ b/src/Optimization.jl @@ -1,7 +1,7 @@ """ $(DocStringExtensions.README) """ -module GalacticOptim +module Optimization using DocStringExtensions using Reexport diff --git a/src/function/function.jl b/src/function/function.jl index bac5f0c77..82423fab1 100644 --- a/src/function/function.jl +++ b/src/function/function.jl @@ -1,7 +1,7 @@ """ instantiate_function(f, x, 
::AbstractADType, p, num_cons = 0)::OptimizationFunction -This function is used internally by GalacticOptim.jl to construct +This function is used internally by Optimization.jl to construct the necessary extra functions (gradients, Hessians, etc.) before optimization. Each of the ADType dispatches use the supplied automatic differentiation type in order to specify how the construction process diff --git a/test/ADtests.jl b/test/ADtests.jl index f4d2b50c8..adb4c8968 100644 --- a/test/ADtests.jl +++ b/test/ADtests.jl @@ -1,4 +1,4 @@ -using GalacticOptim, GalacticOptimJL, GalacticOptimisers, Test +using Optimization, OptimizationOptimJL, OptimizationOptimisers, Test using ForwardDiff, Zygote, ReverseDiff, FiniteDiff, Tracker using ModelingToolkit x0 = zeros(2) @@ -26,8 +26,8 @@ g!(G1, x0) h!(H1, x0) cons = (x, p) -> [x[1]^2 + x[2]^2] -optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoModelingToolkit(), cons=cons) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoModelingToolkit(), nothing, 1) +optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit(), cons=cons) +optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoModelingToolkit(), nothing, 1) optprob.grad(G2, x0) @test G1 == G2 optprob.hess(H2, x0) @@ -43,8 +43,8 @@ optprob.cons_h(H3, x0) function con2_c(x, p) [x[1]^2 + x[2]^2, x[2] * sin(x[1]) - x[1]] end -optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoModelingToolkit(), cons=con2_c) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoModelingToolkit(), nothing, 2) +optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit(), cons=con2_c) +optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoModelingToolkit(), nothing, 2) optprob.grad(G2, x0) @test G1 == G2 optprob.hess(H2, x0) @@ -57,8 +57,8 @@ H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 == [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] 
-optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoModelingToolkit(true, true), cons=con2_c) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoModelingToolkit(true, true), nothing, 2) +optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit(true, true), cons=con2_c) +optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoModelingToolkit(true, true), nothing, 2) using SparseArrays sH = sparse([1, 1, 2, 2], [1, 2, 1, 2], zeros(4)) optprob.hess(sH, x0) @@ -71,8 +71,8 @@ sH3 = [sparse([1,2], [1, 2], zeros(2)), sparse([1, 1, 2], [1, 2, 1], zeros(3))] optprob.cons_h(sH3, x0) @test Array.(sH3) == [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] -optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff()) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoForwardDiff(), nothing) +optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) +optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoForwardDiff(), nothing) optprob.grad(G2, x0) @test G1 == G2 optprob.hess(H2, x0) @@ -89,8 +89,8 @@ sol = solve(prob, Optim.Newton()) sol = solve(prob, Optim.KrylovTrustRegion()) @test 10 * sol.minimum < l1 -optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoZygote()) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoZygote(), nothing) +optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) +optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoZygote(), nothing) optprob.grad(G2, x0) @test G1 == G2 optprob.hess(H2, x0) @@ -107,8 +107,8 @@ sol = solve(prob, Optim.Newton()) sol = solve(prob, Optim.KrylovTrustRegion()) @test 10 * sol.minimum < l1 -optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoReverseDiff()) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoReverseDiff(), nothing) +optf = OptimizationFunction(rosenbrock, Optimization.AutoReverseDiff()) +optprob = 
Optimization.instantiate_function(optf, x0, Optimization.AutoReverseDiff(), nothing) optprob.grad(G2, x0) @test G1 == G2 optprob.hess(H2, x0) @@ -124,8 +124,8 @@ sol = solve(prob, Optim.Newton()) sol = solve(prob, Optim.KrylovTrustRegion()) @test 10 * sol.minimum < l1 -optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoTracker()) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoTracker(), nothing) +optf = OptimizationFunction(rosenbrock, Optimization.AutoTracker()) +optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoTracker(), nothing) optprob.grad(G2, x0) @test G1 == G2 @test_throws ErrorException optprob.hess(H2, x0) @@ -138,8 +138,8 @@ sol = solve(prob, Optim.BFGS()) @test_throws ErrorException solve(prob, Newton()) -optf = OptimizationFunction(rosenbrock, GalacticOptim.AutoFiniteDiff()) -optprob = GalacticOptim.instantiate_function(optf, x0, GalacticOptim.AutoFiniteDiff(), nothing) +optf = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff()) +optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoFiniteDiff(), nothing) optprob.grad(G2, x0) @test G1 ≈ G2 rtol = 1e-6 optprob.hess(H2, x0) diff --git a/test/diffeqfluxtests.jl b/test/diffeqfluxtests.jl index d6225909e..f776ac046 100644 --- a/test/diffeqfluxtests.jl +++ b/test/diffeqfluxtests.jl @@ -1,4 +1,4 @@ -using OrdinaryDiffEq, DiffEqFlux, GalacticOptim, GalacticOptimJL, GalacticOptimisers, ForwardDiff +using OrdinaryDiffEq, DiffEqFlux, Optimization, OptimizationOptimJL, OptimizationOptimisers, ForwardDiff function lotka_volterra!(du, u, p, t) x, y = u @@ -46,11 +46,11 @@ callback = function (p, l, pred) return false end -optprob = OptimizationFunction((x,p) -> loss_adjoint(x), GalacticOptim.AutoForwardDiff()) +optprob = OptimizationFunction((x,p) -> loss_adjoint(x), Optimization.AutoForwardDiff()) -prob = GalacticOptim.OptimizationProblem(optprob, p) +prob = Optimization.OptimizationProblem(optprob, p) -result_ode = 
GalacticOptim.solve(prob, +result_ode = Optimization.solve(prob, BFGS(initial_stepnorm = 0.0001), callback = callback) @@ -97,17 +97,17 @@ callback = function (p, l, pred) return false end -optprob = OptimizationFunction( (p,x) -> loss_neuralode(p), GalacticOptim.AutoForwardDiff()) +optprob = OptimizationFunction( (p,x) -> loss_neuralode(p), Optimization.AutoForwardDiff()) -prob = GalacticOptim.OptimizationProblem(optprob, prob_neuralode.p) +prob = Optimization.OptimizationProblem(optprob, prob_neuralode.p) -result_neuralode = GalacticOptim.solve(prob, - GalacticOptimisers.ADAM(), callback = callback, +result_neuralode = Optimization.solve(prob, + OptimizationOptimisers.ADAM(), callback = callback, maxiters = 300) @test result_neuralode.minimum == loss_neuralode(result_neuralode.u)[1] prob2 = remake(prob,u0=result_neuralode.u) -result_neuralode2 = GalacticOptim.solve(prob2, +result_neuralode2 = Optimization.solve(prob2, BFGS(initial_stepnorm=0.0001), callback = callback, maxiters = 100) diff --git a/test/minibatch.jl b/test/minibatch.jl index 757c5dc72..a31fafae3 100644 --- a/test/minibatch.jl +++ b/test/minibatch.jl @@ -1,4 +1,4 @@ -using DiffEqFlux, GalacticOptim, OrdinaryDiffEq, GalacticOptimisers +using DiffEqFlux, Optimization, OrdinaryDiffEq, OptimizationOptimisers function newtons_cooling(du, u, p, t) temp = u[1] @@ -54,8 +54,8 @@ train_loader = Flux.Data.DataLoader((ode_data, t), batchsize = k) numEpochs = 300 l1 = loss_adjoint(pp, train_loader.data[1], train_loader.data[2])[1] -optfun = OptimizationFunction((θ, p, batch, time_batch) -> loss_adjoint(θ, batch, time_batch), GalacticOptim.AutoZygote()) +optfun = OptimizationFunction((θ, p, batch, time_batch) -> loss_adjoint(θ, batch, time_batch), Optimization.AutoZygote()) optprob = OptimizationProblem(optfun, pp) using IterTools: ncycle -res1 = GalacticOptim.solve(optprob, Optimisers.ADAM(0.05), ncycle(train_loader, numEpochs), callback = callback, maxiters = numEpochs) +res1 = Optimization.solve(optprob, 
Optimisers.ADAM(0.05), ncycle(train_loader, numEpochs), callback = callback, maxiters = numEpochs) @test 10res1.minimum < l1 diff --git a/test/runtests.jl b/test/runtests.jl index 83ef9fde5..1bad75123 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -16,8 +16,8 @@ end @time begin if GROUP == "All" || GROUP == "Core" - dev_subpkg("GalacticOptimJL") - dev_subpkg("GalacticOptimisers") + dev_subpkg("OptimizationOptimJL") + dev_subpkg("OptimizationOptimisers") @safetestset "AD Tests" begin include("ADtests.jl") end @@ -28,8 +28,8 @@ if GROUP == "All" || GROUP == "Core" include("diffeqfluxtests.jl") end elseif GROUP == "GPU" - dev_subpkg("GalacticOptimJL") - dev_subpkg("GalacticOptimisers") + dev_subpkg("OptimizationOptimJL") + dev_subpkg("OptimizationOptimisers") activate_downstream_env() @safetestset "DiffEqFlux GPU" begin include("downstream/gpu_neural_ode.jl") From dcc04294d5007cc47fd5f3c2b1c69b202f70c193 Mon Sep 17 00:00:00 2001 From: Chris Rackauckas Date: Mon, 30 May 2022 21:12:56 -0400 Subject: [PATCH 2/4] give a new UUID? 
--- lib/OptimizationPolyalgorithms/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml index e68759dcb..f51982ca7 100644 --- a/lib/OptimizationPolyalgorithms/Project.toml +++ b/lib/OptimizationPolyalgorithms/Project.toml @@ -1,5 +1,5 @@ name = "OptimizationPolyalgorithms" -uuid = "869ac0a3-20d1-4dac-a63a-5f8d7406e689" +uuid = "500b13db-7e66-49ce-bda4-eed966be6282" authors = ["Vaibhav Dixit and contributors"] version = "0.1.0" From adc8e3a91efd17cd94feafaac76aac94506d7f41 Mon Sep 17 00:00:00 2001 From: Chris Rackauckas Date: Tue, 31 May 2022 01:58:15 -0400 Subject: [PATCH 3/4] bump test --- Project.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Project.toml b/Project.toml index eb4f83c33..5267b971e 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,5 @@ name = "Optimization" uuid = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -authors = ["Vaibhavdixit02 "] version = "3.5.0" [deps] From d79e5e971d5dd3fd3ee3c00a1ac3c0354666fb8e Mon Sep 17 00:00:00 2001 From: Chris Rackauckas Date: Tue, 31 May 2022 09:41:23 -0400 Subject: [PATCH 4/4] make sure to dev --- docs/make.jl | 2 +- test/runtests.jl | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index 8553a0269..1d65fd825 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -14,7 +14,7 @@ makedocs( canonical="https://Optimization.sciml.ai/stable/"), pages=[ - "Optimization.jl: Unified Global Optimization Package" => "index.md", + "Optimization.jl: A Unified Optimization Package" => "index.md", "Tutorials" => [ "Basic usage" => "tutorials/intro.md", diff --git a/test/runtests.jl b/test/runtests.jl index 1bad75123..ad036618e 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -14,10 +14,13 @@ function activate_subpkg_env(subpkg) Pkg.instantiate() end -@time begin -if GROUP == "All" || GROUP == "Core" +if GROUP == "All" || GROUP == "Core" || GROUP == 
"GPU" || GROUP == "OptimizationPolyalgorithms" dev_subpkg("OptimizationOptimJL") dev_subpkg("OptimizationOptimisers") +end + +@time begin +if GROUP == "All" || GROUP == "Core" @safetestset "AD Tests" begin include("ADtests.jl") end @@ -28,8 +31,6 @@ if GROUP == "All" || GROUP == "Core" include("diffeqfluxtests.jl") end elseif GROUP == "GPU" - dev_subpkg("OptimizationOptimJL") - dev_subpkg("OptimizationOptimisers") activate_downstream_env() @safetestset "DiffEqFlux GPU" begin include("downstream/gpu_neural_ode.jl")