feature/libe_gen_wrapper + Feature/asktell aposmm #209

Open

wants to merge 43 commits into main

Changes from 35 commits

Commits (43)
985f611
Point at libEnsemble asktell branch
shuds13 Apr 19, 2024
af48c3c
Add ability to call a libE ask/tell generator
shuds13 Apr 19, 2024
707b7cb
Add example that calls libE RandSample generator
shuds13 Apr 19, 2024
dd62c06
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Apr 19, 2024
dc7d54a
Merge remote-tracking branch 'upstream/feature/libe_gen_wrapper' into…
jlnav May 8, 2024
d5a1d4b
adjustments to combine aposmm-settings and user-settings, plus call s…
jlnav May 9, 2024
18c1bad
pass in a parameterized APOSMM instance
jlnav May 10, 2024
1d10b02
only pass in necessary fields to libE_gen (??)
jlnav May 10, 2024
e887f06
add dummy_aposmm_libE_gen example
jlnav May 10, 2024
1b9b966
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 10, 2024
838d0d1
debugging, replacing objective with 6hc, noticing that optimas tries …
jlnav May 13, 2024
09bf72d
insert sample points, fix incorrect data being passed back by allowin…
jlnav May 14, 2024
f887fca
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 14, 2024
d77f36d
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 14, 2024
3aea989
trying to pass more data back to aposmm...
jlnav May 14, 2024
c9542d2
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 14, 2024
258c015
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 14, 2024
b4b970c
remove a debug print
jlnav May 14, 2024
715473a
adjust run_example constants and bounds, cleanup wrapper's _tell to s…
jlnav May 16, 2024
dd3e3c8
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 16, 2024
9f71994
Merge branch 'main' into feature/asktell_aposmm
jlnav May 22, 2024
2674dbe
combine libe_gen_instance and libe_gen_class into one parameter, some…
jlnav May 22, 2024
1ed3c31
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 22, 2024
81a3b07
pull aposmm's ub and lb from VaryingParameters
jlnav May 22, 2024
e4dc0ba
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 22, 2024
ab20e98
tentative docstrings
jlnav May 23, 2024
c797c9c
attach libE_calc_in to trial, remove final_tell call in persistent_ge…
jlnav May 23, 2024
859669d
return final_tell to persistent_generator (for now?)
jlnav May 23, 2024
751563f
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 23, 2024
f8c3fc2
introduces Exploration.finalize(). Moves gen.final_tell into that met…
jlnav May 24, 2024
ca44a9c
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 24, 2024
ce47066
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 24, 2024
dc294e5
make additional APOSMMWrapper subclass of libEWrapper, with additiona…
jlnav May 28, 2024
720bf8f
docstrings
jlnav May 28, 2024
696e00e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 28, 2024
ce8beb8
rough-comparison between aposmm-detected minima and known minima, add…
jlnav May 31, 2024
bb26486
additional documentation
jlnav May 31, 2024
9f2a059
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 31, 2024
52dbd2e
indexing fix
jlnav Jun 3, 2024
08a835b
fixes precision, but more runs are needed
jlnav Jun 5, 2024
bf32ec4
Merge branch 'main' into feature/asktell_aposmm
jlnav Jul 22, 2024
6232b41
adjust wrappers to use upstream class's ask_np and tell_np
jlnav Jul 29, 2024
595fd86
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 29, 2024
101 changes: 101 additions & 0 deletions examples/dummy_aposmm_libE_gen/run_example.py
@@ -0,0 +1,101 @@
"""Basic example of parallel random sampling with simulations."""

from math import gamma, pi, sqrt
import numpy as np
from libensemble.generators import APOSMM
import libensemble.gen_funcs

libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt"
from optimas.core import VaryingParameter, Objective
from libensemble.tests.regression_tests.support import (
six_hump_camel_minima as minima,
)

# from optimas.generators import RandomSamplingGenerator
from optimas.generators import APOSMMWrapper
from optimas.evaluators import TemplateEvaluator
from optimas.explorations import Exploration

from multiprocessing import set_start_method

set_start_method("fork", force=True)


def analyze_simulation(simulation_directory, output_params):
"""Analyze the simulation output.

This method analyzes the output generated by the simulation to
obtain the value of the optimization objective and other analyzed
parameters, if specified. The value of these parameters has to be
given to the `output_params` dictionary.

Parameters
----------
simulation_directory : str
Path to the simulation folder where the output was generated.
output_params : dict
Dictionary where the value of the objectives and analyzed parameters
will be stored. There is one entry per parameter, where the key
is the name of the parameter given by the user.

Returns
-------
dict
The `output_params` dictionary with the results from the analysis.

"""
# Read back result from file
with open("result.txt") as f:
result = float(f.read())
# Fill in output parameters.
output_params["f"] = result
return output_params


# Create varying parameters and objectives.
var_1 = VaryingParameter("x0", -3.0, 3.0)
var_2 = VaryingParameter("x1", -2.0, 2.0)
obj = Objective("f")

n = 2

aposmm = APOSMM(
initial_sample_size=100,
localopt_method="LN_BOBYQA",
sample_points=np.round(minima, 1),
rk_const=0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
xtol_abs=1e-2,
ftol_abs=1e-2,
dist_to_bound_multiple=0.5,
max_active_runs=4, # refers to APOSMM's simul local optimization runs
lb=np.array([var_1.lower_bound, var_2.lower_bound]),
ub=np.array([var_1.upper_bound, var_2.upper_bound]),
Review comment on lines +71 to +72 (Collaborator):
Could we replace this with:

variables = {'x0':[-3.0, 3.0], 'x1':[-2.0, 2.0]},
objectives = {'f': 'MINIMIZE'}

as discussed here: campa-consortium/generator_standard#16

)

gen = APOSMMWrapper(
varying_parameters=[var_1, var_2],
objectives=[obj],
libe_gen=aposmm,
)

# Create evaluator.
ev = TemplateEvaluator(
sim_template="template_simulation_script.py",
analysis_func=analyze_simulation,
)


# Create exploration.
exp = Exploration(
generator=gen, evaluator=ev, max_evals=300, sim_workers=4, run_async=True
)


# To safely perform exploration, run it in the block below (this is needed
# for some flavours of multiprocessing, namely spawn and forkserver)
if __name__ == "__main__":
exp.run(100)
exp.run(200)
exp.finalize()
assert len(gen.libe_gen.all_local_minima)
Review comment (Collaborator): Do we want to check that the minima have the expected values?

print(f"Found {len(gen.libe_gen.all_local_minima)} minima!")
22 changes: 22 additions & 0 deletions examples/dummy_aposmm_libE_gen/template_simulation_script.py
@@ -0,0 +1,22 @@
"""Simple template script used for demonstration.

The script evaluates an analytical expression and stores the results in a
`result.txt` file that is later read by the analysis function.
"""

import numpy as np

# 2D function with multiple minima
# result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}}))

x1 = {{x0}}
x2 = {{x1}}

term1 = (4 - 2.1 * x1**2 + (x1**4) / 3) * x1**2
term2 = x1 * x2
term3 = (-4 + 4 * x2**2) * x2**2

result = term1 + term2 + term3

with open("result.txt", "w") as f:
f.write("%f" % result)
76 changes: 76 additions & 0 deletions examples/dummy_random_libEgen/run_example.py
@@ -0,0 +1,76 @@
"""Basic example of parallel random sampling with simulations."""

from libensemble.gen_funcs.persistent_sampling import RandSample
from optimas.core import VaryingParameter, Objective

# from optimas.generators import RandomSamplingGenerator
from optimas.generators import libEWrapper
from optimas.evaluators import TemplateEvaluator
from optimas.explorations import Exploration


def analyze_simulation(simulation_directory, output_params):
"""Analyze the simulation output.

This method analyzes the output generated by the simulation to
obtain the value of the optimization objective and other analyzed
parameters, if specified. The value of these parameters has to be
given to the `output_params` dictionary.

Parameters
----------
simulation_directory : str
Path to the simulation folder where the output was generated.
output_params : dict
Dictionary where the value of the objectives and analyzed parameters
will be stored. There is one entry per parameter, where the key
is the name of the parameter given by the user.

Returns
-------
dict
The `output_params` dictionary with the results from the analysis.

"""
# Read back result from file
with open("result.txt") as f:
result = float(f.read())
# Fill in output parameters.
output_params["f"] = result
return output_params


# Create varying parameters and objectives.
var_1 = VaryingParameter("x0", 0.0, 15.0)
var_2 = VaryingParameter("x1", 0.0, 15.0)
obj = Objective("f")


# Create generator.
# gen = RandomSamplingGenerator(
# varying_parameters=[var_1, var_2], objectives=[obj], distribution="normal"
# )

gen = libEWrapper(
varying_parameters=[var_1, var_2],
objectives=[obj],
libe_gen=RandSample,
)

# Create evaluator.
ev = TemplateEvaluator(
sim_template="template_simulation_script.py",
analysis_func=analyze_simulation,
)


# Create exploration.
exp = Exploration(
generator=gen, evaluator=ev, max_evals=10, sim_workers=4, run_async=True
)


# To safely perform exploration, run it in the block below (this is needed
# for some flavours of multiprocessing, namely spawn and forkserver)
if __name__ == "__main__":
exp.run()
13 changes: 13 additions & 0 deletions examples/dummy_random_libEgen/template_simulation_script.py
@@ -0,0 +1,13 @@
"""Simple template script used for demonstration.

The script evaluates an analytical expression and stores the results in a
`result.txt` file that is later read by the analysis function.
"""

import numpy as np

# 2D function with multiple minima
result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}}))

with open("result.txt", "w") as f:
f.write("%f" % result)
8 changes: 8 additions & 0 deletions optimas/explorations/base.py
@@ -16,6 +16,7 @@
from libensemble.executors.mpi_executor import MPIExecutor

from optimas.core.trial import TrialStatus
from optimas.generators.libE_wrapper import libEWrapper
from optimas.generators.base import Generator
from optimas.evaluators.base import Evaluator
from optimas.evaluators.function_evaluator import FunctionEvaluator
@@ -220,6 +221,13 @@ def run(self, n_evals: Optional[int] = None) -> None:
# Reset `cwd` to initial value before `libE` was called.
os.chdir(cwd)

def finalize(self) -> None:
"""Finalize the exploration, cleanup generator."""
if isinstance(self.generator, libEWrapper):
self.generator.libe_gen.final_tell(
self._libe_history.H[["sim_id", "f"]]
)

def attach_trials(
self,
trial_data: Union[Dict, List[Dict], np.ndarray, pd.DataFrame],
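For reference, a minimal usage sketch of the new finalize() hook, taken almost verbatim from the aposmm example above (nothing here is new API beyond what this diff adds):

# Sketch only: finalize() forwards the history's ("sim_id", "f") columns to the
# wrapped generator's final_tell(), so it is called once, after all runs complete.
if __name__ == "__main__":
    exp.run(100)    # first batch of evaluations
    exp.run(200)    # remaining evaluations (max_evals=300 caps the exploration)
    exp.finalize()  # flush final results to the libEnsemble generator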
5 changes: 5 additions & 0 deletions optimas/gen_functions.py
@@ -48,6 +48,10 @@ def persistent_generator(H, persis_info, gen_specs, libE_info):

# Get generator, objectives, and parameters to analyze.
generator = gen_specs["user"]["generator"]

if hasattr(generator, "libe_gen"):
generator.init_libe_gen(H, persis_info, gen_specs, libE_info)

objectives = generator.objectives
analyzed_parameters = generator.analyzed_parameters

@@ -109,6 +113,7 @@ def persistent_generator(H, persis_info, gen_specs, libE_info):
y = calc_in[par.name][i]
ev = Evaluation(parameter=par, value=y)
trial.complete_evaluation(ev)
trial.libE_calc_in = calc_in[i]
# Register trial with unknown SEM
generator.tell([trial])
# Set the number of points to generate to that number:
4 changes: 4 additions & 0 deletions optimas/generators/__init__.py
@@ -22,6 +22,8 @@
from .grid_sampling import GridSamplingGenerator
from .line_sampling import LineSamplingGenerator
from .random_sampling import RandomSamplingGenerator
from .libE_wrapper import libEWrapper
from .aposmm import APOSMMWrapper


__all__ = [
@@ -32,4 +34,6 @@
"GridSamplingGenerator",
"LineSamplingGenerator",
"RandomSamplingGenerator",
"libEWrapper",
"APOSMMWrapper",
]
104 changes: 104 additions & 0 deletions optimas/generators/aposmm.py
@@ -0,0 +1,104 @@
import numpy as np
from typing import List

from libensemble.generators import LibEnsembleGenInterfacer

from optimas.core import (
Objective,
Trial,
VaryingParameter,
Parameter,
TrialParameter,
)
from .libE_wrapper import libEWrapper


class APOSMMWrapper(libEWrapper):
"""
Wraps a live, parameterized APOSMM generator instance. Note that .tell() parameters
are internally cached until either the initial sample or N points (for N local-optimization processes)
are evaluated.
"""

def __init__(
self,
varying_parameters: List[VaryingParameter],
objectives: List[Objective],
libe_gen=None,
) -> None:
custom_trial_parameters = [
TrialParameter(
"x_on_cube", dtype=(float, (len(varying_parameters),))
),
TrialParameter("local_pt", dtype=bool),
]
super().__init__(
varying_parameters=varying_parameters,
objectives=objectives,
custom_trial_parameters=custom_trial_parameters,
libe_gen=libe_gen,
)
self.libe_gen = libe_gen
self.num_evals = 0
self._told_initial_sample = False

def _slot_in_data(self, trial):
"""Slot in libE_calc_in and trial data into corresponding array fields."""
self.new_array["f"][self.num_evals] = trial.libE_calc_in["f"]
self.new_array["x"][self.num_evals] = trial.parameter_values
self.new_array["sim_id"][self.num_evals] = trial.libE_calc_in["sim_id"]
self.new_array["x_on_cube"][self.num_evals] = trial.x_on_cube
self.new_array["local_pt"][self.num_evals] = trial.local_pt

@property
def _array_size(self):
"""Output array size must match either initial sample or N points to evaluate in parallel."""
user = self.libe_gen.gen_specs["user"]
return (
user["initial_sample_size"]
if not self._told_initial_sample
else user["max_active_runs"]
)

@property
def _enough_initial_sample(self):
"""We're typically happy with at least 90% of the initial sample."""
return self.num_evals > int(
0.9 * self.libe_gen.gen_specs["user"]["initial_sample_size"]
)

@property
def _enough_subsequent_points(self):
"""But we need to evaluate at least N points, for the N local-optimization processes."""
return (
self.num_evals >= self.libe_gen.gen_specs["user"]["max_active_runs"]
)

def _ask(self, trials: List[Trial]) -> List[Trial]:
"""Fill in the parameter values of the requested trials."""
n_trials = len(trials)
gen_out = self.libe_gen.ask(n_trials)

for i, trial in enumerate(trials):
trial.parameter_values = gen_out[i]["x"]
trial.x_on_cube = gen_out[i]["x_on_cube"]
trial.local_pt = gen_out[i]["local_pt"]

return trials

def _tell(self, trials: List[Trial]) -> None:
"""Pass objective values to generator, slotting/caching into APOSMM's expected results array."""
trial = trials[0]
if self.num_evals == 0:
self.new_array = self.libe_gen.create_results_array(
self._array_size, empty=True
)
self._slot_in_data(trial)
self.num_evals += 1
if not self._told_initial_sample and self._enough_initial_sample:
self.libe_gen.tell(self.new_array)
self._told_initial_sample = True
self.num_evals = 0
elif self._told_initial_sample and self._enough_subsequent_points:
self.libe_gen.tell(self.new_array)
self.num_evals = 0 # reset, create a new array next time around
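To make the batching in _tell concrete, here is a toy, self-contained trace of the flush thresholds using the example's settings (initial_sample_size=100, max_active_runs=4). It only mimics the counter logic above and does not call libEnsemble.

# Toy illustration (not part of the PR) of when APOSMMWrapper._tell would
# forward cached results to the wrapped APOSMM instance.
initial_sample_size = 100
max_active_runs = 4

told_initial = False
num_evals = 0
flush_points = []
for eval_no in range(1, 121):  # simulate 120 completed evaluations
    num_evals += 1
    if not told_initial and num_evals > int(0.9 * initial_sample_size):
        flush_points.append(eval_no)  # first tell: just past 90% of the initial sample
        told_initial = True
        num_evals = 0
    elif told_initial and num_evals >= max_active_runs:
        flush_points.append(eval_no)  # subsequent tells: every max_active_runs results
        num_evals = 0

print(flush_points)  # [91, 95, 99, 103, 107, 111, 115, 119]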