Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feature/libe_gen_wrapper + Feature/asktell aposmm #209

Open
wants to merge 43 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 20 commits
Commits
Show all changes
43 commits
Select commit Hold shift + click to select a range
985f611
Point at libEnsemble asktell branch
shuds13 Apr 19, 2024
af48c3c
Add ability to call a libE ask/tell generator
shuds13 Apr 19, 2024
707b7cb
Add example that calls libE RandSample generator
shuds13 Apr 19, 2024
dd62c06
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Apr 19, 2024
dc7d54a
Merge remote-tracking branch 'upstream/feature/libe_gen_wrapper' into…
jlnav May 8, 2024
d5a1d4b
adjustments to combine aposmm-settings and user-settings, plus call s…
jlnav May 9, 2024
18c1bad
pass in a parameterized APOSMM instance
jlnav May 10, 2024
1d10b02
only pass in necessary fields to libE_gen (??)
jlnav May 10, 2024
e887f06
add dummy_aposmm_libE_gen example
jlnav May 10, 2024
1b9b966
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 10, 2024
838d0d1
debugging, replacing objective with 6hc, noticing that optimas tries …
jlnav May 13, 2024
09bf72d
insert sample points, fix incorrect data being passed back by allowin…
jlnav May 14, 2024
f887fca
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 14, 2024
d77f36d
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 14, 2024
3aea989
trying to pass more data back to aposmm...
jlnav May 14, 2024
c9542d2
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 14, 2024
258c015
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 14, 2024
b4b970c
remove a debug print
jlnav May 14, 2024
715473a
adjust run_example constants and bounds, cleanup wrapper's _tell to s…
jlnav May 16, 2024
dd3e3c8
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 16, 2024
9f71994
Merge branch 'main' into feature/asktell_aposmm
jlnav May 22, 2024
2674dbe
combine libe_gen_instance and libe_gen_class into one parameter, some…
jlnav May 22, 2024
1ed3c31
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 22, 2024
81a3b07
pull aposmm's ub and lb from VaryingParameters
jlnav May 22, 2024
e4dc0ba
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 22, 2024
ab20e98
tentative docstrings
jlnav May 23, 2024
c797c9c
attach libE_calc_in to trial, remove final_tell call in persistent_ge…
jlnav May 23, 2024
859669d
return final_tell to persistent_generator (for now?)
jlnav May 23, 2024
751563f
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 23, 2024
f8c3fc2
introduces Exploration.finalize(). Moves gen.final_tell into that met…
jlnav May 24, 2024
ca44a9c
Merge branch 'feature/asktell_aposmm' of https://github.com/jlnav/opt…
jlnav May 24, 2024
ce47066
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 24, 2024
dc294e5
make additional APOSMMWrapper subclass of libEWrapper, with additiona…
jlnav May 28, 2024
720bf8f
docstrings
jlnav May 28, 2024
696e00e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 28, 2024
ce8beb8
rough-comparison between aposmm-detected minima and known minima, add…
jlnav May 31, 2024
bb26486
additional documentation
jlnav May 31, 2024
9f2a059
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 31, 2024
52dbd2e
indexing fix
jlnav Jun 3, 2024
08a835b
fixes precision, but more runs are needed
jlnav Jun 5, 2024
bf32ec4
Merge branch 'main' into feature/asktell_aposmm
jlnav Jul 22, 2024
6232b41
adjust wrappers to use upstream class's ask_np and tell_np
jlnav Jul 29, 2024
595fd86
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 29, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
99 changes: 99 additions & 0 deletions examples/dummy_aposmm_libE_gen/run_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
"""Basic example of parallel random sampling with simulations."""

from math import gamma, pi, sqrt
import numpy as np
from libensemble.generators import APOSMM
import libensemble.gen_funcs

libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt"
from optimas.core import VaryingParameter, Objective
from libensemble.tests.regression_tests.support import (
six_hump_camel_minima as minima,
)

# from optimas.generators import RandomSamplingGenerator
from optimas.generators import libEWrapper
from optimas.evaluators import TemplateEvaluator
from optimas.explorations import Exploration

from multiprocessing import set_start_method

set_start_method("fork", force=True)


def analyze_simulation(simulation_directory, output_params):
    """Analyze the simulation output.

    Reads the objective value produced by the simulation from the
    ``result.txt`` file (opened relative to the current working
    directory — presumably the simulation folder; note that
    ``simulation_directory`` itself is not used here) and stores it
    in ``output_params``.

    Parameters
    ----------
    simulation_directory : str
        Path to the simulation folder where the output was generated.
    output_params : dict
        Dictionary where the value of the objectives and analyzed
        parameters will be stored. There is one entry per parameter,
        where the key is the name of the parameter given by the user.

    Returns
    -------
    dict
        The `output_params` dictionary with the results from the analysis.

    """
    # Read the objective back from the file written by the simulation
    # script and record it under the objective's name.
    with open("result.txt") as result_file:
        output_params["f"] = float(result_file.read())
    return output_params


# Create varying parameters and objectives.
var_1 = VaryingParameter("x0", -3.0, 3.0)
var_2 = VaryingParameter("x1", -2.0, 2.0)
obj = Objective("f")

n = 2

aposmm = APOSMM(
initial_sample_size=100,
localopt_method="LN_BOBYQA",
sample_points=np.round(minima, 1),
rk_const=0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
xtol_abs=1e-2,
ftol_abs=1e-2,
dist_to_bound_multiple=0.5,
max_active_runs=4, # refers to APOSMM's simul local optimization runs
lb=np.array([-3, -2]), # potentially matches the VaryingParameters
ub=np.array([3, 2]),
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

lb/ub duplicated from VaryingParameter. Not sure best approach, but for now we could define these first and use in both places.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yup! Perhaps our generator_standard work may help this avenue.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Insert same source-of-truth values into VaryingParameters and ub and lb

)

gen = libEWrapper(
varying_parameters=[var_1, var_2],
objectives=[obj],
libe_gen_instance=aposmm,
Copy link
Collaborator

@shuds13 shuds13 May 21, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How is this different from libe_gen? This can be set up so that, if a class is provided, it initializes in place,
e.g.,
if inspect.isclass(libe_gen):
initialize...

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That's true; for rough development I was being explicit about what type of object we passed, but we can afford to be smarter now. If something initialized is passed in, keep it as-is; if it's not initialized, initialize it.

I'll take a second look, but I don't think I came across a pure-optimas example that did object initialization within the library instead of exposing it to the user.

)

# Create evaluator.
ev = TemplateEvaluator(
sim_template="template_simulation_script.py",
analysis_func=analyze_simulation,
)


# Create exploration.
exp = Exploration(
generator=gen, evaluator=ev, max_evals=300, sim_workers=4, run_async=True
)


# To safely perform exploration, run it in the block below (this is needed
# for some flavours of multiprocessing, namely spawn and forkserver)
if __name__ == "__main__":
exp.run()
assert len(gen.libe_gen.all_local_minima)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we want to check that the minima have the expected values?

print(f"Found {len(gen.libe_gen.all_local_minima)} minima!")
22 changes: 22 additions & 0 deletions examples/dummy_aposmm_libE_gen/template_simulation_script.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
"""Simple template script used for demonstration.

The script evaluates an analytical expression and stores the results in a
`result.txt` file that is later read by the analysis function.
"""

import numpy as np

# 2D function with multiple minima
# result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}}))

x1 = {{x0}}
x2 = {{x1}}

term1 = (4 - 2.1 * x1**2 + (x1**4) / 3) * x1**2
term2 = x1 * x2
term3 = (-4 + 4 * x2**2) * x2**2

result = term1 + term2 + term3

with open("result.txt", "w") as f:
f.write("%f" % result)
76 changes: 76 additions & 0 deletions examples/dummy_random_libEgen/run_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
"""Basic example of parallel random sampling with simulations."""

from libensemble.gen_funcs.persistent_sampling import RandSample
from optimas.core import VaryingParameter, Objective

# from optimas.generators import RandomSamplingGenerator
from optimas.generators import libEWrapper
from optimas.evaluators import TemplateEvaluator
from optimas.explorations import Exploration


def analyze_simulation(simulation_directory, output_params):
    """Analyze the simulation output.

    Parses the objective value that the simulation script wrote to
    ``result.txt`` — opened relative to the current working directory,
    presumably the simulation folder (``simulation_directory`` itself is
    not used) — and records it in ``output_params``.

    Parameters
    ----------
    simulation_directory : str
        Path to the simulation folder where the output was generated.
    output_params : dict
        Dictionary where the value of the objectives and analyzed
        parameters will be stored. There is one entry per parameter,
        where the key is the name of the parameter given by the user.

    Returns
    -------
    dict
        The `output_params` dictionary with the results from the analysis.

    """
    result_file = "result.txt"
    with open(result_file) as fh:
        raw_text = fh.read()
    # Fill in the objective value read back from the simulation.
    output_params["f"] = float(raw_text)
    return output_params


# Search space: two parameters each varying over [0, 15], plus the
# single objective "f" produced by the analysis function.
varying_params = [
    VaryingParameter("x0", 0.0, 15.0),
    VaryingParameter("x1", 0.0, 15.0),
]
objective = Objective("f")

# Generator: wrap libEnsemble's RandSample ask/tell generator class so
# that it can drive an optimas exploration.
generator = libEWrapper(
    varying_parameters=varying_params,
    objectives=[objective],
    libe_gen_class=RandSample,
)

# Evaluator: renders the template script for each trial and analyzes
# its output with `analyze_simulation`.
evaluator = TemplateEvaluator(
    sim_template="template_simulation_script.py",
    analysis_func=analyze_simulation,
)

# Exploration: 10 evaluations total across 4 asynchronous sim workers.
exploration = Exploration(
    generator=generator,
    evaluator=evaluator,
    max_evals=10,
    sim_workers=4,
    run_async=True,
)

# The "spawn" and "forkserver" multiprocessing start methods re-import
# this module in worker processes, so the run must sit behind the guard.
if __name__ == "__main__":
    exploration.run()
13 changes: 13 additions & 0 deletions examples/dummy_random_libEgen/template_simulation_script.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
"""Simple template script used for demonstration.

The script evaluates an analytical expression and stores the results in a
`result.txt` file that is later read by the analysis function.
"""

import numpy as np

# 2D function with multiple minima
result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}}))

with open("result.txt", "w") as f:
f.write("%f" % result)
11 changes: 10 additions & 1 deletion optimas/gen_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,10 @@ def persistent_generator(H, persis_info, gen_specs, libE_info):

# Get generator, objectives, and parameters to analyze.
generator = gen_specs["user"]["generator"]

if hasattr(generator, "libe_gen_class"):
generator.init_libe_gen(H, persis_info, gen_specs, libE_info)

objectives = generator.objectives
analyzed_parameters = generator.analyzed_parameters

Expand Down Expand Up @@ -110,11 +114,16 @@ def persistent_generator(H, persis_info, gen_specs, libE_info):
ev = Evaluation(parameter=par, value=y)
trial.complete_evaluation(ev)
# Register trial with unknown SEM
generator.tell([trial])
if hasattr(generator, "libe_gen_class"):
generator.tell([trial], libE_calc_in=calc_in[i])
else:
generator.tell([trial])
# Set the number of points to generate to that number:
number_of_gen_points = min(n + n_failed_gens, max_evals - n_gens)
n_failed_gens = 0
else:
number_of_gen_points = 0

if hasattr(generator, "libe_gen_class"):
return generator.final_tell(libE_calc_in=calc_in)
return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
2 changes: 2 additions & 0 deletions optimas/generators/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from .grid_sampling import GridSamplingGenerator
from .line_sampling import LineSamplingGenerator
from .random_sampling import RandomSamplingGenerator
from .libE_wrapper import libEWrapper


__all__ = [
Expand All @@ -32,4 +33,5 @@
"GridSamplingGenerator",
"LineSamplingGenerator",
"RandomSamplingGenerator",
"libEWrapper",
]
10 changes: 8 additions & 2 deletions optimas/generators/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,10 @@ def ask(self, n_trials: int) -> List[Trial]:
return trials

def tell(
self, trials: List[Trial], allow_saving_model: Optional[bool] = True
self,
trials: List[Trial],
allow_saving_model: Optional[bool] = True,
libE_calc_in: Optional[np.typing.NDArray] = None,
) -> None:
"""Give trials back to generator once they have been evaluated.

Expand All @@ -253,7 +256,10 @@ def tell(
for trial in trials:
if trial not in self._given_trials:
self._add_external_evaluated_trial(trial)
self._tell(trials)
if libE_calc_in is not None:
self._tell(trials, libE_calc_in)
else:
self._tell(trials)
for trial in trials:
if not trial.failed:
log_msg = "Completed trial {} with objective(s) {}".format(
Expand Down
Loading