diff --git a/.github/workflows/basic.yml b/.github/workflows/basic.yml index d36888487..a6c483e02 100644 --- a/.github/workflows/basic.yml +++ b/.github/workflows/basic.yml @@ -85,12 +85,20 @@ jobs: pip install -r install/testing_requirements.txt pip install -r install/misc_feature_requirements.txt + git clone --recurse-submodules -b refactor/pounders_API https://github.com/POptUS/IBCDFO.git + pushd IBCDFO/minq/py/minq5/ + export PYTHONPATH="$PYTHONPATH:$(pwd)" + echo "PYTHONPATH=$PYTHONPATH" >> $GITHUB_ENV + popd + pushd IBCDFO/ibcdfo_pypkg/ + pip install -e . + popd + - name: Install mpi4py and MPI from conda run: | conda install mpi4py ${{ matrix.mpi-version }} - name: Install generator dependencies - if: matrix.os != 'windows-latest' && steps.cache.outputs.cache-hit != 'true' run: | python -m pip install --upgrade pip pip install mpmath diff --git a/.github/workflows/extra.yml b/.github/workflows/extra.yml index 5c9eb42bd..21ecbe4ba 100644 --- a/.github/workflows/extra.yml +++ b/.github/workflows/extra.yml @@ -95,7 +95,6 @@ jobs: pip install mpi4py - name: Install generator dependencies - if: steps.cache.outputs.cache-hit != 'true' run: | python -m pip install --upgrade pip conda env update --file install/gen_deps_environment.yml @@ -131,7 +130,7 @@ jobs: cd .. - name: Install generator dependencies for Ubuntu tests - if: matrix.os == 'ubuntu-latest' && steps.cache.outputs.cache-hit != 'true' && matrix.python-version != '3.12' + if: matrix.os == 'ubuntu-latest' && matrix.python-version != '3.12' run: | sudo apt-get install bc @@ -153,7 +152,7 @@ jobs: - name: Copy heffte exe on cache-hit - if: matrix.os == 'ubuntu-latest' && steps.cache.outputs.cache-hit != 'false' && matrix.python-version != '3.12' + if: matrix.os == 'ubuntu-latest' && matrix.python-version != '3.12' run: | cd /home/runner/work/libensemble/libensemble cp ./heffte/build/benchmarks/speed3d_c2c ./libensemble/tests/regression_tests/ @@ -169,6 +168,15 @@ jobs: sed -i -e "s/pyzmq>=22.1.0,<23.0.0/pyzmq>=23.0.0,<24.0.0/" ./balsam/setup.cfg cd balsam; pip install -e .; cd .. + git clone --recurse-submodules -b refactor/pounders_API https://github.com/POptUS/IBCDFO.git + pushd IBCDFO/minq/py/minq5/ + export PYTHONPATH="$PYTHONPATH:$(pwd)" + echo "PYTHONPATH=$PYTHONPATH" >> $GITHUB_ENV + popd + pushd IBCDFO/ibcdfo_pypkg/ + pip install -e . 
+ popd + - uses: actions/cache/save@v3 name: Save dependencies to cache if: matrix.os == 'ubuntu-latest' diff --git a/libensemble/gen_funcs/aposmm_localopt_support.py b/libensemble/gen_funcs/aposmm_localopt_support.py index d43aaee6f..5c109bda2 100644 --- a/libensemble/gen_funcs/aposmm_localopt_support.py +++ b/libensemble/gen_funcs/aposmm_localopt_support.py @@ -7,6 +7,7 @@ "run_local_nlopt", "run_local_tao", "run_local_dfols", + "run_local_ibcdfo_pounders", "run_local_scipy_opt", "run_external_localopt", ] @@ -19,7 +20,12 @@ import libensemble.gen_funcs from libensemble.message_numbers import EVAL_GEN_TAG, STOP_TAG # Only used to simulate receiving from manager -optimizer_list = ["petsc", "nlopt", "dfols", "scipy", "external"] + +class APOSMMException(Exception): + """Raised for any exception in APOSMM""" + + +optimizer_list = ["petsc", "nlopt", "dfols", "scipy", "ibcdfo", "external"] optimizers = libensemble.gen_funcs.rc.aposmm_optimizers if optimizers is not None: @@ -27,7 +33,7 @@ optimizers = [optimizers] unrec = set(optimizers) - set(optimizer_list) if unrec: - print(f"APOSMM Warning: unrecognized optimizers {unrec}") + raise APOSMMException(f"APOSMM Error: unrecognized optimizers {unrec}") # Preferable to import globally in most cases if "petsc" in optimizers: @@ -36,16 +42,14 @@ import nlopt # noqa: F401 if "dfols" in optimizers: import dfols # noqa: F401 + if "ibcdfo" in optimizers: + from ibcdfo import pounders # noqa: F401 if "scipy" in optimizers: from scipy import optimize as sp_opt # noqa: F401 if "external" in optimizers: pass -class APOSMMException(Exception): - """Raised for any exception in APOSMM""" - - class ConvergedMsg(object): """ Message communicated when a local optimization is converged. @@ -117,8 +121,12 @@ def __init__(self, user_specs, x0, f0, grad0=None): run_local_opt = run_local_scipy_opt elif user_specs["localopt_method"] in ["dfols"]: run_local_opt = run_local_dfols + elif user_specs["localopt_method"] in ["ibcdfo_pounders"]: + run_local_opt = run_local_ibcdfo_pounders elif user_specs["localopt_method"] in ["external_localopt"]: run_local_opt = run_external_localopt + else: + raise APOSMMException(f"APOSMM Error: unrecognized method {user_specs['localopt_method']}") self.parent_can_read.clear() self.process = Process( @@ -155,12 +163,7 @@ def iterate(self, data): elif "fvec" in data.dtype.names: self.comm_queue.put((data["x_on_cube"], data["fvec"])) else: - self.comm_queue.put( - ( - data["x_on_cube"], - data["f"], - ) - ) + self.comm_queue.put((data["x_on_cube"], data["f"])) self.child_can_read.set() self.parent_can_read.wait() @@ -414,6 +417,67 @@ def run_local_dfols(user_specs, comm_queue, x0, f0, child_can_read, parent_can_r finish_queue(x_opt, opt_flag, comm_queue, parent_can_read, user_specs) +def run_local_ibcdfo_pounders(user_specs, comm_queue, x0, f0, child_can_read, parent_can_read): + """ + Runs an IBCDFO local optimization run starting at ``x0``, governed by the + parameters in ``user_specs``. + + Although IBCDFO methods can receive previous evaluations, few other methods + support that, so APOSMM assumes the first point will be re-evaluated (but + not sent back to the manager).
+ """ + n = len(x0) + # Define bound constraints (lower <= x <= upper) + lb = np.zeros(n) + ub = np.ones(n) + + # Set random seed (for reproducibility) + np.random.seed(0) + + dist_to_bound = min(min(ub - x0), min(x0 - lb)) + assert dist_to_bound > np.finfo(np.float64).eps, "The distance to the boundary is too small" + + run_max_eval = user_specs.get("run_max_eval", 100 * (n + 1)) + g_tol = 1e-8 + delta_0 = 0.5 * dist_to_bound + m = len(f0) + + if "hfun" in user_specs: + Options = {"hfun": user_specs["hfun"], "combinemodels": user_specs["combinemodels"]} + else: + Options = None + + [X, F, hF, flag, xkin] = pounders.pounders( + lambda x: scipy_dfols_callback_fun(x, comm_queue, child_can_read, parent_can_read, user_specs), + x0, + n, + run_max_eval, + g_tol, + delta_0, + m, + lb, + ub, + Options=Options, + ) + + assert flag >= 0, "IBCDFO errored" + + x_opt = X[xkin] + + if flag == 0: + opt_flag = 1 + else: + print( + "[APOSMM] The IBCDFO run started from " + str(x0) + " stopped with an exit " + "flag of " + str(flag) + ". No point from this run will be " + "ruled as a minimum! APOSMM may start a new run from some point " + "in this run." + ) + opt_flag = 0 + + finish_queue(x_opt, opt_flag, comm_queue, parent_can_read, user_specs) + + def run_local_tao(user_specs, comm_queue, x0, f0, child_can_read, parent_can_read): """ Runs a PETSc/TAO local optimization run starting at ``x0``, governed by the diff --git a/libensemble/gen_funcs/persistent_aposmm.py b/libensemble/gen_funcs/persistent_aposmm.py index 4f39bf573..34c8a1b40 100644 --- a/libensemble/gen_funcs/persistent_aposmm.py +++ b/libensemble/gen_funcs/persistent_aposmm.py @@ -87,6 +87,12 @@ def aposmm(H, persis_info, gen_specs, libE_info): points must satisfy - ``'rk_const' [float]``: Multiplier in front of the r_k value - ``'max_active_runs' [int]``: Bound on number of runs APOSMM is advancing + - ``'stop_after_k_minima' [int]``: Tell APOSMM to stop after this many + local minima have been identified by local optimization runs. + - ``'stop_after_k_runs' [int]``: Tell APOSMM to stop after this many runs + have ended. (The number of minima may be less than the number of + ended runs if, for example, a local optimization run ends due to an + evaluation constraint rather than a convergence criterion.) If the rules in ``decide_where_to_start_localopt`` produces more than ``'max_active_runs'`` in some iteration, then existing runs are prioritized.
@@ -130,14 +136,13 @@ def aposmm(H, persis_info, gen_specs, libE_info): unless opt_flag is 1) opt_flag: 1 if the run ended with an optimal point (x_opt) or 0 if it ended because e.g., maxiters/maxevals were reached - num_samples_needed: Number of additional uniformly drawn samples needed + num_samples: Number of additional uniformly drawn samples needed Description of persistent variables used to maintain the state of APOSMM persis_info['total_runs']: Running count of started/completed localopt runs persis_info['run_order']: Sequence of indices of points in unfinished runs - persis_info['old_runs']: Sequence of indices of points in finished runs """ @@ -145,9 +150,16 @@ def aposmm(H, persis_info, gen_specs, libE_info): user_specs = gen_specs["user"] ps = PersistentSupport(libE_info, EVAL_GEN_TAG) n, n_s, rk_const, ld, mu, nu, comm, local_H = initialize_APOSMM(H, user_specs, libE_info) - local_opters, sim_id_to_child_inds, run_order, run_pts, total_runs, fields_to_pass = initialize_children( - user_specs - ) + ( + local_opters, + sim_id_to_child_inds, + run_order, + run_pts, + total_runs, + ended_runs, + fields_to_pass, + ) = initialize_children(user_specs) + if user_specs["initial_sample_size"] != 0: # Send our initial sample. We don't need to check that n_s is large enough: # the alloc_func only returns when the initial sample has function values. @@ -177,8 +189,12 @@ def aposmm(H, persis_info, gen_specs, libE_info): persis_info["run_order"] = run_order break - if np.sum(local_H["local_min"]) >= user_specs.get("stop_after_this_many_minima", np.inf): + if np.sum(local_H["local_min"]) >= user_specs.get("stop_after_k_minima", np.inf) or len( + ended_runs + ) >= user_specs.get("stop_after_k_runs", np.inf): # This break happens here so the manager can be informed about the last minima. 
+ clean_up_and_stop(local_opters) + persis_info["run_order"] = run_order break n_s, n_r = update_local_H_after_receiving(local_H, n, n_s, user_specs, Work, calc_in, fields_to_pass) @@ -194,6 +210,7 @@ def aposmm(H, persis_info, gen_specs, libE_info): opt_ind = update_history_optimal(x_opt, opt_flag, local_H, run_order[child_idx]) new_opt_inds_to_send_mgr.append(opt_ind) local_opters.pop(child_idx) + ended_runs.append(child_idx) else: add_to_local_H(local_H, x_new, user_specs, local_flag=1, on_cube=True) new_inds_to_send_mgr.append(len(local_H) - 1) @@ -221,9 +238,7 @@ def aposmm(H, persis_info, gen_specs, libE_info): local_opters[total_runs] = local_opter - x_new = local_opter.iterate( - local_H[ind][fields_to_pass] - ) # Assuming the second point can't be ruled optimal + x_new = local_opter.iterate(local_H[ind][fields_to_pass]) # Assuming the second x won't be optimal add_to_local_H(local_H, x_new, user_specs, local_flag=1, on_cube=True) new_inds_to_send_mgr.append(len(local_H) - 1) @@ -239,18 +254,16 @@ def aposmm(H, persis_info, gen_specs, libE_info): total_runs += 1 if first_pass: - num_samples_needed = persis_info["nworkers"] - 1 - len(new_inds_to_send_mgr) + num_samples = persis_info["nworkers"] - 1 - len(new_inds_to_send_mgr) first_pass = False else: - num_samples_needed = n_r - len(new_inds_to_send_mgr) + num_samples = n_r - len(new_inds_to_send_mgr) - if num_samples_needed > 0: + if num_samples > 0: persis_info = add_k_sample_points_to_local_H( - num_samples_needed, user_specs, persis_info, n, comm, local_H, sim_id_to_child_inds - ) - new_inds_to_send_mgr = new_inds_to_send_mgr + list( - range(len(local_H) - num_samples_needed, len(local_H)) + num_samples, user_specs, persis_info, n, comm, local_H, sim_id_to_child_inds ) + new_inds_to_send_mgr = new_inds_to_send_mgr + list(range(len(local_H) - num_samples, len(local_H))) if not user_specs.get("standalone"): ps.send(local_H[new_inds_to_send_mgr + new_opt_inds_to_send_mgr][[i[0] for i in gen_specs["out"]]]) @@ -589,7 +602,10 @@ def decide_where_to_start_localopt(H, n, n_s, rk_const, ld=0, mu=0, nu=0): def calc_rk(n, n_s, rk_const, lhs_divisions=0): """Calculate the critical distance r_k""" if lhs_divisions == 0: - r_k = rk_const * (log(n_s) / n_s) ** (1 / n) + if n_s == 1: + r_k = 1e8 + else: + r_k = rk_const * (log(n_s) / n_s) ** (1 / n) else: k = np.floor(n_s / lhs_divisions).astype(int) if k <= 1: # to prevent r_k=0 @@ -668,11 +684,7 @@ def initialize_APOSMM(H, user_specs, libE_info): "ind_of_better_s", ] if any([i in H.dtype.names for i in over_written_fields]): - print( - "\n[APOSMM] persistent_aposmm ignores any given values in these fields: " - + str(over_written_fields) - + "\n" - ) + print("\n[APOSMM] Ignoring given values in these fields: " + str(over_written_fields) + "\n") initialize_dists_and_inds(local_H, len(H)) @@ -681,11 +693,11 @@ def initialize_APOSMM(H, user_specs, libE_info): n_s = np.sum(~local_H["local_pt"]) - assert ( - n_s > 0 or user_specs["initial_sample_size"] > 0 - ), "APOSMM requires a positive initial_sample_size, or some existing points in order to determine where to start local optimization runs." + msg = "APOSMM requires a positive initial_sample_size, or some existing points in order to determine where to start local optimization runs." 
+ assert n_s > 0 or user_specs["initial_sample_size"] > 0, msg if "sample_points" in user_specs: + assert user_specs["sample_points"].ndim == 2, "Must have 2 dimensions for sample points" assert isinstance(user_specs["sample_points"], np.ndarray) return n, n_s, rk_c, ld, mu, nu, comm, local_H @@ -696,10 +708,9 @@ def initialize_children(user_specs): local_opters = {} sim_id_to_child_inds = {} run_order = {} - run_pts = ( - {} - ) # This can differ from 'x_on_cube' if, for example, user_specs['periodic'] is True and run points are off the cube. + run_pts = {} # These can differ from 'x_on_cube' (e.g., if user_specs['periodic']=1 and runs leave unit cube) total_runs = 0 + ended_runs = [] if user_specs["localopt_method"] in ["LD_MMA", "blmvm", "scipy_BFGS"]: fields_to_pass = ["x_on_cube", "f", "grad"] elif user_specs["localopt_method"] in [ @@ -714,12 +725,12 @@ def initialize_children(user_specs): "nm", ]: fields_to_pass = ["x_on_cube", "f"] - elif user_specs["localopt_method"] in ["pounders", "dfols"]: + elif user_specs["localopt_method"] in ["pounders", "ibcdfo_pounders", "dfols"]: fields_to_pass = ["x_on_cube", "fvec"] else: raise NotImplementedError(f"Unknown local optimization method {user_specs['localopt_method']}.") - return local_opters, sim_id_to_child_inds, run_order, run_pts, total_runs, fields_to_pass + return local_opters, sim_id_to_child_inds, run_order, run_pts, total_runs, ended_runs, fields_to_pass def add_k_sample_points_to_local_H(k, user_specs, persis_info, n, comm, local_H, sim_id_to_child_inds): diff --git a/libensemble/sim_funcs/chwirut1.py b/libensemble/sim_funcs/chwirut1.py index 00903edf5..be22de884 100644 --- a/libensemble/sim_funcs/chwirut1.py +++ b/libensemble/sim_funcs/chwirut1.py @@ -287,7 +287,10 @@ def chwirut_eval(H, _, sim_specs): else: O["fvec"][i] = EvaluateFunction(x) - O["f"][i] = sim_specs["user"]["combine_component_func"](O["fvec"][i]) + if "combine_component_func" in sim_specs["user"]: + O["f"][i] = sim_specs["user"]["combine_component_func"](O["fvec"][i]) + else: + O["f"][i] = np.sum(O["fvec"][i] ** 2) return O diff --git a/libensemble/tests/regression_tests/support.py b/libensemble/tests/regression_tests/support.py index f95637106..37574bbe6 100644 --- a/libensemble/tests/regression_tests/support.py +++ b/libensemble/tests/regression_tests/support.py @@ -103,7 +103,6 @@ def write_uniform_gen_func(H, persis_info, gen_specs, _): persis_info_1[0] = { "run_order": {}, # Used by manager to remember run order - "old_runs": {}, # Used by manager to store old runs order "total_runs": 0, # Used by manager to count total runs "rand_stream": np.random.default_rng(1), } diff --git a/libensemble/tests/regression_tests/test_persistent_aposmm_dfols.py b/libensemble/tests/regression_tests/test_persistent_aposmm_dfols.py index ddcbba57a..ec8c3486e 100644 --- a/libensemble/tests/regression_tests/test_persistent_aposmm_dfols.py +++ b/libensemble/tests/regression_tests/test_persistent_aposmm_dfols.py @@ -52,7 +52,6 @@ def combine_component(x): # Declare the run parameters/functions m = 214 n = 3 - budget = 10 sim_specs = { "sim_f": sim_f, diff --git a/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py b/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py new file mode 100644 index 000000000..dd0a86b5b --- /dev/null +++ b/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py @@ -0,0 +1,141 @@ +""" +Runs libEnsemble with APOSMM+IBCDFO on two test problems. 
Only a single optimization run is performed for each setup below. + +The first case uses POUNDERS to solve the chwirut least-squares problem. For +this case, all chwirut 214 residual calculations for a given point are +performed as a single simulation evaluation. + +The second case uses the generalized POUNDERS to minimize normalized beamline +emittance. The "beamline simulation" is a synthetic polynomial test function +that takes in 4 variables and returns 3 outputs. These outputs represent +position <x^2>, momentum <p^2>, and the correlation between them <xp>. + +These values are then mapped to the normalized emittance <x^2><p^2> - <xp>^2. + +Execute via one of the following commands: + mpiexec -np 3 python test_persistent_aposmm_ibcdfo_pounders.py + python test_persistent_aposmm_ibcdfo_pounders.py --nworkers 2 --comms local +Both will run with 1 manager, 1 worker running APOSMM+IBCDFO, and 1 worker +doing the simulation evaluations. +""" + +# Do not change these lines - they are parsed by run-tests.sh +# TESTSUITE_COMMS: local mpi +# TESTSUITE_NPROCS: 3 + +import multiprocessing +import sys + +import numpy as np + +import libensemble.gen_funcs +from libensemble.libE import libE +from libensemble.sim_funcs.chwirut1 import chwirut_eval + +libensemble.gen_funcs.rc.aposmm_optimizers = "ibcdfo" + +from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f +from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f +from libensemble.tools import add_unique_random_streams, parse_args, save_libE_output + +try: + from ibcdfo.pounders import pounders # noqa: F401 + from ibcdfo.pounders.general_h_funs import emittance_combine, emittance_h +except ModuleNotFoundError: + sys.exit("Please 'pip install ibcdfo'") + +try: + from minqsw import minqsw # noqa: F401 + +except ModuleNotFoundError: + sys.exit("Ensure https://github.com/POptUS/minq has been cloned and that minq/py/minq5/ is on the PYTHONPATH") + + +def sum_squared(x): + return np.sum(np.power(x, 2)) + + +def synthetic_beamline_mapping(H, _, sim_specs): + x = H["x"][0] + assert len(x) == 4, "Assuming 4 inputs to this function" + y = np.zeros(3) # Synthetic beamline outputs + y[0] = x[0] ** 2 + 1.0 + y[1] = x[1] ** 2 + 2.0 + y[2] = x[2] * x[3] + 0.5 + + Out = np.zeros(1, dtype=sim_specs["out"]) + Out["fvec"] = y + Out["f"] = y[0] * y[1] - y[2] ** 2 + return Out + + +# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows).
+if __name__ == "__main__": + multiprocessing.set_start_method("fork", force=True) + + nworkers, is_manager, libE_specs, _ = parse_args() + + assert nworkers == 2, "This test is just for two workers" + + for inst in range(2): + if inst == 0: + # Declare the run parameters/functions + m = 214 + n = 3 + sim_f = chwirut_eval + elif inst == 1: + m = 3 + n = 4 + sim_f = synthetic_beamline_mapping + + sim_specs = { + "sim_f": sim_f, + "in": ["x"], + "out": [("f", float), ("fvec", float, m)], + } + + gen_out = [ + ("x", float, n), + ("x_on_cube", float, n), + ("sim_id", int), + ("local_min", bool), + ("local_pt", bool), + ("started_run", bool), + ] + + gen_specs = { + "gen_f": gen_f, + "persis_in": ["f", "fvec"] + [n[0] for n in gen_out], + "out": gen_out, + "user": { + "initial_sample_size": 1, + "stop_after_k_runs": 1, + "max_active_runs": 1, + "sample_points": np.atleast_2d(0.1 * (np.arange(n) + 1)), + "localopt_method": "ibcdfo_pounders", + "run_max_eval": 100 * (n + 1), + "components": m, + "lb": -1 * np.ones(n), + "ub": np.ones(n), + }, + } + + if inst == 1: + gen_specs["user"]["hfun"] = emittance_h + gen_specs["user"]["combinemodels"] = emittance_combine + + alloc_specs = {"alloc_f": alloc_f} + + persis_info = add_unique_random_streams({}, nworkers + 1) + + exit_criteria = {"sim_max": 500} + + # Perform the run + H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, alloc_specs, libE_specs) + + if is_manager: + assert persis_info[1].get("run_order"), "Run_order should have been given back" + assert flag == 0 + + save_libE_output(H, persis_info, __file__, nworkers) diff --git a/libensemble/tests/regression_tests/test_persistent_aposmm_with_grad.py b/libensemble/tests/regression_tests/test_persistent_aposmm_with_grad.py index 10d702105..6ef34f375 100644 --- a/libensemble/tests/regression_tests/test_persistent_aposmm_with_grad.py +++ b/libensemble/tests/regression_tests/test_persistent_aposmm_with_grad.py @@ -76,7 +76,7 @@ "initial_sample_size": 0, # Don't need to do evaluations because the sampling already done below "localopt_method": "LD_MMA", "rk_const": 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi), - "stop_after_this_many_minima": 25, + "stop_after_k_minima": 25, "xtol_rel": 1e-6, "ftol_rel": 1e-6, "max_active_runs": 6, @@ -121,6 +121,12 @@ H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, alloc_specs, libE_specs, H0=H0) if is_manager: + assert persis_info[1].get("run_order"), "Run_order should have been given back" + assert ( + len(persis_info[1]["run_order"]) >= gen_specs["user"]["stop_after_k_minima"] + ), "This test should have many runs started." + assert len(H) < exit_criteria["sim_max"], "Test should have stopped early due to 'stop_after_k_minima'" + print("[Manager]:", H[np.where(H["local_min"])]["x"]) print("[Manager]: Time taken =", time() - start_time, flush=True) @@ -131,6 +137,4 @@ print(np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)), flush=True) assert np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)) < tol - assert len(H) < exit_criteria["sim_max"], "Test should have stopped early" - save_libE_output(H, persis_info, __file__, nworkers)
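Note for reviewers: the POUNDERS call assembled by the new run_local_ibcdfo_pounders above can also be exercised outside APOSMM, which is handy when checking the IBCDFO installation added in the workflow steps. The sketch below is illustrative only: the residual function, starting point, and bounds are made up, and it simply mirrors the argument order of the pounders.pounders(...) call added in aposmm_localopt_support.py (it assumes ibcdfo and the MINQ5 solver are importable, as set up in the CI changes).

import numpy as np

from ibcdfo import pounders


def fvec(x):
    # Hypothetical least-squares residuals standing in for a user simulation
    return np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])


n, m = 2, 2  # dimension and number of residual components
x0 = np.array([0.5, 0.5])  # start point inside the bounds
lb, ub = np.zeros(n), np.ones(n)  # bound constraints, as on APOSMM's unit cube
g_tol = 1e-8  # gradient tolerance used by run_local_ibcdfo_pounders
delta_0 = 0.5 * min(min(ub - x0), min(x0 - lb))  # initial trust-region radius
run_max_eval = 100 * (n + 1)  # evaluation budget (the run_max_eval default)

# Same positional arguments as the call added above; Options may instead carry
# "hfun"/"combinemodels" (e.g., emittance_h/emittance_combine) for non-least-squares objectives.
[X, F, hF, flag, xkin] = pounders.pounders(fvec, x0, n, run_max_eval, g_tol, delta_0, m, lb, ub, Options=None)

assert flag >= 0, "IBCDFO errored"
print("Best point found:", X[xkin])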