diff --git a/.flake8 b/.flake8 index ee8bc23c8..142e9f02d 100644 --- a/.flake8 +++ b/.flake8 @@ -39,7 +39,6 @@ per-file-ignores = examples/calling_scripts/run_libensemble_on_warpx.py:E402 libensemble/tests/regression_tests/test_persistent_aposmm*:E402 libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py:E402 - libensemble/tests/regression_tests/test_ytopt_heffte.py:E402 libensemble/tests/functionality_tests/test_uniform_sampling_then_persistent_localopt_runs.py:E402 libensemble/tests/functionality_tests/test_stats_output.py:E402 libensemble/tests/functionality_tests/test_active_persistent_worker_abort.py:E402 diff --git a/.github/workflows/basic.yml b/.github/workflows/basic.yml index dcdbd047d..42385b57a 100644 --- a/.github/workflows/basic.yml +++ b/.github/workflows/basic.yml @@ -63,19 +63,6 @@ jobs: channel-priority: flexible auto-update-conda: true - - uses: actions/cache/restore@v4 - name: Restore cached dependencies - id: cache - if: matrix.os == 'ubuntu-latest' - with: - path: | - /home/runner/.local - /usr/share/miniconda3/envs/condaenv - /usr/share/miniconda3/bin - /usr/share/miniconda3/lib - /usr/share/miniconda3/include - key: libe-${{ github.ref_name }}-${{ matrix.python-version }}-${{ matrix.comms-type }}-${{ matrix.pydantic-version }}-basic - - name: Force-update certifi run: | python --version @@ -95,15 +82,7 @@ jobs: run: | pip install -r install/testing_requirements.txt pip install -r install/misc_feature_requirements.txt - - git clone --recurse-submodules -b develop https://github.com/POptUS/IBCDFO.git - pushd IBCDFO/minq/py/minq5/ - export PYTHONPATH="$PYTHONPATH:$(pwd)" - echo "PYTHONPATH=$PYTHONPATH" >> $GITHUB_ENV - popd - pushd IBCDFO/ibcdfo_pypkg/ - pip install -e . - popd + source install/install_ibcdfo.sh - name: Install mpi4py and MPI from conda run: | @@ -115,18 +94,6 @@ jobs: pip install mpmath matplotlib conda install numpy nlopt scipy - - uses: actions/cache/save@v4 - name: Save dependencies to cache - if: matrix.os == 'ubuntu-latest' - with: - path: | - /home/runner/.local - /usr/share/miniconda3/envs/condaenv - /usr/share/miniconda3/bin - /usr/share/miniconda3/lib - /usr/share/miniconda3/include - key: libe-${{ github.ref_name }}-${{ matrix.python-version }}-${{ matrix.comms-type }} - - name: Install libEnsemble, flake8 run: | pip install pydantic==${{ matrix.pydantic-version }} @@ -163,4 +130,4 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: crate-ci/typos@v1.24.3 + - uses: crate-ci/typos@v1.24.5 diff --git a/.github/workflows/extra.yml b/.github/workflows/extra.yml index d879f2ca7..df23fadbb 100644 --- a/.github/workflows/extra.yml +++ b/.github/workflows/extra.yml @@ -67,20 +67,6 @@ jobs: channel-priority: flexible auto-update-conda: true - - uses: actions/cache/restore@v4 - name: Restore cached dependencies - id: cache - if: matrix.os == 'ubuntu-latest' - with: - path: | - /home/runner/work/libensemble/libensemble/heffte/build/ - /home/runner/.local - /usr/share/miniconda3/envs/condaenv - /usr/share/miniconda3/bin - /usr/share/miniconda3/lib - /usr/share/miniconda3/include - key: libe-${{ github.ref_name }}-${{ matrix.python-version }}-${{ matrix.comms-type }}-${{ matrix.pydantic-version }}-extra - - name: Force-update certifi run: | python --version @@ -97,65 +83,29 @@ jobs: conda install clang_osx-64 - name: Install mpi4py and MPI from conda - if: (matrix.python-version != '3.10' && matrix.os == 'ubuntu-latest') || matrix.os == 'macos-latest' run: | conda install mpi4py ${{ matrix.mpi-version }} - - name: 
Install mpi4py from pip, MPI from conda - if: matrix.python-version == '3.10' && matrix.os == 'ubuntu-latest' - run: | - conda install ${{ matrix.mpi-version }} - pip install mpi4py - - name: Install generator dependencies run: | - python -m pip install --upgrade pip - pip install mpmath - pip install matplotlib conda env update --file install/gen_deps_environment.yml - #- name: Install ax-platform - # run: pip install "ax-platform<=0.4.0" - - name: Install gpcam if: matrix.python-version != '3.12' run: | pip install gpcam - name: Install surmise - if: matrix.os != 'macos-latest' + if: matrix.os == 'ubuntu-latest' run: | pip install --upgrade git+https://github.com/bandframework/surmise.git - - name: Build ytopt and dependencies - if: matrix.python-version <= '3.10' && matrix.os != 'macos-latest' - run: | - pip install scikit-learn==1.4.0 - pip install pandas==2.2.1 - pip install ConfigSpace - pip install "ray[default]" - git clone https://github.com/ytopt-team/scikit-optimize.git - cd scikit-optimize - pip install . - cd .. - git clone -b version1 https://github.com/ytopt-team/autotune.git - cd autotune - pip install . - cd .. - git clone -b main https://github.com/ytopt-team/ytopt.git - cd ytopt - pip install . - cd .. - - name: Install generator dependencies for Ubuntu tests if: matrix.os == 'ubuntu-latest' && matrix.python-version != '3.12' run: | sudo apt-get install bc - - # pip install dragonfly-opt - # pip install git+https://github.com/dragonfly/dragonfly.git - pip install git+https://github.com/jlnav/dragonfly.git@fix/remove_npobject - pip install scikit-build packaging Tasmanian --user + pip install -r install/ubuntu_no312.txt + pip install Tasmanian --user - name: Install Balsam on Pydantic 1 if: matrix.pydantic-version == '1.10.17' @@ -170,43 +120,7 @@ jobs: conda install octave pip install -r install/testing_requirements.txt pip install -r install/misc_feature_requirements.txt - - git clone --recurse-submodules -b develop https://github.com/POptUS/IBCDFO.git - pushd IBCDFO/minq/py/minq5/ - export PYTHONPATH="$PYTHONPATH:$(pwd)" - echo "PYTHONPATH=$PYTHONPATH" >> $GITHUB_ENV - popd - pushd IBCDFO/ibcdfo_pypkg/ - pip install -e . 
- popd - - - name: Build heffte - if: matrix.os != 'macos-latest' - run: | - # begin heffte build and dependencies - sudo apt install libfftw3-dev - git clone https://github.com/icl-utk-edu/heffte.git - mkdir heffte/build - cd heffte/build - pwd - cmake -D CMAKE_BUILD_TYPE=Release -D BUILD_SHARED_LIBS=ON -D CMAKE_INSTALL_PREFIX=./ -D Heffte_ENABLE_AVX=ON -D Heffte_ENABLE_FFTW=ON ../ - make -j 4 - make install - cp ./benchmarks/speed3d_c2c ../../libensemble/tests/regression_tests/ - # end heffte build and dependencies - - - uses: actions/cache/save@v4 - name: Save dependencies to cache - if: matrix.os == 'ubuntu-latest' - with: - path: | - /home/runner/work/libensemble/libensemble/heffte/build/ - /home/runner/.local - /usr/share/miniconda3/envs/condaenv - /usr/share/miniconda3/bin - /usr/share/miniconda3/lib - /usr/share/miniconda3/include - key: libe-${{ github.ref_name }}-${{ matrix.python-version }}-${{ matrix.comms-type }} + source install/install_ibcdfo.sh - name: Install libEnsemble, flake8, lock environment run: | @@ -217,24 +131,25 @@ jobs: - name: Remove various tests on newer pythons if: matrix.python-version >= '3.11' run: | - rm ./libensemble/tests/regression_tests/test_ytopt_heffte.py # rm ./libensemble/tests/regression_tests/test_gpCAM.py rm ./libensemble/tests/regression_tests/test_persistent_gp.py - - name: Remove test for persistent Tasmanian on Python 3.12 + - name: Remove test for persistent Tasmanian, Surmise on Python 3.12 if: matrix.python-version >= '3.12' run: | rm ./libensemble/tests/regression_tests/test_persistent_tasmanian.py rm ./libensemble/tests/regression_tests/test_persistent_tasmanian_async.py + rm ./libensemble/tests/regression_tests/test_persistent_surmise_calib.py + rm ./libensemble/tests/regression_tests/test_persistent_surmise_killsims.py - name: Install redis/proxystore on Pydantic 2 - if: matrix.pydantic-version == '2.6.4' + if: matrix.pydantic-version == '2.8.2' run: | pip install redis pip install proxystore==0.7.0 - name: Remove proxystore test on Pydantic 1 - if: matrix.pydantic-version == '1.10.13' + if: matrix.pydantic-version == '1.10.17' run: | rm ./libensemble/tests/regression_tests/test_proxystore_integration.py @@ -250,16 +165,10 @@ jobs: with: redis-version: 7 - - name: Run extensive tests, Ubuntu - if: matrix.os == 'ubuntu-latest' + - name: Run extensive tests run: | ./libensemble/tests/run-tests.sh -e -z -${{ matrix.comms-type }} - - name: Run extensive tests, macOS - if: matrix.os == 'macos-latest' - run: | - ./libensemble/tests/run-tests.sh -e -z -${{ matrix.comms-type }} - - name: Merge coverage run: | mv libensemble/tests/.cov* . @@ -275,4 +184,4 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: crate-ci/typos@v1.24.3 + - uses: crate-ci/typos@v1.24.5 diff --git a/docs/examples/surmise.rst b/docs/examples/surmise.rst index 72fa87413..69d0f068e 100644 --- a/docs/examples/surmise.rst +++ b/docs/examples/surmise.rst @@ -2,15 +2,11 @@ persistent_surmise ------------------ Required: Surmise_ - -Note that currently the github fork https://github.com/mosesyhc/surmise should be used:: - - pip install --upgrade git+https://github.com/bandframework/surmise.git@develop - -The :doc:`Borehole Calibration tutorial<../tutorials/calib_cancel_tutorial>` uses this generator as an example of the capability to cancel pending simulations. +The :doc:`Borehole Calibration tutorial<../tutorials/calib_cancel_tutorial>` uses this generator as an +example of the capability to cancel pending simulations. .. 
automodule:: persistent_surmise_calib :members: :no-undoc-members: -.. _Surmise: https://surmise.readthedocs.io/en/latest/index.html +.. _Surmise: https://github.com/bandframework/surmise diff --git a/install/gen_deps_environment.yml b/install/gen_deps_environment.yml index 6f622ec82..a69146f3e 100644 --- a/install/gen_deps_environment.yml +++ b/install/gen_deps_environment.yml @@ -6,7 +6,7 @@ channels: dependencies: - pip - numpy>=2 - - nlopt + - nlopt==2.7.1 - scipy - superlu_dist - hypre diff --git a/install/install_ibcdfo.sh b/install/install_ibcdfo.sh new file mode 100644 index 000000000..efd5f6dcb --- /dev/null +++ b/install/install_ibcdfo.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +git clone --recurse-submodules -b develop https://github.com/POptUS/IBCDFO.git +pushd IBCDFO/minq/py/minq5/ +export PYTHONPATH="$PYTHONPATH:$(pwd)" +echo "PYTHONPATH=$PYTHONPATH" >> $GITHUB_ENV +popd +pushd IBCDFO/ibcdfo_pypkg/ +pip install -e . +popd diff --git a/install/testing_requirements.txt b/install/testing_requirements.txt index bf07e69d1..e2f025794 100644 --- a/install/testing_requirements.txt +++ b/install/testing_requirements.txt @@ -1,9 +1,10 @@ flake8==7.1.1 coverage==7.3.1 -pytest==8.3.2 +pytest==8.3.3 pytest-cov==5.0.0 pytest-timeout==2.3.1 mock==5.1.0 python-dateutil==2.9.0.post0 anyio==4.4.0 matplotlib==3.9.2 +mpmath==1.3.0 diff --git a/install/ubuntu_no312.txt b/install/ubuntu_no312.txt new file mode 100644 index 000000000..c66fb45a0 --- /dev/null +++ b/install/ubuntu_no312.txt @@ -0,0 +1,5 @@ +gpcam==8.1.6 +scikit-build==0.18.1 +packaging==24.1 +git+https://github.com/jlnav/dragonfly.git@fix/remove_npobject +git+https://github.com/bandframework/surmise.git diff --git a/libensemble/gen_funcs/ytopt_asktell.py b/libensemble/gen_funcs/ytopt_asktell.py deleted file mode 100644 index cb8a42b9b..000000000 --- a/libensemble/gen_funcs/ytopt_asktell.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -This module wraps around the ytopt generator. -""" - -import numpy as np - -from libensemble.message_numbers import EVAL_GEN_TAG, FINISHED_PERSISTENT_GEN_TAG, PERSIS_STOP, STOP_TAG -from libensemble.tools.persistent_support import PersistentSupport - -__all__ = ["persistent_ytopt"] - - -def persistent_ytopt(H, persis_info, gen_specs, libE_info): - ps = PersistentSupport(libE_info, EVAL_GEN_TAG) - user_specs = gen_specs["user"] - ytoptimizer = user_specs["ytoptimizer"] - - tag = None - calc_in = None - first_call = True - first_write = True - fields = [i[0] for i in gen_specs["out"]] - - # Send batches until manager sends stop tag - while tag not in [STOP_TAG, PERSIS_STOP]: - if first_call: - ytopt_points = ytoptimizer.ask_initial(n_points=user_specs["num_sim_workers"]) # Returns a list - batch_size = len(ytopt_points) - first_call = False - else: - batch_size = len(calc_in) - results = [] - for entry in calc_in: - field_params = {} - for field in fields: - field_params[field] = entry[field][0] - results += [(field_params, entry["RUNTIME"])] - print("results debug: ", results) - ytoptimizer.tell(results) - - ytopt_points = ytoptimizer.ask(n_points=batch_size) # Returns a generator that we convert to a list - ytopt_points = list(ytopt_points)[0] - - # The hand-off of information from ytopt to libE is below. This hand-off may be brittle. 
- H_o = np.zeros(batch_size, dtype=gen_specs["out"]) - for i, entry in enumerate(ytopt_points): - for key, value in entry.items(): - H_o[i][key] = value - - # This returns the requested points to the libE manager, which will - # perform the sim_f evaluations and then give back the values. - tag, Work, calc_in = ps.send_recv(H_o) - print("received:", calc_in, flush=True) - - if calc_in is not None: - if len(calc_in): - b = [] - for entry in calc_in[0]: - if isinstance(entry, float): - b += [str(entry)] - else: - b += [str(entry[0])] - - with open("../../results.csv", "a") as f: - if first_write: - f.write(",".join(calc_in.dtype.names) + "\n") - first_write = False - else: - f.write(",".join(b) + "\n") - - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG diff --git a/libensemble/sim_funcs/heffte.py b/libensemble/sim_funcs/heffte.py deleted file mode 100644 index 248a46be2..000000000 --- a/libensemble/sim_funcs/heffte.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -Example sim_f for simple heFFTe use case. -""" - -import subprocess - -import numpy as np - - -def call_and_process_heffte(H, persis_info, sim_specs, _): - """ - Evaluates (via subprocess) a string that includes a call to a heFFTe - executable as well as other arguments. Afterwards, the stdout is parsed to - collect the run time (as reported by heFTTe) - """ - - H_o = np.zeros(1, dtype=sim_specs["out"]) - - p = subprocess.run(H["exec_and_args"][0].split(" "), cwd="./", stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - assert p.returncode == 0, "heFFTe call has failed" - - time = float(p.stdout.decode().split("Time per run: ")[1].split(" ")[0]) - - H_o["RUN_TIME"] = time - return H_o, persis_info diff --git a/libensemble/sim_funcs/ytopt_obj.py b/libensemble/sim_funcs/ytopt_obj.py deleted file mode 100644 index 87dee97ef..000000000 --- a/libensemble/sim_funcs/ytopt_obj.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -This module is a wrapper around an example ytopt objective function -""" - -__all__ = ["init_obj"] - -import time - -import numpy as np -from plopper import Plopper - -start_time = time.time() - - -def init_obj(H, persis_info, sim_specs, libE_info): - point = {} - for field in sim_specs["in"]: - point[field] = np.squeeze(H[field]) - - y = myobj(point, sim_specs["in"], libE_info["workerID"]) # ytopt objective wants a dict - H_o = np.zeros(2, dtype=sim_specs["out"]) - H_o["RUNTIME"] = y - H_o["elapsed_sec"] = time.time() - start_time - - return H_o, persis_info - - -def myobj(point: dict, params: list, workerID: int): - def plopper_func(x, params): - obj = Plopper("./speed3d.sh", "./") - x = np.asarray_chkfinite(x) - value = [point[param] for param in params] - # print(value) - # os.system("./processexe.pl exe.pl " +str(value[4])+ " " +str(value[5])+ " " +str(value[6])) - # os.environ["OMP_NUM_THREADS"] = str(value[4]) - params = [i.upper() for i in params] - # result = float(obj.findRuntime(value, params, workerID)) - result = obj.findRuntime(value, params, workerID) - return result - - x = np.array([point[f"p{i}"] for i in range(len(point))]) - results = plopper_func(x, params) - # print('CONFIG and OUTPUT', [point, results], flush=True) - return results diff --git a/libensemble/tests/regression_tests/optimizer.py b/libensemble/tests/regression_tests/optimizer.py deleted file mode 100644 index 0d6207980..000000000 --- a/libensemble/tests/regression_tests/optimizer.py +++ /dev/null @@ -1,167 +0,0 @@ -from sys import float_info - -import ConfigSpace as CS -import numpy as np -from numpy import inf -from skopt import Optimizer as 
SkOptimizer -from ytopt.search import util - -ccs_active = False -try: - import cconfigspace as CCS - - ccs_active = True -except (ImportError, OSError) as a: - import warnings - - warnings.warn("CCS could not be loaded and is deactivated: " + str(a), category=ImportWarning) - -logger = util.conf_logger("ytopt.search.hps.optimizer.optimizer") - - -class Optimizer: - SEED = 12345 - KAPPA = 1.96 - - def __init__( - self, num_workers: int, space, learner, acq_func, liar_strategy, set_KAPPA, set_SEED, set_NI, **kwargs - ): - assert learner in ["RF", "ET", "GBRT", "GP", "DUMMY"], f"Unknown scikit-optimize base_estimator: {learner}" - assert liar_strategy in "cl_min cl_mean cl_max".split() - - self.space = space - self.learner = learner - self.acq_func = acq_func - self.liar_strategy = liar_strategy - self.KAPPA = set_KAPPA - self.SEED = set_SEED - self.NI = set_NI - # n_init = set_NI - # print ('............self.KAPPA',self.KAPPA) - # print ('............self.learner',self.learner) - # print ('............self.acq_func',self.acq_func) - # print ('............self.SEED',self.SEED) - - n_init = inf if learner == "DUMMY" else self.NI # num_workers - # print ('............n_init',n_init) - if isinstance(self.space, CS.ConfigurationSpace) or ( - ccs_active and isinstance(self.space, CCS.ConfigurationSpace) - ): - self._optimizer = SkOptimizer( - dimensions=self.space, - base_estimator=self.learner, - acq_optimizer="sampling", - acq_func=self.acq_func, - acq_func_kwargs={"kappa": self.KAPPA}, - random_state=self.SEED, - n_initial_points=n_init, - ) - else: - self._optimizer = SkOptimizer( - dimensions=self.space.dimensions, - base_estimator=self.learner, - acq_optimizer="sampling", - acq_func=self.acq_func, - acq_func_kwargs={"kappa": self.KAPPA}, - random_state=self.SEED, - n_initial_points=n_init, - ) - - self.evals = {} - self.counter = 0 - logger.info("Using skopt.Optimizer with %s base_estimator" % self.learner) - - def _get_lie(self): - if self.liar_strategy == "cl_min": - return min(self._optimizer.yi) if self._optimizer.yi else 0.0 - elif self.liar_strategy == "cl_mean": - return np.mean(self._optimizer.yi) if self._optimizer.yi else 0.0 - else: - return max(self._optimizer.yi) if self._optimizer.yi else 0.0 - - def _xy_from_dict(self): - XX = list(self.evals.keys()) - YY = [self.evals[x] for x in XX] - return XX, YY - - def to_dict(self, x: list) -> dict: - if isinstance(self.space, CS.ConfigurationSpace): - res = {} - hps_names = self.space.get_hyperparameter_names() - for i in range(len(x)): - res[hps_names[i]] = x[i] - return res - elif ccs_active and isinstance(self.space, CCS.ConfigurationSpace): - res = {} - hps = self.space.hyperparameters - for i in range(len(x)): - res[hps[i].name] = x[i] - return res - else: - return self.space.to_dict(x) - - def _ask(self): - x = self._optimizer.ask() - y = self._get_lie() - key = tuple(x) - if key not in self.evals: - self.counter += 1 - self._optimizer.tell(x, y) - self.evals[key] = y - logger.debug(f"_ask: {x} lie: {y}") - else: - logger.debug(f"Duplicate _ask: {x} lie: {y}") - return self.to_dict(x) - - def ask(self, n_points=None, batch_size=20): - if n_points is None: - return self._ask() - else: - batch = [] - for _ in range(n_points): - batch.append(self._ask()) - if len(batch) == batch_size: - yield batch - batch = [] - if batch: - yield batch - - def ask_initial(self, n_points): - default = self._optimizer.ask_default() - XX = [] - if default is not None and n_points > 0: - XX.append(default) - n_points -= 1 - if n_points > 0: - XX += 
self._optimizer.ask(n_points=n_points) - for x in XX: - y = self._get_lie() - key = tuple(x) - if key not in self.evals: - self.counter += 1 - self._optimizer.tell(x, y) - self.evals[key] = y - return [self.to_dict(x) for x in XX] - - def tell(self, xy_data): - assert isinstance(xy_data, list), f"where type(xy_data)=={type(xy_data)}" - maxval = max(self._optimizer.yi) if self._optimizer.yi else 0.0 - for x, y in xy_data: - key = tuple(x.values()) # * tuple(x[k] for k in self.space) - assert key in self.evals, f"where key=={key} and self.evals=={self.evals}" - logger.debug(f"tell: {x} --> {key}: evaluated objective: {y}") - self.evals[key] = y if y < float_info.max else maxval - - self._optimizer.Xi = [] - self._optimizer.yi = [] - XX, YY = self._xy_from_dict() - assert len(XX) == len(YY) == self.counter, ( - f"where len(XX)=={len(XX)}," f"len(YY)=={len(YY)}, self.counter=={self.counter}" - ) - print("debug: ", XX, YY, flush=True) - self._optimizer.tell(XX, YY) - assert len(self._optimizer.Xi) == len(self._optimizer.yi) == self.counter, ( - f"where len(self._optimizer.Xi)=={len(self._optimizer.Xi)}, " - f"len(self._optimizer.yi)=={len(self._optimizer.yi)}," - f"self.counter=={self.counter}" - ) diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/cleanup.sh b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/cleanup.sh deleted file mode 100755 index 3000b0a0e..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/cleanup.sh +++ /dev/null @@ -1 +0,0 @@ -rm -r ensemble* *.cobaltlog *.error *.output ensemble.log libE_* out.txt ytopt.log *.npy *.pickle *.job *.out diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/exe.pl b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/exe.pl deleted file mode 100755 index e8403285b..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/exe.pl +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env perl - -#Author: Xingfu Wu -#MCS, ANL -# exe.pl: average the execution time in 5 runs -# -use Time::HiRes qw(gettimeofday); - -$A_FILE = "tmpoutfile.txt"; -foreach $filename (@ARGV) { - # print "Start to preprocess ", $filename, "...\n"; - $ssum = 0.0; - $nmax = 1; - @nn = (1..$nmax); - for(@nn) { - #$retval = gettimeofday( ); - system("mpirun -n 4 sh $filename > tmpoutfile.txt 2>&1"); - open (TEMFILE, '<', $A_FILE); - while (<TEMFILE>) { - $line = $_; - chomp ($line); - - if ($line =~ /Time per run/) { - ($v1, $v2, $v3, $v4, $v5) = split(' ', $line); - printf("%.6f", $v4); - } - } - if ($v4 == 0 ) { - printf("-1"); - } - close(TEMFILE); - #$tt = gettimeofday( ); - #$ttotal = $tt - $retval; - #$ssum = $ssum + $ttotal; - } - #$avg = $ssum / $nmax; - # print "End to preprocess ", $avg, "...\n"; - #printf("%.3f", $avg); -} diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/findMin.py b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/findMin.py deleted file mode 100644 index 954b99a9a..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/findMin.py +++ /dev/null @@ -1,16 +0,0 @@ -import pandas - -dataframe = pandas.read_csv("results.csv") -array = dataframe.values -x = array[:, 7] - -print("Performance summary based on", len(array), "evaluations:") -print("Min: ", x.min(), "s") -print("Max: ", x.max(), "s") -print("Mean: ", x.mean(), "s") 
-print("The best configurations (for the smallest time) is:\n") -print("P0 P1 P2 P3 P4 P5 P6 execution time elapsed time\n") -mn = x.min() -for i in range(len(array)): - if x[i] == mn: - print(array[i, :]) diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/plopper.pl b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/plopper.pl deleted file mode 100755 index 54cd9752f..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/plopper.pl +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/perl -#Change this path! -#Author: Xingfu Wu -# MCS, ANL -# plopper.pl: process the file plopper.py to change the proper app_timeout -# - -$A_FILE = "timeoutfile.txt"; - -$filename1 = $ARGV[0]; - #print "Start to process ", $filename, "...\n"; - $fname = ">" . $A_FILE; - open(OUTFILE, $fname); - open (TEMFILE, $filename1); - while () { - $line = $_; - chomp ($line); - - if ($line =~ /app_timeout =/) { - ($v1, $v2) = split('= ', $line); - print OUTFILE $v1, " = ", $ARGV[1], "\n"; - } else { - print OUTFILE $line, "\n"; - } - } - close(TEMFILE); - close(OUTFILE); - system("mv $A_FILE $filename1"); -#exit main -exit 0; diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/plopper.py b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/plopper.py deleted file mode 100644 index ab6e90625..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/plopper.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -import random -import subprocess - -import psutil - - -class Plopper: - def __init__(self, sourcefile, outputdir): - # Initializing global variables - self.sourcefile = sourcefile - self.outputdir = outputdir + "/tmp_files" - - if not os.path.exists(self.outputdir): - os.makedirs(self.outputdir) - - # Creating a dictionary using parameter label and value - def createDict(self, x, params): - dictVal = {} - for p, v in zip(params, x): - dictVal[p] = v - return dictVal - - # Replace the Markers in the source file with the corresponding values - def plotValues(self, dictVal, inputfile, outputfile): - with open(inputfile, "r") as f1: - buf = f1.readlines() - - with open(outputfile, "w") as f2: - for line in buf: - modify_line = line - for key, value in dictVal.items(): - if key in modify_line: - if value != "None": # For empty string options - modify_line = modify_line.replace("#" + key, str(value)) - - if modify_line != line: - f2.write(modify_line) - else: - # To avoid writing the Marker - f2.write(line) - - # Function to find the execution time of the interim file, and return the - # execution time as cost to the search module - def findRuntime(self, x, params, worker): - interimfile = "" - # exetime = float('inf') - # exetime = sys.maxsize - exetime = -1 - counter = random.randint(1, 10001) # To reduce collision increasing the sampling intervals - - interimfile = self.outputdir + "/" + str(counter) + ".sh" - - # Generate intermediate file - dictVal = self.createDict(x, params) - self.plotValues(dictVal, self.sourcefile, interimfile) - - # compile and find the execution time - # tmpbinary = interimfile[:-2] - tmpbinary = interimfile - # tmpbinary = interimfile[:-3] + '_w' + str(worker)+".sh" - - kernel_idx = self.sourcefile.rfind("/") - kernel_dir = self.sourcefile[:kernel_idx] - - cmd2 = kernel_dir + "/exe.pl " + tmpbinary - - # Find the execution time - - execution_status = subprocess.Popen(cmd2, shell=True, 
stdout=subprocess.PIPE) - app_timeout = 120 - - try: - outs, errs = execution_status.communicate(timeout=app_timeout) - except subprocess.TimeoutExpired: - execution_status.kill() - for proc in psutil.process_iter(attrs=["pid", "name"]): - if "exe.pl" in proc.info["name"]: - proc.kill() - outs, errs = execution_status.communicate() - return app_timeout - - exetime = float(outs.strip()) - # exetime = execution_status.stdout.decode('utf-8') - # if exetime == 0: - # exetime = -1 - - return exetime # return execution time as cost diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/processexe.pl b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/processexe.pl deleted file mode 100755 index 756069dd0..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/processexe.pl +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/perl -#Change this path! -#Author: Xingfu Wu -# MCS, ANL -# processexe.pl: process the file exe.pl to change the proper number of nodes -# - -$A_FILE = "tmpfile.txt"; - -$filename1 = $ARGV[0]; - #print "Start to process ", $filename, "...\n"; - $fname = ">" . $A_FILE; - $i = 0; - open(OUTFILE, $fname); - open (TEMFILE, $filename1); - while (<TEMFILE>) { - $line = $_; - chomp ($line); - - if ($line =~ /system/) { - print OUTFILE " system(\"mpirun -n 4 ", " sh \$filename > tmpoutfile.txt 2>&1\");", "\n"; - } else { - print OUTFILE $line, "\n"; - } - } - close(TEMFILE); - close(OUTFILE); - system("mv $A_FILE $filename1"); - system("chmod +x $filename1"); -#exit main -exit 0; diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/runs.sh b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/runs.sh deleted file mode 100755 index c0ef329f5..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/runs.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -#This script is for running an app on a laptop using mpirun without any scheduler - -# set the number of nodes -let nnds=8 -# set the number of nodes for the MPI ranks per run -let nranks=4 -# set the number of workers (nnds/nranks plus 1) -let nws=3 -# set the maximum application runtime(s) as timeout baseline for each evaluation -let appto=120 - -#--- process processexe.pl to change the number of nodes (no change) -./processcp.pl ${nranks} -./plopper.pl plopper.py ${appto} - -#-----This part creates a submission script--------- -cat >batch.job < out.txt 2>&1 -EOF -#-----This part submits the script you just created-------------- -chmod +x batch.job -./batch.job diff --git a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/speed3d.sh b/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/speed3d.sh deleted file mode 100755 index a8ac03a6b..000000000 --- a/libensemble/tests/regression_tests/scripts_used_by_reg_tests/ytopt-libe-speed3d/speed3d.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -x - -./speed3d_c2c fftw double 128 128 128 #P0 #P1 #P2 #P3 diff --git a/libensemble/tests/regression_tests/test_heffte.py b/libensemble/tests/regression_tests/test_heffte.py deleted file mode 100644 index 73f3dfdb0..000000000 --- a/libensemble/tests/regression_tests/test_heffte.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Test libEnsemble's integration with calling the heFFTe executable with various -configurations. 
-""" - -# Do not change these lines - they are parsed by run-tests.sh -# TESTSUITE_COMMS: mpi -# TESTSUITE_NPROCS: 2 4 -# TESTSUITE_EXTRA: true -# TESTSUITE_OS_SKIP: OSX - -import itertools -from os.path import exists - -import numpy as np - -from libensemble import Ensemble -from libensemble.alloc_funcs.give_pregenerated_work import give_pregenerated_sim_work as alloc_f - -# Import libEnsemble items for this test -from libensemble.sim_funcs.heffte import call_and_process_heffte as sim_f -from libensemble.specs import AllocSpecs, ExitCriteria, SimSpecs - -# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). -if __name__ == "__main__": - assert exists("speed3d_c2c"), "The heFFTe executable doesn't exist" - - fixed = ["mpirun -np 4 ./speed3d_c2c fftw double 128 128 128"] - arg1 = ["-no-reorder", "-reorder"] - arg2 = ["-a2a", "-a2av", "-p2p", "-p2p_pl"] - arg3 = ["-ingrid 4 1 1", "-ingrid 2 2 1"] - arg4 = ["-outgrid 4 1 1", "-outgrid 2 2 1"] - - part_list = list(itertools.product(fixed, arg1, arg2, arg3, arg4)) - full_list = list(map(" ".join, part_list)) - max_len = max([len(item) for item in full_list]) - - study = Ensemble(parse_args=True) - study.sim_specs = SimSpecs( - sim_f=sim_f, - inputs=["exec_and_args"], - out=[("RUN_TIME", float)], - ) - - n_samp = len(full_list) - H0 = np.zeros(n_samp, dtype=[("exec_and_args", str, max_len), ("sim_id", int), ("sim_started", bool)]) - H0["exec_and_args"] = full_list - H0["sim_id"] = range(n_samp) - - study.alloc_specs = AllocSpecs(alloc_f=alloc_f) - study.exit_criteria = ExitCriteria(sim_max=len(H0)) - study.H0 = H0 - study.run() - - if study.is_manager: - assert len(study.H) == len(study.H0) - assert np.all(study.H["sim_ended"]), "Every point should have been marked as ended" - assert len(np.unique(study.H["RUN_TIME"])) >= len(study.H) / 2, "Most of the RUN_TIMEs should be unique" - print("\nlibEnsemble correctly didn't add anything to initial sample") - study.save_output(__file__) diff --git a/libensemble/tests/regression_tests/test_ytopt_heffte.py b/libensemble/tests/regression_tests/test_ytopt_heffte.py deleted file mode 100644 index f9f1d1f91..000000000 --- a/libensemble/tests/regression_tests/test_ytopt_heffte.py +++ /dev/null @@ -1,150 +0,0 @@ -""" -Runs libEnsemble to call the ytopt ask/tell interface in a generator function, -and the ytopt findRunTime interface in a simulator function. - -Execute locally via one of the following commands (e.g. 3 workers): - mpiexec -np 4 python run_ytopt_xsbench.py - python run_ytopt_xsbench.py --nworkers 3 --comms local - -The number of concurrent evaluations of the objective function will be 4-1=3. 
-""" - -# Do not change these lines - they are parsed by run-tests.sh -# TESTSUITE_COMMS: mpi -# TESTSUITE_NPROCS: 3 -# TESTSUITE_EXTRA: true -# TESTSUITE_OS_SKIP: OSX - -import warnings - -# A ytopt dependency uses an ImportWarning -warnings.filterwarnings("ignore", category=ImportWarning) -warnings.filterwarnings("ignore", category=FutureWarning) -warnings.filterwarnings("ignore", category=RuntimeWarning) -warnings.filterwarnings("ignore", category=DeprecationWarning) - -import os -import secrets -import sys - -ytopt_files_loc = "./scripts_used_by_reg_tests/ytopt-libe-speed3d/" -sys.path.append(ytopt_files_loc) - -import ConfigSpace as CS -import ConfigSpace.hyperparameters as CSH -import numpy as np -from optimizer import Optimizer - -from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f -from libensemble.gen_funcs.ytopt_asktell import persistent_ytopt # Gen function, communicates with ytopt optimizer - -# Import libEnsemble items for this test -from libensemble.libE import libE -from libensemble.sim_funcs.ytopt_obj import init_obj # Sim function, calls Plopper -from libensemble.tools import add_unique_random_streams, parse_args - -# Parse comms, default options from commandline -nworkers, is_manager, libE_specs, user_args_in = parse_args() -num_sim_workers = nworkers - 1 # Subtracting one because one worker will be the generator - -user_args_in = ["--learner=RF", "--max-evals=10"] -assert len(user_args_in), "learner, etc. not specified, e.g. --learner RF" -user_args = {} -for entry in user_args_in: - if entry.startswith("--"): - if "=" not in entry: - key = entry.strip("--") - value = user_args_in[user_args_in.index(entry) + 1] - else: - split = entry.split("=") - key = split[0].strip("--") - value = split[1] - - user_args[key] = value - -req_settings = ["learner", "max-evals"] -assert all([opt in user_args for opt in req_settings]), "Required settings missing. 
Specify each setting in " + str( - req_settings -) - -# Set options so workers operate in unique directories -here = os.path.join(os.getcwd(), ytopt_files_loc) - -libE_specs["use_worker_dirs"] = True -libE_specs["sim_dirs_make"] = False # Otherwise directories separated by each sim call -libE_specs["ensemble_dir_path"] = "./ensemble_" + secrets.token_hex(nbytes=4) - -# Copy or symlink needed files into unique directories -libE_specs["sim_dir_symlink_files"] = [here + f for f in ["speed3d.sh", "exe.pl", "plopper.py", "processexe.pl"]] -libE_specs["sim_dir_symlink_files"] += ["speed3d_c2c"] - -# Declare the sim_f to be optimized, and the input/outputs -sim_specs = { - "sim_f": init_obj, - "in": ["p0", "p1", "p2", "p3"], - "out": [("RUNTIME", float), ("elapsed_sec", float)], -} - -cs = CS.ConfigurationSpace(seed=1234) -p0 = CSH.CategoricalHyperparameter(name="p0", choices=["-no-reorder", "-reorder"], default_value="-no-reorder") -p1 = CSH.CategoricalHyperparameter(name="p1", choices=["-a2a", "-a2av", "-p2p", "-p2p_pl"], default_value="-a2a") -p2 = CSH.CategoricalHyperparameter(name="p2", choices=["-ingrid 4 1 1", "-ingrid 2 2 1"], default_value="-ingrid 4 1 1") -p3 = CSH.CategoricalHyperparameter( - name="p3", choices=["-outgrid 4 1 1", "-outgrid 2 2 1"], default_value="-outgrid 4 1 1" -) -cs.add_hyperparameters([p0, p1, p2, p3]) - -ytoptimizer = Optimizer( - num_workers=num_sim_workers, - space=cs, - learner=user_args["learner"], - liar_strategy="cl_max", - acq_func="gp_hedge", - set_KAPPA=1.96, - set_SEED=2345, - set_NI=10, -) - -# Declare the gen_f that will generate points for the sim_f, and the various input/outputs -gen_specs = { - "gen_f": persistent_ytopt, - "out": [("p0", "