diff --git a/idaes/core/util/convergence/convergence_base.py b/idaes/core/util/convergence/convergence_base.py index e3935033a5..a5f39cae3f 100644 --- a/idaes/core/util/convergence/convergence_base.py +++ b/idaes/core/util/convergence/convergence_base.py @@ -22,7 +22,7 @@ - a Pyomo solver with appropriate options The module executes convergence evaluation in two steps. In the first step, a -json file is created that containsa set of points sampled from the provided +json file is created that contains a set of points sampled from the provided inputs. This step only needs to be done once - up front. The second step, which should be executed any time there is a major code change that could impact the model, takes that set of sampled points and solves the model at each of the @@ -79,6 +79,7 @@ class from ConvergenceEvaluation, and implement three methods: from pyomo.core import Param, Var from pyomo.common.log import LoggingIntercept from pyomo.environ import check_optimal_termination +from pyomo.common.deprecation import deprecated # idaes import idaes.core.util.convergence.mpi_utils as mpiu @@ -92,6 +93,11 @@ class from ConvergenceEvaluation, and implement three methods: convergence_classes = {} +@deprecated( + msg="This function has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) def register_convergence_class(name): def _register_convergence_class(cls): if name in convergence_classes: @@ -102,6 +108,11 @@ def _register_convergence_class(cls): return _register_convergence_class +@deprecated( + msg="This class has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) class ConvergenceEvaluationSpecification(object): """ Object for defining sample points for convergence evaluations. @@ -121,28 +132,27 @@ def add_sampled_input( (with given mean and standard deviation) truncated to the values given by lower and upper bounds - Parameters - ---------- - name : str - The name of the input. - pyomo_path : str - A string representation of the path to the variable or parameter to - be sampled. This string will be executed to retrieve the Pyomo - component. - lower : float - A lower bound on the input variable or parameter. - upper : float - An upper bound on the input variable or parameter - mean : float - The mean value to use when generating normal distribution samples - std : float - The standard deviation to use when generating normal distribution samples - distribution : str - The Distribution type {"normal", "uniform"} + Args: + name - str + The name of the input. + pyomo_path - str + A string representation of the path to the variable or parameter to + be sampled. This string will be executed to retrieve the Pyomo + component. + lower - float + A lower bound on the input variable or parameter. + upper - float + An upper bound on the input variable or parameter + mean - float + The mean value to use when generating normal distribution samples + std - float + The standard deviation to use when generating normal distribution samples + distribution - str + The Distribution type {"normal", "uniform"} + + Returns: + None - Returns - ------- - N/A """ # ToDo: put some error checking here ... Maybe we should have the model # ToDo: already? Can use to check if the pyomo_path is valid? 
check if @@ -161,6 +171,11 @@ def inputs(self): return self._inputs +@deprecated( + msg="This class has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) class ConvergenceEvaluation: """ Object for running convergence evaluations. @@ -196,11 +211,10 @@ def get_initialized_model(self): values of parameters or variables according to the sampling specifications. - Returns - ------- - Pyomo model : return a Pyomo model object that is initialized and - ready to solve. This is the model object that will be - used in the evaluation. + Returns: + Pyomo model - return a Pyomo model object that is initialized and + ready to solve. This is the model object that will be + used in the evaluation. """ raise NotImplementedError( "Not implemented in the base class. This" @@ -214,9 +228,8 @@ def get_solver(self): Users may overload this to use a custom solver or options if required. - Returns - ------- - Pyomo solver + Returns: + Pyomo solver """ return get_solver() @@ -397,26 +410,22 @@ def _run_ipopt_with_stats(model, solver, max_iter=500, max_cpu_time=120): """ Run the solver (must be ipopt) and return the convergence statistics - Parameters - ---------- - model : Pyomo model - The pyomo model to be solved - - solver : Pyomo solver - The pyomo solver to use - it must be ipopt, but with whichever options - are preferred - - max_iter : int - The maximum number of iterations to allow for ipopt + Args: + model - Pyomo model + The pyomo model to be solved + solver - Pyomo solver + The pyomo solver to use - it must be ipopt, but with whichever options + are preferred + max_iter - int + The maximum number of iterations to allow for ipopt + max_cpu_time - int + The maximum cpu time to allow for ipopt (in seconds) - max_cpu_time : int - The maximum cpu time to allow for ipopt (in seconds) + Returns: + Returns a tuple with (solve status object, bool (solve successful or + not), number of iters, number of iters in restoration, number of iters with regularization, + solve time) - Returns - ------- - Returns a tuple with (solve status object, bool (solve successful or - not), number of iters, number of iters in restoration, number of iters with regularization, - solve time) """ # ToDo: Check that the "solver" is, in fact, IPOPT @@ -510,25 +519,30 @@ def _set_model_parameters_from_sample(model, inputs, sample_point): ) +@deprecated( + msg="This function has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) def generate_samples(eval_spec, n_points, seed=None): """ Samples the space of the inputs defined in the eval_spec, and creates an OrderedDict with all the points to be used in executing a convergence evaluation - Parameters - ---------- - eval_spec : ConvergenceEvaluationSpecification - The convergence evaluation specification object that we would like to - sample - n_points : int - The total number of points that should be created - seed : int or None - The seed to be used when generating samples. If set to None, then the - seed is not set - Returns - ------- + Args: + eval_spec - ConvergenceEvaluationSpecification + The convergence evaluation specification object that we would like to + sample + n_points - int + The total number of points that should be created + seed - int or None + The seed to be used when generating samples. 
If set to None, then the + seed is not set + + Returns: OrderedDict of samples + """ if seed is not None: np.random.seed(seed) @@ -549,6 +563,11 @@ def generate_samples(eval_spec, n_points, seed=None): return samples +@deprecated( + msg="This function has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) def write_sample_file( eval_spec, filename, convergence_evaluation_class_str, n_points, seed=None ): @@ -557,25 +576,25 @@ def write_sample_file( json file with all the points to be used in executing a convergence evaluation - Parameters - ---------- - filename : str - The filename for the json file that will be created containing all the - points to be run - eval_spec : ConvergenceEvaluationSpecification - The convergence evaluation specification object that we would like to - sample - convergence_evaluation_class_str : str - Python string that identifies the convergence evaluation class for this - specific evaluation. This is usually in the form of module.class_name. - n_points : int - The total number of points that should be created - seed : int or None - The seed to be used when generating samples. If set to None, then the - seed is not set - Returns - ------- - N/A + Args: + filename - str + The filename for the json file that will be created containing all the + points to be run + eval_spec - ConvergenceEvaluationSpecification + The convergence evaluation specification object that we would like to + sample + convergence_evaluation_class_str - str + Python string that identifies the convergence evaluation class for this + specific evaluation. This is usually in the form of module.class_name. + n_points - int + The total number of points that should be created + seed - int or None + The seed to be used when generating samples. If set to None, then the + seed is not set + + Returns: + None + """ # build the samples samples = generate_samples(eval_spec, n_points, seed) @@ -592,6 +611,11 @@ def write_sample_file( json.dump(jsondict, fd, indent=3) +@deprecated( + msg="This function has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) def run_convergence_evaluation_from_sample_file(sample_file): """ Run convergence evaluation using specified sample file. @@ -624,6 +648,11 @@ def run_convergence_evaluation_from_sample_file(sample_file): return run_convergence_evaluation(jsondict, conv_eval) +@deprecated( + msg="This function has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) def run_single_sample_from_sample_file(sample_file, name): """ Run single convergence evaluation from sample in provided file. @@ -657,6 +686,11 @@ def run_single_sample_from_sample_file(sample_file, name): return run_single_sample(jsondict, conv_eval, name) +@deprecated( + msg="This function has been deprecated in favor of the new Parameter Sweep " + "tools and may be removed in a future release.", + version="2.3.0", +) def run_single_sample(sample_file_dict, conv_eval, name): """ Run single sample from dict and return IPOPT stats. 
@@ -676,23 +710,26 @@ def run_single_sample(sample_file_dict, conv_eval, name):
     return _run_ipopt_with_stats(model, solver)
 
 
+@deprecated(
+    msg="This function has been deprecated in favor of the new Parameter Sweep "
+    "tools and may be removed in a future release.",
+    version="2.3.0",
+)
 def run_convergence_evaluation(sample_file_dict, conv_eval):
     """
     Run convergence evaluation and generate the statistics based on information
     in the sample_file.
 
-    Parameters
-    ----------
-    sample_file_dict : dict
-        Dictionary created by ConvergenceEvaluationSpecification that contains
-        the input and sample point information
+    Args:
+        sample_file_dict - dict
+            Dictionary created by ConvergenceEvaluationSpecification that contains
+            the input and sample point information
+        conv_eval - ConvergenceEvaluation
+            The ConvergenceEvaluation object that should be used
 
-    conv_eval : ConvergenceEvaluation
-        The ConvergenceEvaluation object that should be used
+    Returns:
+        tuple of (inputs, samples, global_results)
 
-    Returns
-    -------
-    N/A
     """
     inputs = sample_file_dict["inputs"]
     samples = sample_file_dict["samples"]
@@ -754,6 +791,11 @@ def run_convergence_evaluation(sample_file_dict, conv_eval):
     return inputs, samples, global_results
 
 
+@deprecated(
+    msg="This function has been deprecated in favor of the new Parameter Sweep "
+    "tools and may be removed in a future release.",
+    version="2.3.0",
+)
 def generate_baseline_statistics(
     conv_eval, n_points: int, seed: int = None, display: bool = True
 ):
@@ -818,6 +860,11 @@ def generate_baseline_statistics(
     return jsondict
 
 
+@deprecated(
+    msg="This function has been deprecated in favor of the new Parameter Sweep "
+    "tools and may be removed in a future release.",
+    version="2.3.0",
+)
 def save_convergence_statistics(
     inputs, results, dmf=None, display=True, json_path=None, report_path=None
 ):
@@ -838,6 +885,11 @@ def save_convergence_statistics(
     return s
 
 
+@deprecated(
+    msg="This class has been deprecated in favor of the new Parameter Sweep "
+    "tools and may be removed in a future release.",
+    version="2.3.0",
+)
 class Stats(object):
     def __init__(self, inputs=None, results=None, from_dict=None, from_json=None):
         """A convergence stats and results object. This class stores the
diff --git a/idaes/core/util/model_diagnostics.py b/idaes/core/util/model_diagnostics.py
index 72b63520a9..49907f64e3 100644
--- a/idaes/core/util/model_diagnostics.py
+++ b/idaes/core/util/model_diagnostics.py
@@ -20,8 +20,8 @@
 from operator import itemgetter
 import sys
 from inspect import signature
-import math
-from math import log
+from math import log, isclose, inf, isfinite
+import json
 from typing import List
 
 import numpy as np
@@ -75,6 +75,7 @@
 from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr
 from pyomo.common.deprecation import deprecation_warning
 from pyomo.common.errors import PyomoException
+from pyomo.common.tempfiles import TempfileManager
 
 from idaes.core.util.model_statistics import (
     activated_blocks_set,
@@ -98,6 +99,11 @@
     extreme_jacobian_entries,
     jacobian_cond,
 )
+from idaes.core.util.parameter_sweep import (
+    SequentialSweepRunner,
+    ParameterSweepBase,
+    is_psweepspec,
+)
 
 import idaes.logger as idaeslog
 
 _log = idaeslog.getLogger(__name__)
@@ -1770,9 +1776,9 @@ def display_variables_in_constraint(self, constraint, stream=None):
 def _get_bounds_with_inf(node: NumericExpression):
     lb, ub = compute_bounds_on_expr(node)
     if lb is None:
-        lb = -math.inf
+        lb = -inf
     if ub is None:
-        ub = math.inf
+        ub = inf
 
     return lb, ub
@@ -1854,7 +1860,7 @@ def _check_eval_error_tan(
     node: NumericExpression, warn_list: List[str], config: ConfigDict
 ):
     lb, ub = _get_bounds_with_inf(node)
-    if not (math.isfinite(lb) and math.isfinite(ub)):
+    if not (isfinite(lb) and isfinite(ub)):
         msg = f"{node} may evaluate to -inf or inf; Argument bounds are {_get_bounds_with_inf(node.args[0])}"
         warn_list.append(msg)
@@ -2989,6 +2995,418 @@ def print_variable_bounds(v):
     print(v, "\t\t", v.lb, "\t", v.value, "\t", v.ub)
 
 
+def psweep_runner_validator(val):
+    """Domain validator for Parameter Sweep runners
+
+    Args:
+        val : value to be checked
+
+    Returns:
+        val if it is a subclass of ParameterSweepBase
+
+    Raises:
+        ValueError if val is not a subclass of ParameterSweepBase
+    """
+    if issubclass(val, ParameterSweepBase):
+        return val
+
+    raise ValueError("Workflow runner must be a subclass of ParameterSweepBase.")
+
+
+CACONFIG = ConfigDict()
+CACONFIG.declare(
+    "input_specification",
+    ConfigValue(
+        domain=is_psweepspec,
+        doc="ParameterSweepSpecification object defining inputs to be sampled",
+    ),
+)
+CACONFIG.declare(
+    "workflow_runner",
+    ConfigValue(
+        default=SequentialSweepRunner,
+        domain=psweep_runner_validator,
+        doc="Parameter sweep workflow runner",
+    ),
+)
+CACONFIG.declare(
+    "solver_options",
+    ConfigValue(
+        domain=None,
+        description="Options to pass to IPOPT.",
+    ),
+)
+CACONFIG.declare(
+    "halt_on_error",
+    ConfigValue(
+        default=False,
+        domain=bool,
+        doc="Whether to halt execution of parameter sweep on encountering a solver error (default=False).",
+    ),
+)
+
+
+class IpoptConvergenceAnalysis:
+    """
+    Tool to perform a parameter sweep of a model, checking for numerical issues
+    and convergence characteristics. Users may specify an IDAES ParameterSweep
+    class to perform the sweep (default is SequentialSweepRunner).
+    """
+
+    CONFIG = CACONFIG()
+
+    def __init__(self, model, **kwargs):
+        # TODO: In future may want to generalise this to accept indexed blocks
+        # However, for now some of the tools do not support indexed blocks
+        if not isinstance(model, _BlockData):
+            raise TypeError(
+                "model argument must be an instance of a Pyomo BlockData object "
+                "(either a scalar Block or an element of an indexed Block)."
+ ) + + self.config = self.CONFIG(kwargs) + + self._model = model + + solver = SolverFactory("ipopt") + if self.config.solver_options is not None: + solver.options = self.config.solver_options + + self._psweep = self.config.workflow_runner( + input_specification=self.config.input_specification, + build_model=self._build_model, + rebuild_model=True, + run_model=self._run_model, + build_outputs=self._build_outputs, + halt_on_error=self.config.halt_on_error, + handle_solver_error=self._recourse, + solver=solver, + ) + + @property + def results(self): + """ + Returns the results of the IpoptConvergenceAnalysis run + """ + return self._psweep.results + + @property + def samples(self): + """ + Returns the set of input samples for convergence analysis (pandas DataFrame) + """ + return self._psweep.get_input_samples() + + def run_convergence_analysis(self): + """ + Execute convergence analysis sweep by calling execute_parameter_sweep + method in specified runner. + + Returns: + dict of results from parameter sweep + """ + return self._psweep.execute_parameter_sweep() + + def run_convergence_analysis_from_dict(self, input_dict: dict): + """ + Execute convergence analysis sweep using specification defined in dict. + + Args: + input_dict: dict to load specification from + + Returns: + dict of results from parameter sweep + """ + self.from_dict(input_dict) + return self.run_convergence_analysis() + + def run_convergence_analysis_from_file(self, filename: str): + """ + Execute convergence analysis sweep using specification defined in json file. + + Args: + filename: name of file to load specification from as string + + Returns: + dict of results from parameter sweep + """ + self.from_json_file(filename) + return self.run_convergence_analysis() + + def compare_convergence_to_baseline( + self, filename: str, rel_tol: float = 0.1, abs_tol: float = 1 + ): + """ + Run convergence analysis and compare results to those defined in baseline file. + + Args: + filename: name of baseline file to load specification from as string + rel_tol: relative tolerance to use for comparing number of iterations + abs_tol: absolute tolerance to use for comparing number of iterations + + Returns: + dict containing lists of differences between convergence analysis run and baseline + """ + with open(filename, "r") as f: + # Load file manually so we have saved results + jdict = json.load(f) + f.close() + + # Run convergence analysis from dict + self.run_convergence_analysis_from_dict(jdict) + + # Compare results + return self._compare_results_to_dict(jdict, rel_tol=rel_tol, abs_tol=abs_tol) + + def assert_baseline_comparison( + self, filename: str, rel_tol: float = 0.1, abs_tol: float = 1 + ): + """ + Run convergence analysis and assert no differences in results to those defined + in baseline file. 
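+
+        Iteration counts are compared using math.isclose with the given
+        tolerances; the success and numerical_issues flags must match exactly.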
+
+        Args:
+            filename: name of baseline file to load specification from as string
+            rel_tol: relative tolerance to use for comparing number of iterations
+            abs_tol: absolute tolerance to use for comparing number of iterations
+
+        Raises:
+            AssertionError if results of convergence analysis do not match baseline
+        """
+        diffs = self.compare_convergence_to_baseline(
+            filename, rel_tol=rel_tol, abs_tol=abs_tol
+        )
+
+        if any(len(v) != 0 for v in diffs.values()):
+            raise AssertionError("Convergence analysis does not match baseline")
+
+    def to_dict(self):
+        """
+        Serialize specification and current results to dict form
+
+        Returns:
+            dict
+        """
+        return self._psweep.to_dict()
+
+    def from_dict(self, input_dict):
+        """
+        Load specification and results from dict.
+
+        Args:
+            input_dict: dict to load from
+
+        Returns:
+            None
+        """
+        return self._psweep.from_dict(input_dict)
+
+    def to_json_file(self, filename):
+        """
+        Write specification and results to json file.
+
+        Args:
+            filename: name of file to write to as string
+
+        Returns:
+            None
+        """
+        return self._psweep.to_json_file(filename)
+
+    def from_json_file(self, filename):
+        """
+        Load specification and results from json file.
+
+        Args:
+            filename: name of file to load from as string
+
+        Returns:
+            None
+        """
+        return self._psweep.from_json_file(filename)
+
+    def _build_model(self):
+        # Create new instance of model by cloning
+        return self._model.clone()
+
+    def _run_model(self, model, solver):
+        # Run model using IPOPT and collect stats
+        (
+            status,
+            iters,
+            iters_in_restoration,
+            iters_w_regularization,
+            time,
+        ) = self._run_ipopt_with_stats(model, solver)
+
+        run_stats = [
+            iters,
+            iters_in_restoration,
+            iters_w_regularization,
+            time,
+        ]
+
+        success = check_optimal_termination(status)
+
+        return success, run_stats
+
+    @staticmethod
+    def _build_outputs(model, run_stats):
+        # Run model diagnostics numerical checks
+        dt = DiagnosticsToolbox(model=model)
+
+        warnings = False
+        try:
+            dt.assert_no_numerical_warnings()
+        except AssertionError:
+            warnings = True
+
+        # Compile Results
+        return {
+            "iters": run_stats[0],
+            "iters_in_restoration": run_stats[1],
+            "iters_w_regularization": run_stats[2],
+            "time": run_stats[3],
+            "numerical_issues": warnings,
+        }
+
+    @staticmethod
+    def _recourse(model):
+        # Return a default dict indicating no results
+        return {
+            "iters": -1,
+            "iters_in_restoration": -1,
+            "iters_w_regularization": -1,
+            "time": -1,
+            "numerical_issues": -1,
+        }
+
+    @staticmethod
+    def _parse_ipopt_output(ipopt_file):
+        # Parse IPOPT logs and return key metrics
+        # ToDo: Check for final iteration with regularization or restoration
+
+        iters = 0
+        iters_in_restoration = 0
+        iters_w_regularization = 0
+        time = 0
+        # parse the output file to get the iteration count, solver times, etc.
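+        # Restoration iterations are identified by an "r" suffix on the
+        # iteration number; regularized iterations have a numeric entry
+        # (rather than "-") in the lg(rg) column. The two "Total CPU secs"
+        # lines at the end of the log are summed to give the solve time.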
+ with open(ipopt_file, "r") as f: + parseline = False + for line in f: + if line.startswith("iter"): + # This marks the start of the iteration logging, set parseline True + parseline = True + elif line.startswith("Number of Iterations....:"): + # Marks end of iteration logging, set parseline False + parseline = False + tokens = line.split() + iters = int(tokens[3]) + elif parseline: + # Line contains details of an iteration, look for restoration or regularization + tokens = line.split() + try: + if not tokens[6] == "-": + # Iteration with regularization + iters_w_regularization += 1 + if tokens[0].endswith("r"): + # Iteration in restoration + iters_in_restoration += 1 + except IndexError: + # Blank line at end of iteration list, so assume we hit this + pass + elif line.startswith( + "Total CPU secs in IPOPT (w/o function evaluations) =" + ): + tokens = line.split() + time += float(tokens[9]) + elif line.startswith( + "Total CPU secs in NLP function evaluations =" + ): + tokens = line.split() + time += float(tokens[8]) + + return iters, iters_in_restoration, iters_w_regularization, time + + def _run_ipopt_with_stats(self, model, solver, max_iter=500, max_cpu_time=120): + # Solve model using provided solver (assumed to be IPOPT) and parse logs + # ToDo: Check that the "solver" is, in fact, IPOPT + + TempfileManager.push() + tempfile = TempfileManager.create_tempfile(suffix="ipopt_out", text=True) + opts = { + "output_file": tempfile, + "max_iter": max_iter, + "max_cpu_time": max_cpu_time, + } + + status_obj = solver.solve(model, options=opts, tee=True) + + ( + iters, + iters_in_restoration, + iters_w_regularization, + time, + ) = self._parse_ipopt_output(tempfile) + + TempfileManager.pop(remove=True) + return status_obj, iters, iters_in_restoration, iters_w_regularization, time + + def _compare_results_to_dict( + self, + compare_dict: dict, + rel_tol: float = 0.1, + abs_tol: float = 1, + ): + # Compare results + success_diff = [] + iters_diff = [] + restore_diff = [] + reg_diff = [] + num_iss_diff = [] + + for k, v in self.results.items(): + # Get sample result from compare dict + try: + comp = compare_dict["results"][k] + except KeyError: + # Reading from json often converts ints to strings + # Check to see if the index as a string works + comp = compare_dict["results"][str(k)] + + # Check for differences + if v["success"] != comp["success"]: + success_diff.append(k) + if not isclose( + v["results"]["iters"], + comp["results"]["iters"], + rel_tol=rel_tol, + abs_tol=abs_tol, + ): + iters_diff.append(k) + if not isclose( + v["results"]["iters_in_restoration"], + comp["results"]["iters_in_restoration"], + rel_tol=rel_tol, + abs_tol=abs_tol, + ): + restore_diff.append(k) + if not isclose( + v["results"]["iters_w_regularization"], + comp["results"]["iters_w_regularization"], + rel_tol=rel_tol, + abs_tol=abs_tol, + ): + reg_diff.append(k) + if v["results"]["numerical_issues"] != comp["results"]["numerical_issues"]: + num_iss_diff.append(k) + + return { + "success": success_diff, + "iters": iters_diff, + "iters_in_restoration": restore_diff, + "iters_w_regularization": reg_diff, + "numerical_issues": num_iss_diff, + } + + def get_valid_range_of_component(component): """ Return the valid range for a component as specified in the model metadata. 
diff --git a/idaes/core/util/parameter_sweep.py b/idaes/core/util/parameter_sweep.py
new file mode 100644
index 0000000000..4c12484cd6
--- /dev/null
+++ b/idaes/core/util/parameter_sweep.py
@@ -0,0 +1,818 @@
+#################################################################################
+# The Institute for the Design of Advanced Energy Systems Integrated Platform
+# Framework (IDAES IP) was produced under the DOE Institute for the
+# Design of Advanced Energy Systems (IDAES).
+#
+# Copyright (c) 2018-2023 by the software owners: The Regents of the
+# University of California, through Lawrence Berkeley National Laboratory,
+# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
+# University, West Virginia University Research Corporation, et al.
+# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
+# for full copyright and license information.
+#################################################################################
+"""
+IDAES Parameter Sweep API and sequential workflow runner.
+"""
+
+import sys
+import json
+
+from pandas import DataFrame
+
+from pyomo.core import Param, Var
+from pyomo.environ import check_optimal_termination
+from pyomo.common.config import ConfigDict, ConfigValue, document_kwargs_from_configdict
+
+import idaes.logger as idaeslog
+from idaes.core.surrogate.pysmo.sampling import SamplingMethods, UniformSampling
+from idaes.core.util.exceptions import ConfigurationError
+
+__author__ = "Andrew Lee"
+
+# Set up logger
+_log = idaeslog.getLogger(__name__)
+
+# TODO: Timeouts
+# TODO: Re-initialize option/callback
+
+
+class ParameterSweepSpecification(object):
+    """Defines a set of input variables/parameters and values to be used in
+    a parameter sweep study.
+
+    Users can define inputs to be sampled, the number of samples and the method to
+    use to generate the samples (chosen from Pysmo's sampling methods) and then
+    call the generate_samples method to generate the desired set of samples.
+    """
+
+    # TODO: Consider supporting sampling from data sets in the future
+    def __init__(self):
+        self._inputs = {}
+        self._sampling_method = None
+        self._samples = None
+        self._sample_size = None
+
+    def add_sampled_input(
+        self,
+        pyomo_path: str,
+        lower: float,
+        upper: float,
+        name: str = None,
+    ):
+        """
+        Add an input that should be sampled when forming the set of
+        specific points that need to be run for the parameter sweep study.
+
+        The input will be sampled between the given lower and upper bounds
+        using the sampling method set for this specification.
+
+        Args:
+            pyomo_path : A string representation of the path to the variable or
+                parameter to be sampled. This string will be executed to retrieve
+                the Pyomo component.
+            lower : lower bound on the input variable or parameter.
+            upper : upper bound on the input variable or parameter.
+            name : (Optional) display name for the input. If not provided,
+                pyomo_path will be used.
+
+        Returns:
+            None
+        """
+        # ToDo: put some error checking here ... Maybe we should have the model
+        # ToDo: already? Can use to check if the pyomo_path is valid? check if
+        # ToDo: bounds will be violated?
+        spec = {}
+        spec["pyomo_path"] = pyomo_path
+        spec["lower"] = lower
+        spec["upper"] = upper
+
+        if name is None:
+            # If no name given, use Pyomo path
+            name = pyomo_path
+
+        self._inputs[name] = spec
+
+    def set_sampling_method(self, sampling_method):
+        """Defines the method to use to generate samples. This must be a Pysmo
+        SamplingMethod.
+
+        Args:
+            sampling_method: SamplingMethod class to use.
+
+        Returns:
+            None
+        """
+        try:
+            if issubclass(sampling_method, SamplingMethods):
+                self._sampling_method = sampling_method
+            else:
+                raise TypeError
+        except TypeError:
+            raise TypeError(
+                f"Sampling method must be an instance of a Pysmo SamplingMethod "
+                f"(received {sampling_method})"
+            )
+
+    def set_sample_size(self, sample_size):
+        """Sets the number of samples to be generated.
+
+        The format depends on the sampling method chosen. For UniformSampling, this must
+        be a list of ints of length equal to the number of inputs, otherwise it must be
+        an int.
+
+        Args:
+            sample_size: int or list-of-ints
+
+        Returns:
+            None
+        """
+        self._sample_size = sample_size
+
+    def _generate_pysmo_data_input(self):
+        lb_list = []
+        ub_list = []
+
+        for var_def in self.inputs.values():
+            lb_list.append(var_def["lower"])
+            ub_list.append(var_def["upper"])
+
+        return [lb_list, ub_list]
+
+    def generate_samples(self):
+        """
+        Generate the set of samples from the declared inputs using the
+        sampling method and sample size set via set_sampling_method and
+        set_sample_size.
+
+        Returns:
+            Pandas DataFrame of samples
+        """
+        if self.sampling_method is None:
+            raise ValueError(
+                "Please choose a sampling method to use for sample generation. "
+                "Sampling method must be chosen from those available in Pysmo."
+            )
+
+        if len(self._inputs) == 0:
+            raise ValueError("Please identify at least one input variable to sample.")
+
+        if self._sample_size is None:
+            raise ValueError("Please set a sample size.")
+        elif self.sampling_method is UniformSampling:
+            if not isinstance(self._sample_size, list):
+                raise TypeError(
+                    "For UniformSampling, sample_size must be list of integers."
+                )
+            sample_size = self._sample_size
+        else:
+            try:
+                sample_size = int(self._sample_size)
+            except ValueError:
+                raise ValueError("sample_size must be an integer.")
+            if sample_size <= 0:
+                raise ValueError("sample_size must be a positive integer.")
+
+        bounds_list = self._generate_pysmo_data_input()
+
+        # pylint: disable=not-callable
+        space_init = self.sampling_method(
+            bounds_list,
+            sample_size,
+            sampling_type="creation",
+        )
+
+        self._samples = DataFrame(
+            space_init.sample_points(), columns=[_ for _ in self.inputs]
+        )
+
+        return self.samples
+
+    @property
+    def inputs(self):
+        """
+        Returns a dict containing the declared inputs.
+        """
+        return self._inputs
+
+    @property
+    def sampling_method(self):
+        """
+        Returns the declared sampling method.
+        """
+        return self._sampling_method
+
+    @property
+    def sample_size(self):
+        """
+        Returns the declared sample size.
+        """
+        return self._sample_size
+
+    @property
+    def samples(self):
+        """
+        Returns the generated set of samples (pandas DataFrame).
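+        Returns None if generate_samples has not yet been called.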
+ """ + return self._samples + + def to_dict(self): + """Write specification to a dict + + Returns: + Input specification as a dict + """ + outdict = {} + + outdict["inputs"] = self._inputs + + # Pysmo sampling methods are not serializable, so get name instead + outdict["sampling_method"] = self._sampling_method.__name__ + outdict["sample_size"] = self._sample_size + outdict["samples"] = self._samples.to_dict(orient="tight") + + return outdict + + def from_dict(self, input_dict: dict): + """ + Load specification from dict + + Args: + input_dict: dict to load into specification + + Returns: + None + """ + self._inputs = {} + for k, v in input_dict["inputs"].items(): + self._inputs[k] = v + + # Hack to get Pysom sampling method from string name + mod = __import__( + "idaes.core.surrogate.pysmo.sampling", + fromlist=[input_dict["sampling_method"]], + ) + self._sampling_method = getattr(mod, input_dict["sampling_method"]) + + self._sample_size = input_dict["sample_size"] + self._samples = DataFrame().from_dict( + input_dict["samples"], + orient="tight", + ) + + def to_json_file(self, filename: str): + """ + Serialize specification to file in json format. + + Args: + filename: name of file to write to as string + + Returns: + None + """ + with open(filename, "w") as fd: + json.dump(self.to_dict(), fd, indent=3) + + def from_json_file(self, filename: str): + """ + Load specification from json file. + + Args: + filename: name of file to load as string + + Returns: + None + """ + with open(filename, "r") as f: + self.from_dict(json.load(f)) + f.close() + + +def is_psweepspec(val): + """ + Config validator for ParameterSweepSpecifications + + Args: + val: value to validate + + Returns: + ParameterSweepSpecification + + Raises: + ValueError if val is not a ParameterSweepSpecification + """ + if isinstance(val, ParameterSweepSpecification): + return val + _log.error( + f"Input configuration {val} must be an instance of ParameterSweepSpecification." + ) + raise ValueError( + "Input configuration must be an instance of ParameterSweepSpecification." + ) + + +def _is_solver(val): + """ + Config validator for Solver object. + + Use duck-typing and just look for a solve method + + Args: + val: value to validate + + Returns: + val if it has a solve method + + Raises: + ValueError if val does not have a solve method + """ + try: + if callable(val.solve): + return val + except AttributeError: + raise ValueError("solver must be an instance of a Pyomo Solver object.") + + +CONFIG = ConfigDict() +CONFIG.declare( + "rebuild_model", + ConfigValue( + default=True, + domain=bool, + doc="Whether to rebuild model for each sample, or reuse the same instance for " + "all runs (default=True, rebuild for all runs).", + ), +) +CONFIG.declare( + "build_model", + ConfigValue(doc="Callback method to construct initialized model for execution."), +) +CONFIG.declare( + "build_model_arguments", + ConfigValue( + domain=dict, + doc="Arguments to pass to build_model callback.", + ), +) +CONFIG.declare( + "run_model", + ConfigValue( + doc="Callback method to use when running model. If None, model is run with solver.solve()" + ), +) +CONFIG.declare( + "run_model_arguments", + ConfigValue( + domain=dict, + doc="Arguments to pass to run_model callback.", + ), +) +CONFIG.declare( + "build_outputs", + ConfigValue( + doc="Callback method to use to collect results from model after execution." 
+    ),
+)
+CONFIG.declare(
+    "build_outputs_arguments",
+    ConfigValue(
+        domain=dict,
+        doc="Arguments to pass to build_outputs callback.",
+    ),
+)
+CONFIG.declare(
+    "handle_solver_error",
+    ConfigValue(
+        doc="Callback method to use in case of exception when solving a sample."
+    ),
+)
+CONFIG.declare(
+    "handle_solver_error_arguments",
+    ConfigValue(
+        domain=dict,
+        doc="Arguments to pass to handle_solver_error callback.",
+    ),
+)
+CONFIG.declare(
+    "halt_on_error",
+    ConfigValue(
+        default=False,
+        domain=bool,
+        doc="Whether to halt execution of parameter sweep on encountering a solver error (default=False).",
+    ),
+)
+CONFIG.declare(
+    "input_specification",
+    ConfigValue(
+        domain=is_psweepspec,
+        doc="ParameterSweepSpecification object defining inputs to be sampled",
+    ),
+)
+CONFIG.declare(
+    "solver",
+    ConfigValue(
+        default=None,
+        domain=_is_solver,
+        doc="Pyomo solver object to use when solving model",
+    ),
+)
+
+
+@document_kwargs_from_configdict(CONFIG)
+class ParameterSweepBase:
+    """
+    IDAES base class for defining parameter sweep runners.
+
+    This base class defines a basic API for setting up parameter sweep studies
+    and allows the end-user to provide instructions for how to set up and run
+    the model to be studied using a set of callbacks.
+    """
+
+    def __init__(self, **kwargs):
+        self.config = CONFIG(kwargs)
+        self._results = {}
+        self._model = None  # used to store model instance if rebuild_model is False
+
+    @property
+    def results(self):
+        """
+        Returns dict containing the results from the parameter sweep.
+        """
+        return self._results
+
+    def execute_parameter_sweep(self):
+        """
+        Placeholder method for parameter sweep runners.
+
+        Developers should overload this with a method to execute the parameter sweep
+        using their preferred workflow manager.
+
+        Raises:
+            NotImplementedError
+        """
+        raise NotImplementedError(
+            "Derived classes should overload this method with a workflow manager."
+        )
+
+    def execute_single_sample(self, sample_id: int):
+        """
+        Executes a single run of the model using input values from sample_id.
+
+        Args:
+            sample_id: int indicating row in specification.samples to load into model
+                for run.
+
+        Returns:
+            results: results generated by build_outputs callback
+            success: bool indicating whether execution was successful
+            error: str if error occurs during solve else None
+        """
+        model = self.get_initialized_model()
+
+        # Load sample values
+        self.set_input_values(model, sample_id)
+
+        # Try/except to catch any critical failures that occur
+        error = None
+        try:
+            success, run_stats = self.run_model(model, self.config.solver)
+
+            if not success:
+                _log.warning(f"Sample {sample_id} did not report success.")
+        except Exception as e:  # pylint: disable=broad-except
+            if self.config.halt_on_error:
+                raise
+
+            success = False
+            error = str(e)  # Cast to string for storage
+
+        # Compile Results
+        if error is None:
+            results = self.build_outputs(model, run_stats)
+        else:
+            # Catch any Exception for recourse
+            results, success = self.handle_error(model)
+
+        _log.info(f"Sample {sample_id} finished.")
+
+        return results, success, error
+
+    def get_initialized_model(self):
+        """
+        Get instance of model to be run by calling build_model callback.
+
+        Returns:
+            Pyomo model to be run
+        """
+        if self.config.build_model is None:
+            raise ConfigurationError(
+                "Please specify a method to construct the model of interest."
+            )
+
+        if not self.config.rebuild_model:
+            # If reusing model, see if instance has been constructed yet
+            if self._model is not None:
+                # If yes, return and done
+                return self._model
+
+        # Otherwise, build instance of model
+        args = self.config.build_model_arguments
+        if args is None:
+            args = {}
+        model = self.config.build_model(**args)
+        # TODO: Verify model is actually a model?
+
+        if not self.config.rebuild_model:
+            # If reusing model, store instance for reuse
+            self._model = model
+
+        return model
+
+    def get_input_specification(self):
+        """
+        Get input specification from config block
+
+        Returns:
+            ParameterSweepSpecification object
+
+        Raises:
+            ConfigurationError if no input specification has been defined
+        """
+        if self.config.input_specification is None:
+            raise ConfigurationError(
+                "Please specify an input specification to use for sampling."
+            )
+
+        return self.config.input_specification
+
+    def get_input_samples(self):
+        """
+        Get dataframe of samples from input specification.
+
+        Returns:
+            pandas DataFrame of samples
+
+        Raises:
+            ConfigurationError if no input specification has been defined
+        """
+        spec = self.get_input_specification()
+
+        samples = spec.samples
+
+        if samples is None:
+            samples = spec.generate_samples()
+
+        return samples
+
+    def get_sample_values(self, sample_id: int):
+        """
+        Get inputs for a specific sample run indicated by sample_id.
+
+        Args:
+            sample_id: int representing a row in the specification.samples dataframe
+
+        Returns:
+            Pandas Series of input values for chosen sample
+        """
+        samples = self.get_input_samples()
+
+        return samples.iloc[sample_id]
+
+    def set_input_values(self, model, sample_id: int):
+        """
+        Set values of input variables/parameters in instance of model using values
+        from sample_id.
+
+        Args:
+            model: instance of model to be executed
+            sample_id: int representing a row in the specification.samples dataframe
+
+        Returns:
+            None
+
+        Raises:
+            ValueError if an input cannot be found or if it is not a fixed Var or
+            mutable Param
+        """
+        samples = self.get_input_samples()
+        inputs = self.config.input_specification.inputs
+
+        for k, i in inputs.items():
+            v = samples[k][sample_id]
+
+            # k stores the "name" of the input
+            # need to get the pyomo path of the input from the inputs structure
+            pyomo_path = i["pyomo_path"]
+
+            comp = model.find_component(pyomo_path)
+            try:
+                ctype = comp.ctype
+            except AttributeError:
+                ctype = None
+
+            # TODO: Validate bounds
+
+            if ctype is Param:
+                if comp.is_constant():
+                    raise ValueError(
+                        f"Convergence testing found an input of type Param that "
+                        f"was not mutable ({comp.name}). Please make sure all "
+                        f"sampled inputs are either mutable params or fixed vars."
+                    )
+                comp.set_value(v)
+            elif ctype is Var:
+                if not comp.is_fixed():
+                    raise ValueError(
+                        f"Convergence testing found an input of type Var that "
+                        f"was not fixed ({comp.name}). Please make sure all "
+                        f"sampled inputs are either mutable params or fixed vars."
+                    )
+                comp.set_value(float(v))
+            else:
+                raise ValueError(
+                    f"Failed to find a valid input component (must be "
+                    f"a fixed Var or a mutable Param). Instead, "
+                    f"pyomo_path: {pyomo_path} returned: {comp}."
+ ) + + def run_model(self, model, solver): + """ + Executes run of model by calling the run_model callback + + Args: + model: instance of model to be run + solver: Pyomo solver object to use to solve model + + Returns: + success: bool indicating whether execution was successful or not + run_stats: output collected by run_model callback (default is Pyomo SolverResults object) + """ + if self.config.run_model is None: + res = solver.solve(model) + + success = check_optimal_termination(res) + return success, res + + args = self.config.run_model_arguments + if args is None: + args = {} + + return self.config.run_model(model, solver, **args) + + def build_outputs(self, model, run_stats): + """ + Collects desired results from instance of model by calling build_outputs callback. + + Args: + model: instance of model to collect results from + run_stats: output from run_model callback to be collected in results + (default is Pyomo results object) + + Returns: + Output of build_outputs callback. If no build_outputs callback is provided, + returns run_stats instead. + """ + if self.config.build_outputs is None: + return run_stats + else: + args = self.config.build_outputs_arguments + if args is None: + args = {} + + return self.config.build_outputs(model, run_stats, **args) + + def handle_error(self, model): + """ + Call handle_solver_error callback. This method is used in case the solver encounters + a critical error whilst solving a sample. + + Args: + model: instance of model for performing recourse + + Returns: + Output of handle_solver_error callback + """ + if self.config.handle_solver_error is None: + # No recourse specified, so success=False and results=None + return None, False + else: + args = self.config.handle_solver_error_arguments + if args is None: + args = {} + return self.config.handle_solver_error(model, **args) + + def to_dict(self): + """ + Serialize specification and current results to dict form + + Returns: + dict + """ + # TODO : Need to serialize build_model method somehow? + outdict = {} + outdict["specification"] = self.get_input_specification().to_dict() + outdict["results"] = self.results + + return outdict + + def from_dict(self, input_dict: dict): + """ + Load specification and results from dict. + + Args: + input_dict: dict to load from + + Returns: + None + """ + if self.config.input_specification is not None: + # Log a warning about overwriting + _log.debug("Overwriting existing input specification") + + self.config.input_specification = ParameterSweepSpecification() + self.config.input_specification.from_dict(input_dict["specification"]) + + self._results = {} + # Need to iterate to convert string indices to int + # Converting to json turns the indices to strings + for k, v in input_dict["results"].items(): + self._results[int(k)] = v + + def to_json_file(self, filename: str): + """ + Write specification and results to json file. + + Args: + filename: name of file to write to as string + + Returns: + None + """ + with open(filename, "w") as fd: + json.dump(self.to_dict(), fd, indent=3) + + def from_json_file(self, filename: str): + """ + Load specification and results from json file. 
+ + Args: + filename: name of file to load from as string + + Returns: + None + """ + with open(filename, "r") as f: + self.from_dict(json.load(f)) + f.close() + + @staticmethod + def progress_bar(fraction, msg: str, length: int = 20): + """ + Prints a progress bar to stdout + + Args: + fraction: fraction of total samples executed + msg: string to append to progress bar + length: length of progress bar (default=20) + + Returns: + None + """ + n_complete = int(length * fraction) + n_remaining = length - n_complete + characters = f"{'*' * n_complete}{'-' * n_remaining}" + sys.stdout.write(f"{fraction * 100:.1f}% {characters} {msg}\n") + sys.stdout.flush() + + +class SequentialSweepRunner(ParameterSweepBase): + """ + Sequential runner for parameter sweeps. + + This class executes a parameter sweep by running all samples sequentially + in a for loop. + """ + + def execute_parameter_sweep(self): + """ + Execute sequential parameter sweep. + + Returns: + dict of results indexed by sample ID. + """ + self._results = {} + samples = self.get_input_samples() + + count = 1 + for s in samples.index: + sresults, success, error = self.execute_single_sample(s) + self._results[s] = {"success": success, "results": sresults, "error": error} + + self.progress_bar(float(count) / float(len(samples)), "Complete") + count += 1 + + return self.results diff --git a/idaes/core/util/tests/convergence_baseline.json b/idaes/core/util/tests/convergence_baseline.json new file mode 100644 index 0000000000..40bcbc1c5f --- /dev/null +++ b/idaes/core/util/tests/convergence_baseline.json @@ -0,0 +1,60 @@ +{ + "specification": { + "inputs": { + "v2": { + "pyomo_path": "v2", + "lower": 2, + "upper": 6 + } + }, + "sampling_method": "UniformSampling", + "sample_size": [ + 2 + ], + "samples": { + "index": [ + 0, + 1 + ], + "columns": [ + "v2" + ], + "data": [ + [ + 2.0 + ], + [ + 6.0 + ] + ], + "index_names": [ + null + ], + "column_names": [ + null + ] + } + }, + "results": { + "0": { + "success": true, + "results": { + "iters": 7, + "iters_in_restoration": 4, + "iters_w_regularization": 0, + "time": 0.002, + "numerical_issues": true + } + }, + "1": { + "success": false, + "results": { + "iters": 7, + "iters_in_restoration": 4, + "iters_w_regularization": 2, + "time": 0.001, + "numerical_issues": true + } + } + } +} \ No newline at end of file diff --git a/idaes/core/util/tests/ipopt_output.txt b/idaes/core/util/tests/ipopt_output.txt new file mode 100644 index 0000000000..8fb5b243de --- /dev/null +++ b/idaes/core/util/tests/ipopt_output.txt @@ -0,0 +1,105 @@ +****************************************************************************** +This program contains Ipopt, a library for large-scale nonlinear optimization. + Ipopt is released as open source code under the Eclipse Public License (EPL). + For more information visit http://projects.coin-or.org/Ipopt + +This version of Ipopt was compiled from source code available at + https://github.com/IDAES/Ipopt as part of the Institute for the Design of + Advanced Energy Systems Process Systems Engineering Framework (IDAES PSE + Framework) Copyright (c) 2018-2019. See https://github.com/IDAES/idaes-pse. + +This version of Ipopt was compiled using HSL, a collection of Fortran codes + for large-scale scientific computation. All technical papers, sales and + publicity material resulting from use of the HSL codes within IPOPT must + contain the following acknowledgement: + HSL, a collection of Fortran codes for large-scale scientific + computation. See http://www.hsl.rl.ac.uk. 
+****************************************************************************** + +This is Ipopt version 3.13.2, running with linear solver ma27. + +Number of nonzeros in equality constraint Jacobian...: 9 +Number of nonzeros in inequality constraint Jacobian.: 0 +Number of nonzeros in Lagrangian Hessian.............: 4 + +Total number of variables............................: 5 + variables with only lower bounds: 0 + variables with lower and upper bounds: 2 + variables with only upper bounds: 0 +Total number of equality constraints.................: 5 +Total number of inequality constraints...............: 0 + inequality constraints with only lower bounds: 0 + inequality constraints with lower and upper bounds: 0 + inequality constraints with only upper bounds: 0 + +iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls + 0 0.0000000e+00 5.45e+12 0.00e+00 -1.0 0.00e+00 - 0.00e+00 0.00e+00 0 + 1 0.0000000e+00 4.20e+12 1.89e+03 -1.0 5.33e+12 - 9.16e-04 1.00e+00h 1 + 2 0.0000000e+00 2.42e+12 1.11e+03 -1.0 4.57e+12 - 9.11e-01 4.23e-01h 1 + 3 0.0000000e+00 2.41e+12 8.13e+02 -1.0 2.65e+12 - 9.43e-01 7.15e-03h 1 + 4 0.0000000e+00 2.41e+12 3.85e+06 -1.0 2.63e+12 - 9.64e-01 7.20e-05h 1 + 5r 0.0000000e+00 2.41e+12 1.00e+03 9.9 0.00e+00 - 0.00e+00 3.60e-07R 2 + 6r 0.0000000e+00 2.31e+10 5.20e+03 9.9 1.90e+14 - 4.08e-06 4.57e-05f 1 + 7r 0.0000000e+00 7.32e+08 8.50e+04 7.8 1.33e+11 - 1.00e+00 6.53e-04f 1 + 8r 0.0000000e+00 3.43e+08 2.48e+03 7.1 5.83e+06 - 9.84e-01 1.00e+00f 1 + 9r 0.0000000e+00 5.44e+07 2.58e+02 7.1 2.84e+06 - 7.63e-01 9.45e-01f 1 +iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls + 10r 0.0000000e+00 1.02e+10 1.04e+03 6.4 3.71e+05 - 4.95e-01 8.41e-01f 1 + 11r 0.0000000e+00 2.31e+10 7.47e+01 6.4 4.29e+05 - 1.00e+00 1.00e+00f 1 + 12r 0.0000000e+00 1.88e+08 2.80e+01 6.4 6.23e+04 - 1.00e+00 1.00e+00f 1 + 13r 0.0000000e+00 8.11e+07 6.93e+02 4.4 2.37e+04 - 9.92e-01 9.62e-01f 1 + 14r 0.0000000e+00 1.08e+10 6.69e+02 4.4 1.48e+07 -4.0 4.35e-02 1.85e-02f 3 + 15r 0.0000000e+00 2.46e+10 2.71e+02 4.4 9.21e+05 -3.6 6.63e-01 1.00e+00F 1 + 16r 0.0000000e+00 4.16e+09 2.95e+02 4.4 4.34e+05 -3.1 1.00e+00 8.46e-01F 1 + 17r 0.0000000e+00 4.17e+09 4.47e+02 4.4 4.35e+06 -3.6 6.09e-01 7.48e-04f 1 + 18r 0.0000000e+00 1.58e+05 1.26e+00 4.4 8.81e+05 - 1.00e+00 1.00e+00h 1 + 19r 0.0000000e+00 5.64e+03 7.32e+01 2.3 2.91e+02 - 9.98e-01 9.85e-01f 1 +iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls + 20r 0.0000000e+00 8.85e+02 2.38e+01 0.9 1.93e+01 - 1.00e+00 8.43e-01f 1 + 21r 0.0000000e+00 8.40e+02 7.95e-01 0.2 1.42e+01 - 1.00e+00 1.00e+00f 1 + 22r 0.0000000e+00 8.40e+02 5.89e-01 -1.9 2.52e+00 - 9.98e-01 9.96e-01f 1 + 23r 0.0000000e+00 8.40e+02 1.22e+00 -2.9 1.36e-02 - 1.00e+00 1.19e-07f 24 + 24r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 25r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 26r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 27r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 28r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 29r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 +iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls + 30r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 31r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 32r 0.0000000e+00 8.40e+02 1.30e+00 -2.9 7.74e-03 - 1.00e+00 1.19e-07f 24 + 33r 0.0000000e+00 8.40e+02 
1.23e-08 -2.9 7.74e-03 - 1.00e+00 1.00e+00w 1 + 34r 0.0000000e+00 8.40e+02 1.85e-02 -6.5 8.60e-03 - 1.00e+00 4.88e-04f 12 + 35r 0.0000000e+00 8.40e+02 3.85e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + 36r 0.0000000e+00 8.40e+02 3.85e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + 37r 0.0000000e+00 8.40e+02 3.84e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + 38r 0.0000000e+00 8.40e+02 3.84e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + 39r 0.0000000e+00 8.40e+02 3.84e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 +iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls + 40r 0.0000000e+00 8.40e+02 3.84e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + 41r 0.0000000e+00 8.40e+02 3.84e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + 42r 0.0000000e+00 8.40e+02 3.83e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + 43r 0.0000000e+00 8.40e+02 3.83e-02 -6.5 8.48e-03 - 1.00e+00 4.88e-04f 12 + +Number of Iterations....: 43 + + (scaled) (unscaled) +Objective...............: 0.0000000000000000e+00 0.0000000000000000e+00 +Dual infeasibility......: 2.8541888410133756e-04 2.8541888410133756e-04 +Constraint violation....: 8.3952289433653289e+02 8.3952289433653289e+02 +Complementarity.........: 2.8250564388833404e-07 2.8250564388833404e-07 +Overall NLP error.......: 8.3952289433653289e+02 8.3952289433653289e+02 + + +Number of objective function evaluations = 387 +Number of objective gradient evaluations = 7 +Number of equality constraint evaluations = 395 +Number of inequality constraint evaluations = 0 +Number of equality constraint Jacobian evaluations = 46 +Number of inequality constraint Jacobian evaluations = 0 +Number of Lagrangian Hessian evaluations = 44 +Total CPU secs in IPOPT (w/o function evaluations) = 0.016 +Total CPU secs in NLP function evaluations = 0.035 + +EXIT: Converged to a point of local infeasibility. Problem may be infeasible. 
\ No newline at end of file diff --git a/idaes/core/util/tests/load_psweep.json b/idaes/core/util/tests/load_psweep.json new file mode 100644 index 0000000000..6f6125d94c --- /dev/null +++ b/idaes/core/util/tests/load_psweep.json @@ -0,0 +1,48 @@ +{ + "specification": { + "inputs": { + "v2": { + "pyomo_path": "v2", + "lower": 2, + "upper": 6 + } + }, + "sampling_method": "UniformSampling", + "sample_size": [ + 2 + ], + "samples": { + "index": [ + 0, + 1 + ], + "columns": [ + "v2" + ], + "data": [ + [ + 2.0 + ], + [ + 6.0 + ] + ], + "index_names": [ + null + ], + "column_names": [ + null + ] + } + }, + "results": { + "0": { + "success": true, + "results": 2 + }, + "1": { + "success": true, + "results": 6 + } + } +} \ No newline at end of file diff --git a/idaes/core/util/tests/load_spec.json b/idaes/core/util/tests/load_spec.json new file mode 100644 index 0000000000..d3621e0ad3 --- /dev/null +++ b/idaes/core/util/tests/load_spec.json @@ -0,0 +1,95 @@ +{ + "inputs": { + "foo": { + "pyomo_path": "model.foo", + "lower": 0, + "upper": 10 + }, + "bar": { + "pyomo_path": "model.bar", + "lower": 20, + "upper": 40 + } + }, + "sampling_method": "UniformSampling", + "sample_size": [ + 4, + 3 + ], + "samples": { + "index": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11 + ], + "columns": [ + "foo", + "bar" + ], + "data": [ + [ + 0.0, + 20.0 + ], + [ + 0.0, + 30.0 + ], + [ + 0.0, + 40.0 + ], + [ + 3.333333333333333, + 20.0 + ], + [ + 3.333333333333333, + 30.0 + ], + [ + 3.333333333333333, + 40.0 + ], + [ + 6.666666666666666, + 20.0 + ], + [ + 6.666666666666666, + 30.0 + ], + [ + 6.666666666666666, + 40.0 + ], + [ + 10.0, + 20.0 + ], + [ + 10.0, + 30.0 + ], + [ + 10.0, + 40.0 + ] + ], + "index_names": [ + null + ], + "column_names": [ + null + ] + } +} \ No newline at end of file diff --git a/idaes/core/util/tests/test_model_diagnostics.py b/idaes/core/util/tests/test_model_diagnostics.py index 3c9256844a..ccb35d52ea 100644 --- a/idaes/core/util/tests/test_model_diagnostics.py +++ b/idaes/core/util/tests/test_model_diagnostics.py @@ -17,6 +17,10 @@ import math import numpy as np import pytest +import os +from copy import deepcopy + +from pandas import DataFrame from pyomo.environ import ( Block, @@ -36,12 +40,15 @@ units, value, Var, + assert_optimal_termination, Param, Integers, ) from pyomo.common.collections import ComponentSet from pyomo.contrib.pynumero.asl import AmplInterface from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP +from pyomo.common.fileutils import this_file_dir +from pyomo.common.tempfiles import TempfileManager import idaes.core.util.scaling as iscale import idaes.logger as idaeslog @@ -50,17 +57,11 @@ from idaes.core.util.testing import PhysicalParameterTestBlock from unittest import TestCase -# TODO: Add pyomo.dae test case -""" -from pyomo.environ import TransformationFactory -from pyomo.dae import ContinuousSet, DerivativeVar -""" - -# Need to update from idaes.core.util.model_diagnostics import ( DiagnosticsToolbox, SVDToolbox, DegeneracyHunter, + IpoptConvergenceAnalysis, DegeneracyHunter2, svd_dense, svd_sparse, @@ -79,12 +80,24 @@ check_parallel_jacobian, compute_ill_conditioning_certificate, ) +from idaes.core.util.parameter_sweep import ( + SequentialSweepRunner, + ParameterSweepSpecification, +) +from idaes.core.surrogate.pysmo.sampling import ( + UniformSampling, +) from idaes.core.util.testing import _enable_scip_solver_for_testing __author__ = "Alex Dowling, Douglas Allan, Andrew Lee" +# TODO: Add pyomo.dae test cases +solver_available = 
SolverFactory("scip").available() +currdir = this_file_dir() + + @pytest.fixture(scope="module") def scip_solver(): solver = SolverFactory("scip") @@ -2081,6 +2094,620 @@ def test_report_irreducible_degenerate_sets_none(self, model, scip_solver): assert stream.getvalue() == expected +ca_dict = { + "specification": { + "inputs": { + "v2": { + "pyomo_path": "v2", + "lower": 2, + "upper": 6, + }, + }, + "sampling_method": "UniformSampling", + "sample_size": [2], + "samples": { + "index": [0, 1], + "columns": ["v2"], + "data": [[2.0], [6.0]], + "index_names": [None], + "column_names": [None], + }, + }, + "results": { + 0: { + "success": True, + "results": 2, + }, + 1: { + "success": True, + "results": 6, + }, + }, +} + +ca_res = { + "specification": { + "inputs": {"v2": {"pyomo_path": "v2", "lower": 2, "upper": 6}}, + "sampling_method": "UniformSampling", + "sample_size": [2], + "samples": { + "index": [0, 1], + "columns": ["v2"], + "data": [[2.0], [6.0]], + "index_names": [None], + "column_names": [None], + }, + }, + "results": { + 0: { + "success": False, + "results": { + "iters": 7, + "iters_in_restoration": 4, + "iters_w_regularization": 0, + "time": 0.0, + "numerical_issues": True, + }, + }, + 1: { + "success": False, + "results": { + "iters": 7, + "iters_in_restoration": 4, + "iters_w_regularization": 0, + "time": 0.0, + "numerical_issues": True, + }, + }, + }, +} + + +class TestIpoptConvergenceAnalysis: + @pytest.fixture + def model(self): + m = ConcreteModel() + + m.v1 = Var(bounds=(None, 1)) + m.v2 = Var() + m.c = Constraint(expr=m.v1 == m.v2) + + m.v2.fix(0) + + return m + + @pytest.mark.unit + def test_init(self, model): + ca = IpoptConvergenceAnalysis(model) + + assert ca._model is model + assert isinstance(ca._psweep, SequentialSweepRunner) + assert isinstance(ca.results, dict) + assert ca.config.input_specification is None + assert ca.config.solver_options is None + + @pytest.mark.unit + def test_build_model(self, model): + ca = IpoptConvergenceAnalysis(model) + + clone = ca._build_model() + clone.pprint() + + assert clone is not model + assert isinstance(clone.v1, Var) + assert isinstance(clone.v2, Var) + assert isinstance(clone.c, Constraint) + + @pytest.mark.unit + def test_parse_ipopt_output(self, model): + ca = IpoptConvergenceAnalysis(model) + + fname = os.path.join(currdir, "ipopt_output.txt") + iters, restoration, regularization, time = ca._parse_ipopt_output(fname) + + assert iters == 43 + assert restoration == 39 + assert regularization == 4 + assert time == 0.016 + 0.035 + + @pytest.mark.component + @pytest.mark.solver + def test_run_ipopt_with_stats(self): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.c1 = Constraint(expr=m.v1 == 4) + + ca = IpoptConvergenceAnalysis(m) + solver = SolverFactory("ipopt") + + ( + status, + iters, + iters_in_restoration, + iters_w_regularization, + time, + ) = ca._run_ipopt_with_stats(m, solver) + + assert_optimal_termination(status) + assert iters == 1 + assert iters_in_restoration == 0 + assert iters_w_regularization == 0 + assert time < 0.01 + + @pytest.mark.component + @pytest.mark.solver + def test_run_model(self, model): + ca = IpoptConvergenceAnalysis(model) + + model.v2.fix(0.5) + + solver = SolverFactory("ipopt") + + success, run_stats = ca._run_model(model, solver) + + assert success + assert value(model.v1) == pytest.approx(0.5, rel=1e-8) + + assert len(run_stats) == 4 + assert run_stats[0] == 1 + assert run_stats[1] == 0 + assert run_stats[2] == 0 + + @pytest.mark.unit + def test_build_outputs(self, model): + ca = 
IpoptConvergenceAnalysis(model)
+
+        model.v1.set_value(0.5)
+        model.v2.fix(0.5)
+
+        results = ca._build_outputs(model, (1, 2, 3, 4))
+
+        assert results == {
+            "iters": 1,
+            "iters_in_restoration": 2,
+            "iters_w_regularization": 3,
+            "time": 4,
+            "numerical_issues": False,
+        }
+
+    @pytest.mark.unit
+    def test_build_outputs_with_warnings(self, model):
+        ca = IpoptConvergenceAnalysis(model)
+
+        model.v1.set_value(4)
+        model.v2.fix(0.5)
+
+        results = ca._build_outputs(model, (1, 2, 3, 4))
+
+        assert results == {
+            "iters": 1,
+            "iters_in_restoration": 2,
+            "iters_w_regularization": 3,
+            "time": 4,
+            "numerical_issues": True,
+        }
+
+    @pytest.mark.unit
+    def test_recourse(self, model):
+        ca = IpoptConvergenceAnalysis(model)
+
+        assert ca._recourse(model) == {
+            "iters": -1,
+            "iters_in_restoration": -1,
+            "iters_w_regularization": -1,
+            "time": -1,
+            "numerical_issues": -1,
+        }
+
+    @pytest.mark.integration
+    @pytest.mark.solver
+    def test_run_convergence_analysis(self, model):
+        spec = ParameterSweepSpecification()
+        spec.add_sampled_input("v2", lower=0, upper=3)
+        spec.set_sampling_method(UniformSampling)
+        spec.set_sample_size([4])
+
+        ca = IpoptConvergenceAnalysis(model, input_specification=spec)
+
+        ca.run_convergence_analysis()
+
+        assert isinstance(ca.results, dict)
+        assert len(ca.results) == 4
+
+        # Ignore time, as it is too noisy to test
+        # Sample 0 should solve cleanly
+        assert ca.results[0]["success"]
+        assert ca.results[0]["results"]["iters"] == 0
+        assert ca.results[0]["results"]["iters_in_restoration"] == 0
+        assert ca.results[0]["results"]["iters_w_regularization"] == 0
+        assert not ca.results[0]["results"]["numerical_issues"]
+
+        # Sample 1 should solve, but have issues due to bound on v1
+        assert ca.results[1]["success"]
+        assert ca.results[1]["results"]["iters"] == pytest.approx(3, abs=1)
+        assert ca.results[1]["results"]["iters_in_restoration"] == 0
+        assert ca.results[1]["results"]["iters_w_regularization"] == 0
+        assert ca.results[1]["results"]["numerical_issues"]
+
+        # Other samples should fail due to the bound on v1
+        assert not ca.results[2]["success"]
+        assert ca.results[2]["results"]["iters"] == pytest.approx(7, abs=1)
+        assert ca.results[2]["results"]["iters_in_restoration"] == pytest.approx(
+            4, abs=1
+        )
+        assert ca.results[2]["results"]["iters_w_regularization"] == 0
+        assert ca.results[2]["results"]["numerical_issues"]
+
+        assert not ca.results[3]["success"]
+        assert ca.results[3]["results"]["iters"] == pytest.approx(8, abs=1)
+        assert ca.results[3]["results"]["iters_in_restoration"] == pytest.approx(
+            5, abs=1
+        )
+        assert ca.results[3]["results"]["iters_w_regularization"] == 0
+        assert ca.results[3]["results"]["numerical_issues"]
+
+    @pytest.fixture(scope="class")
+    def ca_with_results(self):
+        spec = ParameterSweepSpecification()
+        spec.set_sampling_method(UniformSampling)
+        spec.add_sampled_input("v2", 2, 6)
+        spec.set_sample_size([2])
+        spec.generate_samples()
+
+        ca = IpoptConvergenceAnalysis(
+            model=ConcreteModel(),
+            input_specification=spec,
+        )
+
+        ca._psweep._results = {
+            0: {"success": True, "results": 2},
+            1: {"success": True, "results": 6},
+        }
+
+        return ca
+
+    @pytest.mark.component
+    def test_to_dict(self, ca_with_results):
+        outdict = ca_with_results.to_dict()
+        assert outdict == ca_dict
+
+    @pytest.mark.unit
+    def test_from_dict(self):
+        ca = IpoptConvergenceAnalysis(
+            model=ConcreteModel(),
+        )
+
+        ca.from_dict(ca_dict)
+
+        input_spec = ca._psweep.get_input_specification()
+
+        assert isinstance(input_spec, ParameterSweepSpecification)
+        assert 
len(input_spec.inputs) == 1 + + assert input_spec.sampling_method is UniformSampling + assert isinstance(input_spec.samples, DataFrame) + assert input_spec.sample_size == [2] + + assert isinstance(ca.results, dict) + assert len(ca.results) == 2 + + for i in [0, 1]: + assert ca.results[i]["success"] + assert ca.results[i]["results"] == 2 + i * 4 + + @pytest.mark.component + def test_to_json_file(self, ca_with_results): + temp_context = TempfileManager.new_context() + tmpfile = temp_context.create_tempfile(suffix=".json") + + ca_with_results.to_json_file(tmpfile) + + with open(tmpfile, "r") as f: + lines = f.read() + f.close() + + expected = """{ + "specification": { + "inputs": { + "v2": { + "pyomo_path": "v2", + "lower": 2, + "upper": 6 + } + }, + "sampling_method": "UniformSampling", + "sample_size": [ + 2 + ], + "samples": { + "index": [ + 0, + 1 + ], + "columns": [ + "v2" + ], + "data": [ + [ + 2.0 + ], + [ + 6.0 + ] + ], + "index_names": [ + null + ], + "column_names": [ + null + ] + } + }, + "results": { + "0": { + "success": true, + "results": 2 + }, + "1": { + "success": true, + "results": 6 + } + } +}""" + + assert lines == expected + + # Check for clean up + temp_context.release(remove=True) + assert not os.path.exists(tmpfile) + + @pytest.mark.unit + def test_load_from_json_file(self): + fname = os.path.join(currdir, "load_psweep.json") + + ca = IpoptConvergenceAnalysis( + model=ConcreteModel(), + ) + ca.from_json_file(fname) + + input_spec = ca._psweep.get_input_specification() + + assert isinstance(input_spec, ParameterSweepSpecification) + assert len(input_spec.inputs) == 1 + + assert input_spec.sampling_method is UniformSampling + assert isinstance(input_spec.samples, DataFrame) + assert input_spec.sample_size == [2] + + assert isinstance(ca.results, dict) + assert len(ca.results) == 2 + + for i in [0, 1]: + assert ca.results[i]["success"] + assert ca.results[i]["results"] == 2 + i * 4 + + @pytest.mark.integration + @pytest.mark.solver + def test_run_convergence_analysis_from_dict(self, model): + ca = IpoptConvergenceAnalysis( + model=model, + ) + ca.run_convergence_analysis_from_dict(ca_dict) + + input_spec = ca._psweep.get_input_specification() + + assert isinstance(input_spec, ParameterSweepSpecification) + assert len(input_spec.inputs) == 1 + + assert input_spec.sampling_method is UniformSampling + assert isinstance(input_spec.samples, DataFrame) + assert input_spec.sample_size == [2] + + assert isinstance(ca.results, dict) + assert len(ca.results) == 2 + + assert not ca.results[0]["success"] + assert ca.results[0]["results"]["iters"] == pytest.approx(7, abs=1) + assert ca.results[0]["results"]["iters_in_restoration"] == pytest.approx( + 4, abs=1 + ) + assert ca.results[0]["results"]["iters_w_regularization"] == 0 + assert ca.results[0]["results"]["numerical_issues"] + + assert not ca.results[1]["success"] + assert ca.results[1]["results"]["iters"] == pytest.approx(7, abs=1) + assert ca.results[1]["results"]["iters_in_restoration"] == pytest.approx( + 4, abs=1 + ) + assert ca.results[1]["results"]["iters_w_regularization"] == 0 + assert ca.results[1]["results"]["numerical_issues"] + + @pytest.mark.integration + @pytest.mark.solver + def test_run_convergence_analysis_from_file(self, model): + fname = os.path.join(currdir, "load_psweep.json") + + ca = IpoptConvergenceAnalysis( + model=model, + ) + ca.run_convergence_analysis_from_file(fname) + + input_spec = ca._psweep.get_input_specification() + + assert isinstance(input_spec, ParameterSweepSpecification) + assert 
len(input_spec.inputs) == 1 + + assert input_spec.sampling_method is UniformSampling + assert isinstance(input_spec.samples, DataFrame) + assert input_spec.sample_size == [2] + + assert isinstance(ca.results, dict) + assert len(ca.results) == 2 + + assert not ca.results[0]["success"] + assert ca.results[0]["results"]["iters"] == pytest.approx(7, abs=1) + assert ca.results[0]["results"]["iters_in_restoration"] == pytest.approx( + 4, abs=1 + ) + assert ca.results[0]["results"]["iters_w_regularization"] == 0 + assert ca.results[0]["results"]["numerical_issues"] + + assert not ca.results[1]["success"] + assert ca.results[1]["results"]["iters"] == pytest.approx(7, abs=1) + assert ca.results[1]["results"]["iters_in_restoration"] == pytest.approx( + 4, abs=1 + ) + assert ca.results[1]["results"]["iters_w_regularization"] == 0 + assert ca.results[1]["results"]["numerical_issues"] + + @pytest.fixture(scope="class") + def conv_anal(self): + ca = IpoptConvergenceAnalysis( + model=ConcreteModel(), + ) + ca.from_dict(ca_res) + + return ca + + @pytest.mark.unit + def test_compare_results_to_dict_ok(self, conv_anal): + diffs = conv_anal._compare_results_to_dict(ca_res) + + assert diffs["success"] == [] + assert diffs["iters"] == [] + assert diffs["iters_in_restoration"] == [] + assert diffs["iters_w_regularization"] == [] + assert diffs["numerical_issues"] == [] + + @pytest.mark.unit + def test_compare_results_to_dict_success(self, conv_anal): + ca_copy = deepcopy(ca_res) + ca_copy["results"][0]["success"] = True + + diffs = conv_anal._compare_results_to_dict(ca_copy) + + assert diffs["success"] == [0] + assert diffs["iters"] == [] + assert diffs["iters_in_restoration"] == [] + assert diffs["iters_w_regularization"] == [] + assert diffs["numerical_issues"] == [] + + @pytest.mark.unit + def test_compare_results_to_dict_iters(self, conv_anal): + ca_copy = deepcopy(ca_res) + ca_copy["results"][0]["results"]["iters"] = 8 + ca_copy["results"][1]["results"]["iters"] = 9 + + diffs = conv_anal._compare_results_to_dict(ca_copy) + + assert diffs["success"] == [] + assert diffs["iters"] == [1] + assert diffs["iters_in_restoration"] == [] + assert diffs["iters_w_regularization"] == [] + assert diffs["numerical_issues"] == [] + + diffs = conv_anal._compare_results_to_dict(ca_copy, abs_tol=0, rel_tol=0) + + assert diffs["success"] == [] + assert diffs["iters"] == [0, 1] + assert diffs["iters_in_restoration"] == [] + assert diffs["iters_w_regularization"] == [] + assert diffs["numerical_issues"] == [] + + @pytest.mark.unit + def test_compare_results_to_dict_restoration(self, conv_anal): + ca_copy = deepcopy(ca_res) + ca_copy["results"][0]["results"]["iters_in_restoration"] = 5 + ca_copy["results"][1]["results"]["iters_in_restoration"] = 6 + + diffs = conv_anal._compare_results_to_dict(ca_copy) + + assert diffs["success"] == [] + assert diffs["iters"] == [] + assert diffs["iters_in_restoration"] == [1] + assert diffs["iters_w_regularization"] == [] + assert diffs["numerical_issues"] == [] + + diffs = conv_anal._compare_results_to_dict(ca_copy, abs_tol=0, rel_tol=0) + + assert diffs["success"] == [] + assert diffs["iters"] == [] + assert diffs["iters_in_restoration"] == [0, 1] + assert diffs["iters_w_regularization"] == [] + assert diffs["numerical_issues"] == [] + + @pytest.mark.unit + def test_compare_results_to_dict_regularization(self, conv_anal): + ca_copy = deepcopy(ca_res) + ca_copy["results"][0]["results"]["iters_w_regularization"] = 1 + ca_copy["results"][1]["results"]["iters_w_regularization"] = 2 + + diffs = 
conv_anal._compare_results_to_dict(ca_copy) + + assert diffs["success"] == [] + assert diffs["iters"] == [] + assert diffs["iters_in_restoration"] == [] + assert diffs["iters_w_regularization"] == [1] + assert diffs["numerical_issues"] == [] + + diffs = conv_anal._compare_results_to_dict(ca_copy, abs_tol=0, rel_tol=0) + + assert diffs["success"] == [] + assert diffs["iters"] == [] + assert diffs["iters_in_restoration"] == [] + assert diffs["iters_w_regularization"] == [0, 1] + assert diffs["numerical_issues"] == [] + + @pytest.mark.unit + def test_compare_results_to_dict_numerical_issues(self, conv_anal): + ca_copy = deepcopy(ca_res) + ca_copy["results"][1]["results"]["numerical_issues"] = False + + diffs = conv_anal._compare_results_to_dict(ca_copy) + + assert diffs["success"] == [] + assert diffs["iters"] == [] + assert diffs["iters_in_restoration"] == [] + assert diffs["iters_w_regularization"] == [] + assert diffs["numerical_issues"] == [1] + + @pytest.mark.integration + @pytest.mark.solver + def test_compare_convergence_to_baseline(self, model): + fname = os.path.join(currdir, "convergence_baseline.json") + + ca = IpoptConvergenceAnalysis( + model=model, + ) + + diffs = ca.compare_convergence_to_baseline(fname) + + # Baseline has incorrect values + assert diffs == { + "success": [0], + "iters": [], + "iters_in_restoration": [], + "iters_w_regularization": [1], + "numerical_issues": [], + } + + @pytest.mark.integration + @pytest.mark.solver + def test_assert_baseline_comparison(self, model): + fname = os.path.join(currdir, "convergence_baseline.json") + + ca = IpoptConvergenceAnalysis( + model=model, + ) + + # Baseline has incorrect values + with pytest.raises( + AssertionError, + match="Convergence analysis does not match baseline", + ): + ca.assert_baseline_comparison(fname) + + @pytest.fixture() def dummy_problem(): m = ConcreteModel() diff --git a/idaes/core/util/tests/test_parameter_sweep.py b/idaes/core/util/tests/test_parameter_sweep.py new file mode 100644 index 0000000000..bf418fc851 --- /dev/null +++ b/idaes/core/util/tests/test_parameter_sweep.py @@ -0,0 +1,1348 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Tests for IDAES Parameter Sweep API and sequential workflow runner. 
+""" + +import pytest +import os + +from pandas import DataFrame, Series +from pandas.testing import assert_frame_equal, assert_series_equal + +from pyomo.environ import ( + ConcreteModel, + Constraint, + Var, + Param, + value, + SolverFactory, + assert_optimal_termination, +) +from pyomo.common.fileutils import this_file_dir +from pyomo.common.tempfiles import TempfileManager + +from idaes.core.util.parameter_sweep import ( + ParameterSweepSpecification, + ParameterSweepBase, + SequentialSweepRunner, +) +from idaes.core.surrogate.pysmo.sampling import ( + LatinHypercubeSampling, + UniformSampling, + HaltonSampling, + HammersleySampling, + CVTSampling, +) +from idaes.core.util.exceptions import ConfigurationError +import idaes.logger as idaeslog + +currdir = this_file_dir() + + +expected_todict = { + "inputs": { + "foo": { + "pyomo_path": "model.foo", + "lower": 0, + "upper": 10, + }, + "bar": { + "pyomo_path": "model.bar", + "lower": 20, + "upper": 40, + }, + }, + "sampling_method": "UniformSampling", + "sample_size": [4, 3], + "samples": { + "index": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + "columns": ["foo", "bar"], + "data": [ + [0.0, 20.0], + [0.0, 30.0], + [0.0, 40.0], + [3.333333333333333, 20.0], + [3.333333333333333, 30.0], + [3.333333333333333, 40.0], + [6.666666666666666, 20.0], + [6.666666666666666, 30.0], + [6.666666666666666, 40.0], + [10.0, 20.0], + [10.0, 30.0], + [10.0, 40.0], + ], + "index_names": [None], + "column_names": [None], + }, +} + + +class TestParameterSweepSpecification: + @pytest.mark.unit + def test_init(self): + spec = ParameterSweepSpecification() + + assert isinstance(spec.inputs, dict) + assert len(spec.inputs) == 0 + + assert spec.sampling_method is None + assert spec.samples is None + assert spec.sample_size is None + + @pytest.mark.unit + def test_add_sampled_input(self): + spec = ParameterSweepSpecification() + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + assert len(spec.inputs) == 2 + + assert isinstance(spec.inputs["foo"], dict) + assert spec.inputs["foo"]["pyomo_path"] == "model.foo" + assert spec.inputs["foo"]["lower"] == 0 + assert spec.inputs["foo"]["upper"] == 10 + + assert isinstance(spec.inputs["bar"], dict) + assert spec.inputs["bar"]["pyomo_path"] == "model.bar" + assert spec.inputs["bar"]["lower"] == 20 + assert spec.inputs["bar"]["upper"] == 40 + + @pytest.mark.unit + def test_add_sampled_input_no_name(self): + spec = ParameterSweepSpecification() + + spec.add_sampled_input("model.foo", 0, 10) + + assert len(spec.inputs) == 1 + + assert isinstance(spec.inputs["model.foo"], dict) + assert spec.inputs["model.foo"]["pyomo_path"] == "model.foo" + assert spec.inputs["model.foo"]["lower"] == 0 + assert spec.inputs["model.foo"]["upper"] == 10 + + @pytest.mark.unit + def test_generate_pysmo_data_input(self): + spec = ParameterSweepSpecification() + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + data_spec = spec._generate_pysmo_data_input() + + assert isinstance(data_spec, list) + assert len(data_spec) == 2 + + assert data_spec[0] == [0, 20] + assert data_spec[1] == [10, 40] + + @pytest.mark.unit + def test_set_sampling_method(self): + spec = ParameterSweepSpecification() + + spec.set_sampling_method(LatinHypercubeSampling) + assert spec.sampling_method is LatinHypercubeSampling + + spec.set_sampling_method(UniformSampling) + assert spec.sampling_method is UniformSampling + + 
spec.set_sampling_method(HaltonSampling) + assert spec.sampling_method is HaltonSampling + + spec.set_sampling_method(HammersleySampling) + assert spec.sampling_method is HammersleySampling + + spec.set_sampling_method(CVTSampling) + assert spec.sampling_method is CVTSampling + + @pytest.mark.unit + def test_set_sample_size(self): + spec = ParameterSweepSpecification() + + spec.set_sample_size(10) + + assert spec.sample_size == 10 + + @pytest.mark.unit + def test_set_sampling_method_invalid(self): + spec = ParameterSweepSpecification() + + with pytest.raises( + TypeError, + match="Sampling method must be an instance of a Pysmo SamplingMethod " + "\(received foo\)", + ): + spec.set_sampling_method("foo") + + @pytest.mark.unit + def test_generate_samples_no_inputs(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(LatinHypercubeSampling) + + with pytest.raises( + ValueError, match="Please identify at least one input variable to sample." + ): + spec.generate_samples() + + @pytest.mark.unit + def test_generate_samples_none(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(LatinHypercubeSampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + with pytest.raises(ValueError, match="Please set a sample size."): + spec.generate_samples() + + @pytest.mark.unit + def test_generate_samples_zero(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(LatinHypercubeSampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size(0) + + with pytest.raises( + ValueError, match="sample_size must be an integer greater than 1." + ): + spec.generate_samples() + + @pytest.mark.unit + def test_generate_samples_not_int(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(LatinHypercubeSampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size(0.5) + + with pytest.raises(ValueError, match="sample_size must be an integer."): + spec.generate_samples() + + @pytest.mark.unit + def test_generate_samples_uniform_not_list(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(UniformSampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size(0.5) + + with pytest.raises( + TypeError, + match="For UniformSampling, sample_size must be list of integers.", + ): + spec.generate_samples() + + @pytest.mark.component + def test_generate_samples_FF(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(UniformSampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size([4, 3]) + + samples = spec.generate_samples() + assert isinstance(spec.samples, DataFrame) + assert spec._samples.shape == (12, 2) + + expected = DataFrame( + data=[ + [0.0, 20.0], + [0.0, 30.0], + [0.0, 40.0], + [10 / 3, 20.0], + [10 / 3, 30.0], + [10 / 3, 40.0], + [20 / 3, 20.0], + [20 / 3, 30.0], + [20 / 3, 40.0], + [10.0, 20.0], + [10.0, 30.0], + [10.0, 40.0], + ], + columns=["foo", "bar"], + ) + + assert_frame_equal(samples, expected) + + @pytest.mark.integration + def test_generate_samples_LHC(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(LatinHypercubeSampling) + + 
spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size(10) + + spec.generate_samples() + assert spec._samples.shape == (10, 2) + + @pytest.mark.integration + def test_generate_samples_CVT(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(CVTSampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size(10) + + spec.generate_samples() + assert spec._samples.shape == (10, 2) + + @pytest.mark.integration + def test_generate_samples_Halton(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(HaltonSampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size(10) + + spec.generate_samples() + assert spec._samples.shape == (10, 2) + + @pytest.mark.integration + def test_generate_samples_Hammersley(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(HammersleySampling) + + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + + spec.set_sample_size(10) + + spec.generate_samples() + assert spec._samples.shape == (10, 2) + + @pytest.mark.unit + def test_to_dict(self): + spec = ParameterSweepSpecification() + spec.set_sampling_method(UniformSampling) + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + spec.set_sample_size([4, 3]) + spec.generate_samples() + + outdict = spec.to_dict() + + assert outdict == expected_todict + + @pytest.mark.unit + def test_from_dict(self): + spec = ParameterSweepSpecification() + + spec.from_dict(expected_todict) + + assert isinstance(spec.inputs, dict) + assert len(spec.inputs) == 2 + + assert isinstance(spec.inputs["foo"], dict) + assert spec.inputs["foo"]["pyomo_path"] == "model.foo" + assert spec.inputs["foo"]["lower"] == 0 + assert spec.inputs["foo"]["upper"] == 10 + + assert isinstance(spec.inputs["bar"], dict) + assert spec.inputs["bar"]["pyomo_path"] == "model.bar" + assert spec.inputs["bar"]["lower"] == 20 + assert spec.inputs["bar"]["upper"] == 40 + + assert spec.sampling_method is UniformSampling + assert spec.sample_size == [4, 3] + + assert isinstance(spec.samples, DataFrame) + assert spec.samples.shape == (12, 2) + + expected = DataFrame( + data=[ + [0.0, 20.0], + [0.0, 30.0], + [0.0, 40.0], + [10 / 3, 20.0], + [10 / 3, 30.0], + [10 / 3, 40.0], + [20 / 3, 20.0], + [20 / 3, 30.0], + [20 / 3, 40.0], + [10.0, 20.0], + [10.0, 30.0], + [10.0, 40.0], + ], + columns=["foo", "bar"], + ) + + assert_frame_equal(spec.samples, expected) + + @pytest.mark.component + def test_to_json_file(self): + temp_context = TempfileManager.new_context() + tmpfile = temp_context.create_tempfile(suffix=".json") + + spec = ParameterSweepSpecification() + spec.set_sampling_method(UniformSampling) + spec.add_sampled_input("model.foo", 0, 10, name="foo") + spec.add_sampled_input("model.bar", 20, 40, name="bar") + spec.set_sample_size([4, 3]) + spec.generate_samples() + + spec.to_json_file(tmpfile) + + with open(tmpfile, "r") as f: + lines = f.read() + f.close() + + expected = """{ + "inputs": { + "foo": { + "pyomo_path": "model.foo", + "lower": 0, + "upper": 10 + }, + "bar": { + "pyomo_path": "model.bar", + "lower": 20, + "upper": 40 + } + }, + "sampling_method": "UniformSampling", + "sample_size": [ + 4, + 3 + ], + "samples": { + "index": [ + 
0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11 + ], + "columns": [ + "foo", + "bar" + ], + "data": [ + [ + 0.0, + 20.0 + ], + [ + 0.0, + 30.0 + ], + [ + 0.0, + 40.0 + ], + [ + 3.333333333333333, + 20.0 + ], + [ + 3.333333333333333, + 30.0 + ], + [ + 3.333333333333333, + 40.0 + ], + [ + 6.666666666666666, + 20.0 + ], + [ + 6.666666666666666, + 30.0 + ], + [ + 6.666666666666666, + 40.0 + ], + [ + 10.0, + 20.0 + ], + [ + 10.0, + 30.0 + ], + [ + 10.0, + 40.0 + ] + ], + "index_names": [ + null + ], + "column_names": [ + null + ] + } +}""" + + assert lines == expected + + # Check for clean up + temp_context.release(remove=True) + assert not os.path.exists(tmpfile) + + @pytest.mark.unit + def test_from_json_file(self): + fname = os.path.join(currdir, "load_spec.json") + + spec = ParameterSweepSpecification() + + spec.from_json_file(fname) + + assert isinstance(spec.inputs, dict) + assert len(spec.inputs) == 2 + + assert isinstance(spec.inputs["foo"], dict) + assert spec.inputs["foo"]["pyomo_path"] == "model.foo" + assert spec.inputs["foo"]["lower"] == 0 + assert spec.inputs["foo"]["upper"] == 10 + + assert isinstance(spec.inputs["bar"], dict) + assert spec.inputs["bar"]["pyomo_path"] == "model.bar" + assert spec.inputs["bar"]["lower"] == 20 + assert spec.inputs["bar"]["upper"] == 40 + + assert spec.sampling_method is UniformSampling + assert spec.sample_size == [4, 3] + + assert isinstance(spec.samples, DataFrame) + assert spec.samples.shape == (12, 2) + + expected = DataFrame( + data=[ + [0.0, 20.0], + [0.0, 30.0], + [0.0, 40.0], + [10 / 3, 20.0], + [10 / 3, 30.0], + [10 / 3, 40.0], + [20 / 3, 20.0], + [20 / 3, 30.0], + [20 / 3, 40.0], + [10.0, 20.0], + [10.0, 30.0], + [10.0, 40.0], + ], + columns=["foo", "bar"], + ) + + assert_frame_equal(spec.samples, expected) + + +spec = ParameterSweepSpecification() +spec.set_sampling_method(LatinHypercubeSampling) +spec.add_sampled_input("v1", 0, 10) +spec.add_sampled_input("v2", 20, 40) +spec.add_sampled_input("p2", -10, 10) +spec.set_sample_size(5) +spec.generate_samples() + + +psweep_dict = { + "specification": { + "inputs": { + "v2": { + "pyomo_path": "v2", + "lower": 2, + "upper": 6, + }, + }, + "sampling_method": "UniformSampling", + "sample_size": [2], + "samples": { + "index": [0, 1], + "columns": ["v2"], + "data": [[2.0], [6.0]], + "index_names": [None], + "column_names": [None], + }, + }, + "results": { + 0: { + "success": True, + "results": 2, + }, + 1: { + "success": True, + "results": 6, + }, + }, +} + + +class TestParameterSweepBase: + def build_model(self): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=1) + m.v3 = Var(initialize=1) + m.p1 = Param(initialize=1) + m.p2 = Param(initialize=1, mutable=True) + m.p3 = Param(initialize=1, mutable=True) + + m.v1.fix(1) + m.v2.fix(1) + + return m + + @pytest.mark.unit + def test_init(self): + psweep = ParameterSweepBase() + + assert psweep.config.rebuild_model + assert psweep.config.build_model is None + assert psweep.config.run_model is None + assert psweep.config.build_outputs is None + assert psweep.config.handle_solver_error is None + assert not psweep.config.halt_on_error + assert psweep.config.input_specification is None + assert psweep.config.solver is None + assert isinstance(psweep.results, dict) + assert len(psweep.results) == 0 + assert psweep._model is None + + @pytest.mark.unit + def test_get_initialized_model_none(self): + psweep = ParameterSweepBase() + + with pytest.raises( + ConfigurationError, + match="Please specify a method to construct the 
model of interest.", + ): + psweep.get_initialized_model() + + @pytest.mark.unit + def test_get_initialized_model(self): + psweep = ParameterSweepBase( + build_model=self.build_model, + ) + + m2 = psweep.get_initialized_model() + + assert isinstance(m2, ConcreteModel) + assert isinstance(m2.v1, Var) + assert isinstance(m2.v2, Var) + assert isinstance(m2.p1, Param) + assert isinstance(m2.p2, Param) + assert isinstance(m2.p3, Param) + + assert psweep._model is None + + @pytest.mark.unit + def test_get_initialized_model_rebuild_false(self): + psweep = ParameterSweepBase( + build_model=self.build_model, + rebuild_model=False, + ) + + m2 = psweep.get_initialized_model() + + assert isinstance(m2, ConcreteModel) + assert isinstance(m2.v1, Var) + assert isinstance(m2.v2, Var) + assert isinstance(m2.p1, Param) + assert isinstance(m2.p2, Param) + assert isinstance(m2.p3, Param) + + assert psweep._model is m2 + + @pytest.mark.unit + def test_get_initialized_model_rebuild_false_existing(self): + psweep = ParameterSweepBase( + build_model=self.build_model, + rebuild_model=False, + ) + psweep._model = "foo" + + assert psweep.get_initialized_model() == "foo" + + @pytest.mark.unit + def test_get_initialized_model_w_args(self): + def build_model(arg1=None, arg2=None): + m = ConcreteModel() + m.a1 = arg1 + m.a2 = arg2 + + return m + + psweep = ParameterSweepBase( + build_model=build_model, + build_model_arguments={"arg1": "foo", "arg2": False}, + ) + + m2 = psweep.get_initialized_model() + + assert isinstance(m2, ConcreteModel) + assert m2.a1 == "foo" + assert not m2.a2 + + @pytest.mark.unit + def test_get_specification(self): + psweep = ParameterSweepBase( + build_model=self.build_model, + input_specification=spec, + ) + + s2 = psweep.get_input_specification() + + assert s2 is spec + + @pytest.mark.unit + def test_get_specification_none(self): + psweep = ParameterSweepBase( + build_model=self.build_model, + ) + + with pytest.raises( + ConfigurationError, + match="Please specify an input specification to use for sampling.", + ): + psweep.get_input_specification() + + @pytest.mark.unit + def test_get_input_samples(self): + psweep = ParameterSweepBase( + build_model=self.build_model, + input_specification=spec, + ) + + s = psweep.get_input_samples() + + assert s is spec.samples + + @pytest.mark.unit + def test_get_input_samples_none(self): + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(LatinHypercubeSampling) + spec2.add_sampled_input("v1", 0, 10) + spec2.add_sampled_input("v2", 20, 40) + spec2.add_sampled_input("p2", -10, 10) + + psweep = ParameterSweepBase( + build_model=self.build_model, + input_specification=spec2, + ) + + with pytest.raises( + ValueError, + match="Please set a sample size.", + ): + psweep.get_input_samples() + + @pytest.mark.unit + def test_set_input_values(self): + psweep = ParameterSweepBase( + build_model=self.build_model, + input_specification=spec, + ) + + model = psweep.get_initialized_model() + + for i in range(5): + psweep.set_input_values(model, i) + + assert value(model.v1) == pytest.approx(spec.samples["v1"][i], abs=1e-10) + assert value(model.v2) == pytest.approx(spec.samples["v2"][i], abs=1e-10) + assert value(model.p2) == pytest.approx(spec.samples["p2"][i], abs=1e-10) + + assert value(model.v3) == pytest.approx(1, abs=1e-10) + assert value(model.p1) == pytest.approx(1, abs=1e-10) + assert value(model.p3) == pytest.approx(1, abs=1e-10) + + @pytest.mark.unit + def test_set_input_immutable_param(self): + def bm(): + m = ConcreteModel() + m.p1 = Param() + 
return m + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(LatinHypercubeSampling) + spec2.add_sampled_input("p1", 0, 10) + spec2.set_sample_size(2) + + psweep = ParameterSweepBase( + build_model=bm, + input_specification=spec2, + ) + + model = psweep.get_initialized_model() + + with pytest.raises( + ValueError, + match="Convergence testing found an input of type Param that " + "was not mutable \(p1\). Please make sure all " + "sampled inputs are either mutable params or fixed vars.", + ): + psweep.set_input_values(model, 0) + + @pytest.mark.unit + def test_set_input_unfixed_var(self): + def bm(): + m = ConcreteModel() + m.v1 = Var() + return m + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(LatinHypercubeSampling) + spec2.add_sampled_input("v1", 0, 10) + spec2.set_sample_size(2) + + psweep = ParameterSweepBase( + build_model=bm, + input_specification=spec2, + ) + + model = psweep.get_initialized_model() + + with pytest.raises( + ValueError, + match="Convergence testing found an input of type Var that " + "was not fixed \(v1\). Please make sure all " + "sampled inputs are either mutable params or fixed vars.", + ): + psweep.set_input_values(model, 0) + + @pytest.mark.unit + def test_set_input_invalid_ctype(self): + def bm(): + m = ConcreteModel() + m.v1 = Var() + m.c1 = Constraint(expr=m.v1 == 2) + return m + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(LatinHypercubeSampling) + spec2.add_sampled_input("c1", 0, 10) + spec2.set_sample_size(2) + + psweep = ParameterSweepBase( + build_model=bm, + input_specification=spec2, + ) + + model = psweep.get_initialized_model() + + with pytest.raises( + ValueError, + match="Failed to find a valid input component \(must be " + "a fixed Var or a mutable Param\). 
Instead, " + "pyomo_path: c1 returned: c1.", + ): + psweep.set_input_values(model, 0) + + @pytest.mark.component + @pytest.mark.solver + def test_run_model_none(self): + psweep = ParameterSweepBase() + + model = ConcreteModel() + model.v = Var() + model.c = Constraint(expr=model.v == 4) + + solver = SolverFactory("ipopt") + + success, run_stats = psweep.run_model(model, solver) + + assert success + assert_optimal_termination(run_stats) + assert value(model.v) == pytest.approx(4, rel=1e-8) + + @pytest.mark.unit + def test_run_model_dummy(self): + def dummy_run(model, solver): + return "foo" + + psweep = ParameterSweepBase(run_model=dummy_run) + + model = ConcreteModel() + status = psweep.run_model(model, "bar") + + assert status == "foo" + + @pytest.mark.unit + def test_run_model_dummy_w_args(self): + def dummy_run(model, solver, arg1=None, arg2=None): + return "foo", {"arg1": arg1, "arg2": arg2} + + psweep = ParameterSweepBase( + run_model=dummy_run, run_model_arguments={"arg1": "foo", "arg2": False} + ) + + model = ConcreteModel() + status, run_stats = psweep.run_model(model, "bar") + + assert status == "foo" + assert run_stats == {"arg1": "foo", "arg2": False} + + @pytest.mark.unit + def test_build_outputs_none(self): + psweep = ParameterSweepBase() + + assert psweep.build_outputs("foo", "bar") == "bar" + + @pytest.mark.unit + def test_build_outputs(self): + def dummy_collect(model, run_stats): + return value(model.v) + + model = ConcreteModel() + model.v = Var(initialize=1) + model.c = Constraint(expr=model.v == 4) + + psweep = ParameterSweepBase( + build_outputs=dummy_collect, + ) + + results = psweep.build_outputs(model, "foo") + + assert results == 1 + + @pytest.mark.unit + def test_build_outputs_w_args(self): + def dummy_collect(model, run_stats, arg1=None, arg2=None): + return [value(model.v), arg1, arg2] + + model = ConcreteModel() + model.v = Var(initialize=1) + model.c = Constraint(expr=model.v == 4) + + psweep = ParameterSweepBase( + build_outputs=dummy_collect, + build_outputs_arguments={"arg1": "foo", "arg2": False}, + ) + + results = psweep.build_outputs(model, "bar") + + assert results == [1, "foo", False] + + @pytest.mark.unit + def test_handle_error_none(self): + model = ConcreteModel() + + psweep = ParameterSweepBase() + + results = psweep.handle_error(model) + + assert results == (None, False) + + @pytest.mark.unit + def test_handle_error(self): + model = ConcreteModel() + + def dummy_recourse(model): + return "foo" + + psweep = ParameterSweepBase( + handle_solver_error=dummy_recourse, + ) + + results = psweep.handle_error(model) + + assert results == "foo" + + @pytest.mark.unit + def test_handle_error_w_args(self): + model = ConcreteModel() + + def dummy_recourse(model, arg1=None, arg2=None): + return "foo", arg1, arg2 + + psweep = ParameterSweepBase( + handle_solver_error=dummy_recourse, + handle_solver_error_arguments={"arg1": "bar", "arg2": False}, + ) + + results = psweep.handle_error(model) + + assert results == ("foo", "bar", False) + + @pytest.mark.component + @pytest.mark.solver + def test_execute_single_sample(self, caplog): + caplog.set_level(idaeslog.DEBUG) + + def build_model(): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=4) + m.c1 = Constraint(expr=m.v1 == m.v2) + + m.v2.fix() + + return m + + def build_outputs(model, run_stats): + return value(model.v1) + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(UniformSampling) + spec2.add_sampled_input("v2", 2, 6) + spec2.set_sample_size([2]) + + solver = 
SolverFactory("ipopt") + + psweep = ParameterSweepBase( + build_model=build_model, + input_specification=spec2, + build_outputs=build_outputs, + solver=solver, + ) + + results, success, error = psweep.execute_single_sample(1) + + assert results == pytest.approx(6, rel=1e-8) + assert success + assert error is None + + assert "Sample 1 finished." in caplog.text + + @pytest.mark.component + def test_execute_single_sample_recourse_none(self): + def build_model(): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=4) + m.c1 = Constraint(expr=m.v1 == m.v2) + + m.v2.fix() + + return m + + class dummy_solver: + @staticmethod + def solve(model, *args, **kwargs): + raise Exception("Test exception") + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(UniformSampling) + spec2.add_sampled_input("v2", 2, 6) + spec2.set_sample_size([2]) + + psweep = ParameterSweepBase( + build_model=build_model, + input_specification=spec2, + solver=dummy_solver, + ) + + results, success, error = psweep.execute_single_sample(1) + + assert results is None + assert not success + assert error == "Test exception" + + @pytest.mark.component + def test_execute_single_sample_halt_on_error(self): + def build_model(): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=4) + m.c1 = Constraint(expr=m.v1 == m.v2) + + m.v2.fix() + + return m + + class dummy_solver: + @staticmethod + def solve(model, *args, **kwargs): + raise RuntimeError("Test exception") + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(UniformSampling) + spec2.add_sampled_input("v2", 2, 6) + spec2.set_sample_size([2]) + + psweep = ParameterSweepBase( + build_model=build_model, + input_specification=spec2, + solver=dummy_solver, + halt_on_error=True, + ) + + with pytest.raises( + RuntimeError, + match="Test exception", + ): + psweep.execute_single_sample(1) + + @pytest.mark.component + def test_execute_single_sample_recourse_custom(self): + def build_model(): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=4) + m.c1 = Constraint(expr=m.v1 == m.v2) + + m.v2.fix() + + return m + + class dummy_solver: + @staticmethod + def solve(model, *args, **kwargs): + raise Exception("Test exception") + + def recourse(model): + return "foo", "bar" + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(UniformSampling) + spec2.add_sampled_input("v2", 2, 6) + spec2.set_sample_size([2]) + + psweep = ParameterSweepBase( + build_model=build_model, + input_specification=spec2, + solver=dummy_solver, + handle_solver_error=recourse, + ) + + results, success, error = psweep.execute_single_sample(1) + + assert results == "foo" + assert success == "bar" + assert error == "Test exception" + + @pytest.fixture(scope="class") + def psweep_with_results(self): + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(UniformSampling) + spec2.add_sampled_input("v2", 2, 6) + spec2.set_sample_size([2]) + spec2.generate_samples() + + psweep = ParameterSweepBase( + input_specification=spec2, + ) + + psweep._results = { + 0: {"success": True, "results": 2}, + 1: {"success": True, "results": 6}, + } + + return psweep + + @pytest.mark.component + def test_to_dict(self, psweep_with_results): + outdict = psweep_with_results.to_dict() + assert outdict == psweep_dict + + @pytest.mark.unit + def test_from_dict(self): + def build_model(): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=4) + m.c1 = Constraint(expr=m.v1 == m.v2) + + m.v2.fix() + + return m + + 
psweep = ParameterSweepBase( + build_model=build_model, + ) + + psweep.from_dict(psweep_dict) + + input_spec = psweep.get_input_specification() + + assert isinstance(input_spec, ParameterSweepSpecification) + assert len(input_spec.inputs) == 1 + + assert input_spec.sampling_method is UniformSampling + assert isinstance(input_spec.samples, DataFrame) + assert input_spec.sample_size == [2] + + assert isinstance(psweep._results, dict) + assert len(psweep._results) == 2 + + for i in [0, 1]: + assert psweep._results[i]["success"] + assert psweep._results[i]["results"] == 2 + i * 4 + + @pytest.mark.component + def test_to_json_file(self, psweep_with_results): + temp_context = TempfileManager.new_context() + tmpfile = temp_context.create_tempfile(suffix=".json") + + psweep_with_results.to_json_file(tmpfile) + + with open(tmpfile, "r") as f: + lines = f.read() + f.close() + + expected = """{ + "specification": { + "inputs": { + "v2": { + "pyomo_path": "v2", + "lower": 2, + "upper": 6 + } + }, + "sampling_method": "UniformSampling", + "sample_size": [ + 2 + ], + "samples": { + "index": [ + 0, + 1 + ], + "columns": [ + "v2" + ], + "data": [ + [ + 2.0 + ], + [ + 6.0 + ] + ], + "index_names": [ + null + ], + "column_names": [ + null + ] + } + }, + "results": { + "0": { + "success": true, + "results": 2 + }, + "1": { + "success": true, + "results": 6 + } + } +}""" + + assert lines == expected + + # Check for clean up + temp_context.release(remove=True) + assert not os.path.exists(tmpfile) + + @pytest.mark.unit + def test_load_from_json_file(self): + fname = os.path.join(currdir, "load_psweep.json") + + psweep = ParameterSweepBase() + psweep.from_json_file(fname) + + assert psweep.config.build_model is None + + input_spec = psweep.get_input_specification() + + assert isinstance(input_spec, ParameterSweepSpecification) + assert len(input_spec.inputs) == 1 + + assert input_spec.sampling_method is UniformSampling + assert isinstance(input_spec.samples, DataFrame) + assert input_spec.sample_size == [2] + + assert isinstance(psweep._results, dict) + assert len(psweep._results) == 2 + + for i in [0, 1]: + assert psweep._results[i]["success"] + assert psweep._results[i]["results"] == 2 + i * 4 + + @pytest.mark.unit + def test_execute_parameter_sweep(self): + psweep = ParameterSweepBase() + + with pytest.raises(NotImplementedError): + psweep.execute_parameter_sweep() + + @pytest.mark.unit + def test_get_sample_values(self, psweep_with_results): + expected = Series({"v2": 2.0}, name=0) + vals = psweep_with_results.get_sample_values(0) + assert_series_equal(expected, vals) + + @pytest.mark.unit + def test_progress_bar(self, capsys): + psweep = ParameterSweepBase() + psweep.progress_bar(0.25, "testing", 16) + out, err = capsys.readouterr() + assert "25.0% ****------------ testing\n" in out + + +class TestSequentialSweepRunner: + @pytest.mark.component + @pytest.mark.solver + def test_sequential_runner(self): + def build_model(): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=4) + m.c1 = Constraint(expr=m.v1 == m.v2 - 1e-3) + + m.v2.fix() + + return m + + def build_outputs(model, run_stats): + return value(model.v1) + + spec2 = ParameterSweepSpecification() + spec2.set_sampling_method(UniformSampling) + spec2.add_sampled_input("v2", 2, 6) + spec2.set_sample_size([2]) + spec2.generate_samples() + + solver = SolverFactory("ipopt") + + psweep = SequentialSweepRunner( + build_model=build_model, + input_specification=spec2, + solver=solver, + build_outputs=build_outputs, + ) + + 
psweep.execute_parameter_sweep() + + assert psweep.results[0]["success"] + assert psweep.results[0]["results"] == pytest.approx(2 - 1e-3, rel=1e-8) + + assert psweep.results[1]["success"] + assert psweep.results[1]["results"] == pytest.approx(6 - 1e-3, rel=1e-8)
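
For readers reviewing this patch, the following is a minimal usage sketch of the new API, assembled only from calls exercised in the tests above; it is not part of the patch. It assumes an IDAES installation carrying this change and an ipopt executable on the path; the model, bounds, and output file name are illustrative placeholders.

# Minimal sketch of the Parameter Sweep API exercised by these tests.
# Assumption: idaes (with this patch) and ipopt are installed; the model
# and sampled bounds below are placeholders, not values from the patch.
from pyomo.environ import ConcreteModel, Constraint, SolverFactory, Var, value

from idaes.core.util.parameter_sweep import (
    ParameterSweepSpecification,
    SequentialSweepRunner,
)
from idaes.core.surrogate.pysmo.sampling import UniformSampling


def build_model():
    # Called to construct a model for each sample; sampled inputs must be
    # fixed Vars or mutable Params reachable via their pyomo_path.
    m = ConcreteModel()
    m.v1 = Var(initialize=1)
    m.v2 = Var(initialize=4)
    m.c1 = Constraint(expr=m.v1 == m.v2)
    m.v2.fix()
    return m


spec = ParameterSweepSpecification()
spec.set_sampling_method(UniformSampling)
spec.add_sampled_input("v2", 2, 6)  # pyomo_path, lower bound, upper bound
spec.set_sample_size([2])  # UniformSampling takes a list: points per input
spec.generate_samples()

runner = SequentialSweepRunner(
    build_model=build_model,
    input_specification=spec,
    solver=SolverFactory("ipopt"),
    # build_outputs receives the solved model and the solver run stats and
    # returns whatever should be stored as each sample's "results" entry.
    build_outputs=lambda model, run_stats: value(model.v1),
)
runner.execute_parameter_sweep()

for i, sample in runner.results.items():
    print(i, sample["success"], sample["results"])

runner.to_json_file("sweep_results.json")  # round-trips via from_json_file

IpoptConvergenceAnalysis wraps the same machinery with ipopt-specific outputs (iteration, restoration, and regularization counts plus a numerical_issues flag): construct it with a model and input_specification, call run_convergence_analysis(), and check against a stored baseline with compare_convergence_to_baseline(fname) or assert_baseline_comparison(fname), as the integration tests above demonstrate.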