diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 64df5fac28..8bd70f352f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -23,13 +23,14 @@ conftest.py @lbianchi-lbl /idaes/commands/ @dangunter # Core -/idaes/core/ @andrewlee94 @dallan-keylogic @lbianchi-lbl +/idaes/core/ @agarciadiego @dallan-keylogic @lbianchi-lbl /idaes/core/dmf/ @dangunter -/idaes/core/surrogate/ @andrewlee94 @bpaul4 @avdudchenko @rundxdi +/idaes/core/surrogate/ @bpaul4 @avdudchenko @rundxdi /idaes/core/ui/ @dangunter # Models -/idaes/models/ @andrewlee94 @bpaul4 +/idaes/models/ @agarciadiego @bpaul4 +/idaes/models/properties/ @agarciadiego @dallan-keylogic # Apps - each package needs a maintainer /idaes/apps/caprese/ @Robbybp diff --git a/idaes/core/surrogate/keras_surrogate.py b/idaes/core/surrogate/keras_surrogate.py index 45a3a64a2a..1bc1feed62 100644 --- a/idaes/core/surrogate/keras_surrogate.py +++ b/idaes/core/surrogate/keras_surrogate.py @@ -17,23 +17,21 @@ # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring -from enum import Enum import json import os.path -import numpy as np import pandas as pd from pyomo.common.dependencies import attempt_import -from idaes.core.surrogate.base.surrogate_base import SurrogateBase from idaes.core.surrogate.sampling.scaling import OffsetScaler +from idaes.core.surrogate.omlt_base_surrogate_class import OMLTSurrogate + keras, keras_available = attempt_import("tensorflow.keras") omlt, omlt_available = attempt_import("omlt") if omlt_available: - from omlt import OmltBlock, OffsetScaling from omlt.neuralnet.nn_formulation import ( FullSpaceSmoothNNFormulation, ReducedSpaceSmoothNNFormulation, @@ -45,7 +43,7 @@ from omlt.io import load_keras_sequential -class KerasSurrogate(SurrogateBase): +class KerasSurrogate(OMLTSurrogate): def __init__( self, keras_model, @@ -87,52 +85,11 @@ def __init__( input_labels=input_labels, output_labels=output_labels, input_bounds=input_bounds, + input_scaler=input_scaler, + output_scaler=output_scaler, ) - - # make sure we are using the standard scaler - if ( - input_scaler is not None - and not isinstance(input_scaler, OffsetScaler) - or output_scaler is not None - and not isinstance(output_scaler, OffsetScaler) - ): - raise NotImplementedError("KerasSurrogate only supports the OffsetScaler.") - - # check that the input labels match - if input_scaler is not None and input_scaler.expected_columns() != input_labels: - raise ValueError( - "KerasSurrogate created with input_labels that do not match" - " the expected columns in the input_scaler.\n" - "input_labels={}\n" - "input_scaler.expected_columns()={}".format( - input_labels, input_scaler.expected_columns() - ) - ) - - # check that the output labels match - if ( - output_scaler is not None - and output_scaler.expected_columns() != output_labels - ): - raise ValueError( - "KerasSurrogate created with output_labels that do not match" - " the expected columns in the output_scaler.\n" - "output_labels={}\n" - "output_scaler.expected_columns()={}".format( - output_labels, output_scaler.expected_columns() - ) - ) - - self._input_scaler = input_scaler - self._output_scaler = output_scaler self._keras_model = keras_model - class Formulation(Enum): - FULL_SPACE = 1 - REDUCED_SPACE = 2 - RELU_BIGM = 3 - RELU_COMPLEMENTARITY = 4 - def populate_block(self, block, additional_options=None): """ Method to populate a Pyomo Block with the keras model constraints. 
@@ -149,37 +106,7 @@ def populate_block(self, block, additional_options=None): formulation = additional_options.pop( "formulation", KerasSurrogate.Formulation.FULL_SPACE ) - offset_inputs = np.zeros(self.n_inputs()) - factor_inputs = np.ones(self.n_inputs()) - offset_outputs = np.zeros(self.n_outputs()) - factor_outputs = np.ones(self.n_outputs()) - if self._input_scaler: - offset_inputs = self._input_scaler.offset_series()[ - self.input_labels() - ].to_numpy() - factor_inputs = self._input_scaler.factor_series()[ - self.input_labels() - ].to_numpy() - if self._output_scaler: - offset_outputs = self._output_scaler.offset_series()[ - self.output_labels() - ].to_numpy() - factor_outputs = self._output_scaler.factor_series()[ - self.output_labels() - ].to_numpy() - - # build the OMLT scaler object - omlt_scaling = OffsetScaling( - offset_inputs=offset_inputs, - factor_inputs=factor_inputs, - offset_outputs=offset_outputs, - factor_outputs=factor_outputs, - ) - - # omlt takes *scaled* input bounds as a dictionary with int keys - input_bounds = dict(enumerate(self.input_bounds().values())) - scaled_input_bounds = omlt_scaling.get_scaled_input_expressions(input_bounds) - scaled_input_bounds = {i: tuple(bnd) for i, bnd in scaled_input_bounds.items()} + omlt_scaling, scaled_input_bounds = self.generate_omlt_scaling_objecets() net = load_keras_sequential( self._keras_model, @@ -201,32 +128,7 @@ def populate_block(self, block, additional_options=None): "KerasSurrogate.populate_block. Please pass a valid " "formulation.".format(formulation) ) - block.nn = OmltBlock() - block.nn.build_formulation( - formulation_object, - ) - - # input/output variables need to be constrained to be equal - # auto-created variables that come from OMLT. - input_idx_by_label = {s: i for i, s in enumerate(self._input_labels)} - input_vars_as_dict = block.input_vars_as_dict() - - @block.Constraint(self._input_labels) - def input_surrogate_ties(m, input_label): - return ( - input_vars_as_dict[input_label] - == block.nn.inputs[input_idx_by_label[input_label]] - ) - - output_idx_by_label = {s: i for i, s in enumerate(self._output_labels)} - output_vars_as_dict = block.output_vars_as_dict() - - @block.Constraint(self._output_labels) - def output_surrogate_ties(m, output_label): - return ( - output_vars_as_dict[output_label] - == block.nn.outputs[output_idx_by_label[output_label]] - ) + self.populate_block_with_net(block, formulation_object) def evaluate_surrogate(self, inputs): """ diff --git a/idaes/core/surrogate/omlt_base_surrogate_class.py b/idaes/core/surrogate/omlt_base_surrogate_class.py new file mode 100644 index 0000000000..6ae587d583 --- /dev/null +++ b/idaes/core/surrogate/omlt_base_surrogate_class.py @@ -0,0 +1,187 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +Base class for surrogates imported into IDAES through the OMLT framework +""" +# TODO: Missing docstrings +# pylint: disable=missing-class-docstring +# pylint: disable=missing-function-docstring + +from enum import Enum +import numpy as np + +from pyomo.common.dependencies import attempt_import + +from idaes.core.surrogate.base.surrogate_base import SurrogateBase +from idaes.core.surrogate.sampling.scaling import OffsetScaler + +keras, keras_available = attempt_import("tensorflow.keras") +omlt, omlt_available = attempt_import("omlt") + +if omlt_available: + from omlt import OmltBlock, OffsetScaling + + +class OMLTSurrogate(SurrogateBase): + def __init__( + self, + input_labels, + output_labels, + input_bounds, + input_scaler=None, + output_scaler=None, + ): + """ + Base SurrogateObject for surrogates built on the OMLT framework. + Utilizes OMLT for importing trained models (e.g. Keras or ONNX) to IDAES. + + Contains methods to both populate a Pyomo Block with constraints + representing the surrogate and to evaluate the surrogate at a set of user + provided points. + + This constructor should only be used when first creating the surrogate within IDAES. + Once created, this object can be stored to disk using save_to_folder and loaded + with load_from_folder. + + Args: + input_labels: list of str + The ordered list of labels corresponding to the inputs in the model + output_labels: list of str + The ordered list of labels corresponding to the outputs in the model + input_bounds: None or dict of tuples + Keys correspond to each of the input labels and values are the tuples of + bounds (lb, ub) + input_scaler: None or OffsetScaler + The scaler to be used for the inputs. If None, then no scaler is used + output_scaler: None or OffsetScaler + The scaler to be used for the outputs.
If None, then no scaler is used + """ + super().__init__( + input_labels=input_labels, + output_labels=output_labels, + input_bounds=input_bounds, + ) + + # make sure we are using the standard scaler + if ( + input_scaler is not None + and not isinstance(input_scaler, OffsetScaler) + or output_scaler is not None + and not isinstance(output_scaler, OffsetScaler) + ): + raise NotImplementedError("KerasSurrogate only supports the OffsetScaler.") + + # check that the input labels match + if input_scaler is not None and input_scaler.expected_columns() != input_labels: + raise ValueError( + "KerasSurrogate created with input_labels that do not match" + " the expected columns in the input_scaler.\n" + "input_labels={}\n" + "input_scaler.expected_columns()={}".format( + input_labels, input_scaler.expected_columns() + ) + ) + + # check that the output labels match + if ( + output_scaler is not None + and output_scaler.expected_columns() != output_labels + ): + raise ValueError( + "KerasSurrogate created with output_labels that do not match" + " the expected columns in the output_scaler.\n" + "output_labels={}\n" + "output_scaler.expected_columns()={}".format( + output_labels, output_scaler.expected_columns() + ) + ) + + self._input_scaler = input_scaler + self._output_scaler = output_scaler + + class Formulation(Enum): + FULL_SPACE = 1 + REDUCED_SPACE = 2 + RELU_BIGM = 3 + RELU_COMPLEMENTARITY = 4 + + def generate_omlt_scaling_objecets(self): + offset_inputs = np.zeros(self.n_inputs()) + factor_inputs = np.ones(self.n_inputs()) + offset_outputs = np.zeros(self.n_outputs()) + factor_outputs = np.ones(self.n_outputs()) + if self._input_scaler: + offset_inputs = self._input_scaler.offset_series()[ + self.input_labels() + ].to_numpy() + factor_inputs = self._input_scaler.factor_series()[ + self.input_labels() + ].to_numpy() + if self._output_scaler: + offset_outputs = self._output_scaler.offset_series()[ + self.output_labels() + ].to_numpy() + factor_outputs = self._output_scaler.factor_series()[ + self.output_labels() + ].to_numpy() + + omlt_scaling = OffsetScaling( + offset_inputs=offset_inputs, + factor_inputs=factor_inputs, + offset_outputs=offset_outputs, + factor_outputs=factor_outputs, + ) + + # omlt takes *scaled* input bounds as a dictionary with int keys + input_bounds = dict(enumerate(self.input_bounds().values())) + scaled_input_bounds = omlt_scaling.get_scaled_input_expressions(input_bounds) + scaled_input_bounds = {i: tuple(bnd) for i, bnd in scaled_input_bounds.items()} + return omlt_scaling, scaled_input_bounds + + def populate_block_with_net(self, block, formulation_object): + """ + Method to populate a Pyomo Block with the omlt model constraints and build its formulation. + + Args: + block: Pyomo Block component + The block to be populated with variables and/or constraints. + formulation_object: omlt loaded network formulation + """ + + block.nn = OmltBlock() + block.nn.build_formulation( + formulation_object, + ) + + # input/output variables need to be constrained to be equal + # auto-created variables that come from OMLT. 
+ input_idx_by_label = {s: i for i, s in enumerate(self._input_labels)} + input_vars_as_dict = block.input_vars_as_dict() + + @block.Constraint(self._input_labels) + def input_surrogate_ties(m, input_label): + return ( + input_vars_as_dict[input_label] + == block.nn.inputs[input_idx_by_label[input_label]] + ) + + output_idx_by_label = {s: i for i, s in enumerate(self._output_labels)} + output_vars_as_dict = block.output_vars_as_dict() + + @block.Constraint(self._output_labels) + def output_surrogate_ties(m, output_label): + return ( + output_vars_as_dict[output_label] + == block.nn.outputs[output_idx_by_label[output_label]] + ) diff --git a/idaes/core/surrogate/onnx_surrogate.py b/idaes/core/surrogate/onnx_surrogate.py new file mode 100644 index 0000000000..52b559a89b --- /dev/null +++ b/idaes/core/surrogate/onnx_surrogate.py @@ -0,0 +1,257 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +Interface for importing ONNX models into IDAES +""" +# TODO: Missing docstrings +# pylint: disable=missing-class-docstring +# pylint: disable=missing-function-docstring +# TODO: Importing protected _ACTIVATION_OP_TYPES as not exposed in distributed version +# pylint: disable=W0123 +from enum import Enum +import json +import os.path + +from pyomo.common.dependencies import attempt_import + +from idaes.core.surrogate.sampling.scaling import OffsetScaler + +from idaes.core.surrogate.omlt_base_surrogate_class import OMLTSurrogate + +onnx, onnx_available = attempt_import("onnx") +omlt, omlt_available = attempt_import("omlt") + +if omlt_available: + from omlt.neuralnet import ( + FullSpaceSmoothNNFormulation, + ReducedSpaceSmoothNNFormulation, + ReluBigMFormulation, + ReluComplementarityFormulation, + ) + import omlt.io as omltio + + if onnx_available: + from omlt.io import load_onnx_neural_network, write_onnx_model_with_bounds + + +class ONNXSurrogate(OMLTSurrogate): + def __init__( + self, + onnx_model, + input_labels, + output_labels, + input_bounds, + input_scaler=None, + output_scaler=None, + ): + """ + Standard SurrogateObject for surrogates based on ONNX models. + Utilizes the OMLT framework for importing ONNX models to IDAES. + + Contains methods to both populate a Pyomo Block with constraints + representing the surrogate and to evaluate the surrogate a set of user + provided points. + + This constructor should only be used when first creating the surrogate within IDAES. + Once created, this object can be stored to disk using save_to_folder and loaded + with load_from_folder + + Args: + onnx_model: Onnx model file to be loaded. 
+ input_labels: list of str + The ordered list of labels corresponding to the inputs in the onnx model + output_labels: list of str + The ordered list of labels corresponding to the outputs in the onnx model + input_bounds: None or dict of tuples + Keys correspond to each of the input labels and values are the tuples of + bounds (lb, ub) + input_scaler: None or OffsetScaler + The scaler to be used for the inputs. If None, then no scaler is used + output_scaler: None or OffsetScaler + The scaler to be used for the outputs. If None, then no scaler is used + """ + super().__init__( + input_labels=input_labels, + output_labels=output_labels, + input_bounds=input_bounds, + input_scaler=input_scaler, + output_scaler=output_scaler, + ) + + self._onnx_model = onnx_model + + class Formulation(Enum): + FULL_SPACE = 1 + REDUCED_SPACE = 2 + RELU_BIGM = 3 + RELU_COMPLEMENTARITY = 4 + + def populate_block(self, block, additional_options=None): + """ + Method to populate a Pyomo Block with the onnx model constraints. + + Args: + block: Pyomo Block component + The block to be populated with variables and/or constraints. + additional_options: dict or None + If not None, then should be a dict with the following keys: + 'formulation': ONNXSurrogate.Formulation + The formulation to use with OMLT. Possible values are FULL_SPACE, + REDUCED_SPACE, RELU_BIGM, or RELU_COMPLEMENTARITY (default is REDUCED_SPACE) + """ + formulation = additional_options.pop( + "formulation", ONNXSurrogate.Formulation.REDUCED_SPACE + ) + omlt_scaling, scaled_input_bounds = self.generate_omlt_scaling_objecets() + + # TODO: remove this once new OMLT 1.2 is made available and includes tanh support + # overrides default available activation functions for ONNX, tanh is not listed in 1.1 but is supported + + omltio.onnx_parser._ACTIVATION_OP_TYPES = [ # pylint: disable=protected-access + "Relu", + "Sigmoid", + "LogSoftmax", + "Tanh", + ] + + net = load_onnx_neural_network( + self._onnx_model, + scaling_object=omlt_scaling, + input_bounds=scaled_input_bounds, + ) + + if formulation == ONNXSurrogate.Formulation.FULL_SPACE: + formulation_object = FullSpaceSmoothNNFormulation(net) + elif formulation == ONNXSurrogate.Formulation.REDUCED_SPACE: + formulation_object = ReducedSpaceSmoothNNFormulation(net) + elif formulation == ONNXSurrogate.Formulation.RELU_BIGM: + formulation_object = ReluBigMFormulation(net) + elif formulation == ONNXSurrogate.Formulation.RELU_COMPLEMENTARITY: + formulation_object = ReluComplementarityFormulation(net) + else: + raise ValueError( + 'An unrecognized formulation "{}" was passed to ' + "ONNXSurrogate.populate_block. Please pass a valid " + "formulation.".format(formulation) + ) + self.populate_block_with_net(block, formulation_object) + + def evaluate_surrogate(self, inputs): + """ + Method to evaluate the ONNX model at a set of input values. + + Args: + inputs: numpy array of input values. First dimension of array + must match the number of input variables. + + Returns: + outputs: numpy array of values for all outputs evaluated at input + points.
+ """ + raise NotImplementedError + + def save_to_folder(self, save_location, save_name): + """ + Save the surrogate object to disk by providing the location to store the + model in and its name, as well as additional IDAES metadata. + + Args: + save_location: str + The name of the folder to contain the ONNX model and additional + IDAES metadata + save_name: str + The name for the model + """ + + write_onnx_model_with_bounds( + os.path.join(save_location, "{}.onnx".format(save_name)), + onnx_model=self._onnx_model, + input_bounds=None, + ) + info = dict() + info["input_scaler"] = None + if self._input_scaler is not None: + info["input_scaler"] = self._input_scaler.to_dict() + info["output_scaler"] = None + if self._output_scaler is not None: + info["output_scaler"] = self._output_scaler.to_dict() + + # serialize information from the base class + info["input_labels"] = self.input_labels() + info["output_labels"] = self.output_labels() + info["input_bounds"] = self.input_bounds() + + with open( + os.path.join(save_location, "{}_idaes_info.json".format(save_name)), "w" + ) as fd: + json.dump(info, fd) + + @classmethod + def load_onnx_model(cls, onnx_model_location, model_name): + """ + Load the surrogate object from disk by providing the folder holding the + onnx model and the model name. The folder must also contain an accompanying + json file with the following structure: + + "input_scaler":{ + "expected_columns":[list of input_keys], + "offset":{"input_key":offset_value,etc.}, + "factor":{"input_key":factor_value (e.g. multiplier),etc.}} + + "output_scaler":{ + "expected_columns":[list of output_keys], + "offset":{"output_key":offset_value,etc.}, + "factor":{"output_key":factor_value (e.g. multiplier),etc.}} + + "input_bounds":{"input_key":[low_bound,high_bound],etc.} + "input_labels":[list of input_keys] + "output_labels":[list of output_keys] + + Args: + onnx_model_location: str + The name of the folder containing the onnx model and additional + IDAES metadata + model_name: str + The name of the model to load in the folder + + Returns: an instance of ONNXSurrogate + """ + onnx_model = onnx.load( + os.path.join(onnx_model_location, "{}.onnx".format(model_name)) + ) + with open( + os.path.join(onnx_model_location, "{}_idaes_info.json".format(model_name)) + ) as fd: + info = json.load(fd) + + input_scaler = None + if info["input_scaler"] is not None: + input_scaler = OffsetScaler.from_dict(info["input_scaler"]) + + output_scaler = None + if info["output_scaler"] is not None: + output_scaler = OffsetScaler.from_dict(info["output_scaler"]) + + return ONNXSurrogate( + onnx_model=onnx_model, + input_labels=info["input_labels"], + output_labels=info["output_labels"], + input_bounds=info["input_bounds"], + input_scaler=input_scaler, + output_scaler=output_scaler, + ) diff --git a/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST.onnx b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST.onnx new file mode 100644 index 0000000000..b0101b3ec0 Binary files /dev/null and b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST.onnx differ diff --git a/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST_idaes_info.json b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST_idaes_info.json new
file mode 100644 index 0000000000..d2970958d1 --- /dev/null +++ b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST_idaes_info.json @@ -0,0 +1 @@ +{"input_scaler": {"expected_columns": ["feed_pH", "pressure_bar_feed", "Na", "Cl", "Ca", "Mg", "HCO3", "SO4", "K", "Sr", "Ba", "HCl"], "offset": {"feed_pH": 4.0, "pressure_bar_feed": 1.0, "Na": 0.0, "Cl": 0.0, "Ca": 0.0, "Mg": 0.0, "HCO3": 0.0, "SO4": 0.0, "K": 0.0, "Sr": 0.0, "Ba": 0.0, "HCl": 0.0}, "factor": {"feed_pH": 8.0, "pressure_bar_feed": 405.32, "Na": 135.99, "Cl": 180.0, "Ca": 10.0, "Mg": 10.0, "HCO3": 10.0, "SO4": 100.0, "K": 40.0, "Sr": 10.0, "Ba": 0.1, "HCl": 2000.0}}, "input_bounds": {"feed_pH": [4.0, 12.0], "pressure_bar_feed": [1.0, 406.32], "Na": [0.0, 135.99], "Cl": [0.0, 180.0], "Ca": [0.0, 10.0], "Mg": [0.0, 10.0], "HCO3": [0.0, 10.0], "SO4": [0.0, 100.0], "K": [0.0, 40.0], "Sr": [0.0, 10.0], "Ba": [0.0, 0.1], "HCl": [0.0, 2000.0]}, "output_scaler": {"expected_columns": ["Calcite_ST"], "offset": {"Calcite_ST": 0.0}, "factor": {"Calcite_ST": 98.19}}, "input_labels": ["feed_pH", "pressure_bar_feed", "Na", "Cl", "Ca", "Mg", "HCO3", "SO4", "K", "Sr", "Ba", "HCl"], "output_labels": ["Calcite_ST"]} \ No newline at end of file diff --git a/idaes/core/surrogate/tests/test_onnx_surrogate.py b/idaes/core/surrogate/tests/test_onnx_surrogate.py new file mode 100644 index 0000000000..623254c285 --- /dev/null +++ b/idaes/core/surrogate/tests/test_onnx_surrogate.py @@ -0,0 +1,173 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +Tests for ONNXSurrogate +""" +import pytest + +pytest.importorskip("onnx", reason="onnx not available") +pytest.importorskip("omlt", reason="omlt not available") + +import os.path +from pyomo.common.fileutils import this_file_dir +from pyomo.common.tempfiles import TempfileManager +from pyomo.environ import ( + ConcreteModel, + Var, + SolverFactory, + assert_optimal_termination, + value, +) +from idaes.core.surrogate.onnx_surrogate import ONNXSurrogate +from idaes.core.surrogate.surrogate_block import SurrogateBlock +from idaes.core.surrogate.sampling.scaling import OffsetScaler +import json +import onnx + +# onnx, onnx_available = attempt_import("onnx") +rtol = 1e-4 +atol = 1e-4 + + +def load_onnx_model_data( + name="net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST", +): + onnx_folder_name = os.path.join(this_file_dir(), "data", "onnx_models") + onnx_model = onnx.load(os.path.join(onnx_folder_name, "{}.onnx".format(name))) + with open(os.path.join(onnx_folder_name, "{}_idaes_info.json".format(name))) as fd: + scaler_info = json.load(fd) + + test_inputs = { + "vars": [ + "feed_pH", + "pressure_bar_feed", + "Na", + "Cl", + "Ca", + "Mg", + "HCO3", + "SO4", + "K", + "Sr", + "Ba", + "HCl", + ], + "feed_pH": 9.5, + "pressure_bar_feed": 1.01325, + "Na": 0.230858556000748, + "Cl": 0.106701648328453, + "Ca": 0.0245274696499109, + "Mg": 0.0311348703689873, + "HCO3": 0.430482141673564, + "SO4": 0.182204065, + "K": 0.000500561, + "Sr": 0.000761853, + "Ba": 2.50e-05, + "HCl": 10, + } + test_outputs = {"vars": ["Calcite_ST"], "Calcite_ST": 40.4270772546529} + return onnx_model, scaler_info, test_inputs, test_outputs + + +@pytest.mark.unit +@pytest.mark.skipif(not SolverFactory("ipopt").available(False), reason="no Ipopt") +def test_onnx_surrogate_manual_creation(): + ### + # Test 1->2 sigmoid + ### + onnx_model, scaler_info, test_inputs, test_outputs = load_onnx_model_data() + input_scaler = None + for key, items in scaler_info.items(): + print(key, items) + if scaler_info["input_scaler"] is not None: + input_scaler = OffsetScaler.from_dict(scaler_info["input_scaler"]) + + output_scaler = None + if scaler_info["output_scaler"] is not None: + output_scaler = OffsetScaler.from_dict(scaler_info["output_scaler"]) + onnx_surrogate = ONNXSurrogate( + onnx_model, + input_labels=scaler_info["input_labels"], + output_labels=scaler_info["output_labels"], + input_bounds=scaler_info["input_bounds"], + input_scaler=input_scaler, + output_scaler=output_scaler, + ) + + m = ConcreteModel() + + output_vars = ["Calcite_ST"] + m.inputs = Var(test_inputs["vars"]) + + m.outputs = Var(test_outputs["vars"]) + m.surrogate = SurrogateBlock() + m.surrogate.build_model( + surrogate_object=onnx_surrogate, + input_vars=[m.inputs[input_var] for input_var in test_inputs["vars"]], + output_vars=[m.outputs[output_var] for output_var in test_outputs["vars"]], + formulation=ONNXSurrogate.Formulation.REDUCED_SPACE, + ) + for key in test_inputs["vars"]: + m.inputs[key].fix(test_inputs[key]) + + solver = SolverFactory("ipopt") + status = solver.solve(m, tee=True) + assert_optimal_termination(status) + assert pytest.approx(test_outputs["Calcite_ST"], rel=1e-3) == value( + m.outputs["Calcite_ST"] + ) + + +@pytest.mark.unit +@pytest.mark.skipif(not SolverFactory("ipopt").available(False), reason="no Ipopt") +def test_onnx_surrogate_load_and_save_from_file(): + ### + # Test 1->2 sigmoid + ### + _, _, test_inputs, test_outputs = 
load_onnx_model_data() + + onnx_surrogate = ONNXSurrogate.load_onnx_model( + onnx_model_location=os.path.join(this_file_dir(), "data", "onnx_models"), + model_name="net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST", + ) + with TempfileManager.new_context() as tf: + dname = tf.mkdtemp() + onnx_surrogate.save_to_folder(dname, "temp_model") + + loaded_onnx_surrogate = ONNXSurrogate.load_onnx_model( + onnx_model_location=dname, + model_name="temp_model", + ) + assert not os.path.isdir(dname) + m = ConcreteModel() + + output_vars = ["Calcite_ST"] + m.inputs = Var(test_inputs["vars"]) + + m.outputs = Var(test_outputs["vars"]) + m.surrogate = SurrogateBlock() + m.surrogate.build_model( + surrogate_object=loaded_onnx_surrogate, + input_vars=[m.inputs[input_var] for input_var in test_inputs["vars"]], + output_vars=[m.outputs[output_var] for output_var in test_outputs["vars"]], + formulation=ONNXSurrogate.Formulation.REDUCED_SPACE, + ) + for key in test_inputs["vars"]: + m.inputs[key].fix(test_inputs[key]) + + solver = SolverFactory("ipopt") + status = solver.solve(m, tee=True) + assert_optimal_termination(status) + assert pytest.approx(test_outputs["Calcite_ST"], rel=1e-3) == value( + m.outputs["Calcite_ST"] + ) diff --git a/idaes/core/util/utility_minimization.py b/idaes/core/util/utility_minimization.py index 8ea9e35bd8..34ce574b27 100644 --- a/idaes/core/util/utility_minimization.py +++ b/idaes/core/util/utility_minimization.py @@ -28,7 +28,7 @@ __author__ = "Alejandro Garciadiego, Alexander Dowling" -# Temperature Epsilon to add or subsract 1 degree to avoid division over +# Temperature Epsilon to add or subtract 1 degree to avoid division over # 0 in equipment not changing temperature EpsT = 1 @@ -301,7 +301,7 @@ def heat_data(blk, heating, cooling, DG_units=pyunits.Mwatt): Q[i] = value(pyunits.convert(v.heat_duty[0], to_units=DG_units)) FCp_[i] = Q[i] / (T_out[i] - T_in[i]) - # Generate a large dictioary containing all the data obtained + # Generate a large dictionary containing all the data obtained # from the equipment exchangeData = {} for i in pinch_streamsdict: @@ -413,7 +413,7 @@ def pinch_calc(heating, cooling, exchangeData, DTmin, eps): initQw = -sum(value(exchangeData[i]["Q"]) for i in exchangerdict) + initQs initQw = max([initQw, 0.0]) - # Fill Class with all the data to initialioze Duran-Grossmann variables + # Fill Class with all the data to initialize Duran-Grossmann variables PD = PinchDataClass(initQs, initQw) PD.initQAh = initQAh PD.initQAc = initQAc diff --git a/idaes/models/properties/modular_properties/base/generic_property.py b/idaes/models/properties/modular_properties/base/generic_property.py index 26a31b3d57..2dcb428bea 100644 --- a/idaes/models/properties/modular_properties/base/generic_property.py +++ b/idaes/models/properties/modular_properties/base/generic_property.py @@ -304,7 +304,8 @@ def build(self): # Add Phase objects if self.config.phases is None: raise ConfigurationError( - "{} was not provided with a phases argument.".format(self.name) + f"{self.name} was not provided with a phases argument. " + "Did you forget to unpack the configurations dictionary?" 
) # Add a flag indicating whether this is an electrolyte system or not diff --git a/idaes/models/properties/modular_properties/base/generic_reaction.py b/idaes/models/properties/modular_properties/base/generic_reaction.py index 531a7bd06d..6ad5644c3a 100644 --- a/idaes/models/properties/modular_properties/base/generic_reaction.py +++ b/idaes/models/properties/modular_properties/base/generic_reaction.py @@ -199,6 +199,13 @@ def build(self): # and cannot be set until the config block is created by super.build super(ReactionParameterBlock, self).build() + # Check to make sure a property block was assigned + if self.config.property_package is None: + raise ConfigurationError( + f"{self.name} was not assigned a property package. " + "Did you forget to unpack the configuration dictionary?" + ) + # Set base units of measurement self.get_metadata().add_default_units(self.config.base_units) diff --git a/idaes/models/properties/modular_properties/base/tests/test_generic_property.py b/idaes/models/properties/modular_properties/base/tests/test_generic_property.py index 716c824443..39a8e84e13 100644 --- a/idaes/models/properties/modular_properties/base/tests/test_generic_property.py +++ b/idaes/models/properties/modular_properties/base/tests/test_generic_property.py @@ -199,7 +199,7 @@ def test_no_components(self): with pytest.raises( ConfigurationError, - match="params was not provided with a components " "argument.", + match="params was not provided with a components argument.", ): m.params = DummyParameterBlock( phases={ @@ -215,12 +215,33 @@ def test_no_phases(self): with pytest.raises( ConfigurationError, - match="params was not provided with a phases " "argument.", + match="params was not provided with a phases argument. " + "Did you forget to unpack the configurations dictionary?", ): m.params = DummyParameterBlock( components={"a": {}, "b": {}, "c": {}}, base_units=base_units ) + @pytest.mark.unit + def test_packed_dict(self): + m = ConcreteModel() + + dummy_dict = { + "phases": { + "p1": {"equation_of_state": "foo"}, + "p2": {"equation_of_state": "bar"}, + }, + } + + with pytest.raises( + ConfigurationError, + match=re.escape( + "params[phases] was not provided with a phases argument. " + "Did you forget to unpack the configurations dictionary?" + ), + ): + m.params = DummyParameterBlock(dummy_dict) + @pytest.mark.unit def test_invalid_component_in_phase_component_list(self): m = ConcreteModel() diff --git a/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py b/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py index 39f81c1921..722837906d 100644 --- a/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py +++ b/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py @@ -193,6 +193,17 @@ def test_rate_build_no_stoichiometry(self, m): rate_reactions={"r1": {"heat_of_reaction": "foo", "rate_form": "foo"}}, ) + @pytest.mark.unit + def test_packed_config_dict(self, m): + with pytest.raises( + ConfigurationError, + match=re.escape( + "rxn_params[property_package] was not assigned a property package. " + "Did you forget to unpack the configuration dictionary?" 
+ ), + ): + m.rxn_params = GenericReactionParameterBlock({"property_package": m.params}) + @pytest.mark.unit def test_rate_build_invalid_phase_stoichiometry(self, m): with pytest.raises( diff --git a/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py b/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py index cd08ffbb67..4b305a1630 100644 --- a/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py +++ b/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py @@ -14,8 +14,8 @@ Tests for Plate Heat Exchnager unit model. Author: Akula Paul """ - import pytest + from pyomo.environ import ( check_optimal_termination, ConcreteModel, @@ -23,6 +23,8 @@ units as pyunits, value, ) +from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent + from idaes.core import FlowsheetBlock from idaes.models_extra.column_models.plate_heat_exchanger import ( PlateHeatExchanger as PHE, @@ -37,7 +39,6 @@ from idaes.core.util.model_statistics import degrees_of_freedom from idaes.core.util.testing import initialization_tester from idaes.core.solvers import get_solver -from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent # ----------------------------------------------------------------------------- @@ -66,8 +67,9 @@ def test_config(): workaround_for_1294 = pytest.mark.xfail( + # the failures only occur for Windows on GHA with Python <3.12, and Linux with Python 3.12 reason="These tests fail with Pyomo 6.7.0. See IDAES/idaes-pse#1294 for details", - strict=False, # the failures only occur for certain platforms, e.g. Windows on GHA + strict=False, ) diff --git a/setup.py b/setup.py index 407a027159..d5303b756a 100644 --- a/setup.py +++ b/setup.py @@ -44,6 +44,7 @@ class ExtraDependencies: omlt = [ "omlt==1.1", # fix the version for now as package evolves "tensorflow", + "onnx", ] grid = [ "gridx-prescient>=2.2.1", # idaes.tests.prescient @@ -124,6 +125,7 @@ def __getitem__(self, key): "*.trc", "*.nl", "*.keras", # idaes/core/surrogate/tests/data/keras_models + "*.onnx", ] }, include_package_data=True,
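Usage sketch (illustrative only, not part of the diff): the snippet below shows how the new ONNXSurrogate API added above might be exercised, mirroring idaes/core/surrogate/tests/test_onnx_surrogate.py. The folder path, the model name "my_model", and the midpoint input values are placeholder assumptions; the folder is assumed to contain my_model.onnx plus the my_model_idaes_info.json metadata described in load_onnx_model, and solving requires Ipopt.

import os

from pyomo.environ import (
    ConcreteModel,
    Var,
    SolverFactory,
    assert_optimal_termination,
    value,
)

from idaes.core.surrogate.onnx_surrogate import ONNXSurrogate
from idaes.core.surrogate.surrogate_block import SurrogateBlock

# Load the ONNX model plus its IDAES metadata (labels, bounds, scalers) from disk.
surrogate = ONNXSurrogate.load_onnx_model(
    onnx_model_location=os.path.join("data", "onnx_models"),  # placeholder folder
    model_name="my_model",  # placeholder model name
)

# Build a Pyomo model with one variable per surrogate input/output and attach the
# surrogate through a SurrogateBlock using the reduced-space OMLT formulation.
m = ConcreteModel()
m.inputs = Var(surrogate.input_labels())
m.outputs = Var(surrogate.output_labels())
m.surrogate = SurrogateBlock()
m.surrogate.build_model(
    surrogate_object=surrogate,
    input_vars=[m.inputs[k] for k in surrogate.input_labels()],
    output_vars=[m.outputs[k] for k in surrogate.output_labels()],
    formulation=ONNXSurrogate.Formulation.REDUCED_SPACE,
)

# Fix each input at the midpoint of its bounds (placeholder choice), leaving a
# square problem, then solve and read back the surrogate outputs.
for k, (lb, ub) in surrogate.input_bounds().items():
    m.inputs[k].fix(0.5 * (lb + ub))
status = SolverFactory("ipopt").solve(m)
assert_optimal_termination(status)
print({k: value(m.outputs[k]) for k in surrogate.output_labels()})

Because ONNXSurrogate.evaluate_surrogate raises NotImplementedError, evaluation goes through the Pyomo block as above rather than through a direct surrogate call.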