diff --git a/idaes/core/util/model_diagnostics.py b/idaes/core/util/model_diagnostics.py index 49907f64e3..12609da04c 100644 --- a/idaes/core/util/model_diagnostics.py +++ b/idaes/core/util/model_diagnostics.py @@ -3177,6 +3177,47 @@ def assert_baseline_comparison( if any(len(v) != 0 for v in diffs.values()): raise AssertionError("Convergence analysis does not match baseline") + def report_convergence_summary(self, stream=None): + """ + Reports a brief summary of the model convergence run. + + Args: + stream: Optional output stream to print results to. + + Returns: + None + + """ + if stream is None: + stream = sys.stdout + + successes = 0 + failures = 0 + runs_w_restoration = 0 + runs_w_regularization = 0 + runs_w_num_iss = 0 + + for v in self.results.values(): + # Check for differences + if v["success"]: + successes += 1 + else: + failures += 1 + + if v["results"]["iters_in_restoration"] > 0: + runs_w_restoration += 1 + if v["results"]["iters_w_regularization"] > 0: + runs_w_regularization += 1 + if v["results"]["numerical_issues"] > 0: + runs_w_num_iss += 1 + + stream.write( + f"Successes: {successes}, Failures {failures} ({100*successes/(successes+failures)}%)\n" + ) + stream.write(f"Runs with Restoration: {runs_w_restoration}\n") + stream.write(f"Runs with Regularization: {runs_w_regularization}\n") + stream.write(f"Runs with Numerical Issues: {runs_w_num_iss}\n") + def to_dict(self): """ Serialize specification and current results to dict form diff --git a/idaes/core/util/parameter_sweep.py b/idaes/core/util/parameter_sweep.py index 9f9be3e445..0aa06ddfba 100644 --- a/idaes/core/util/parameter_sweep.py +++ b/idaes/core/util/parameter_sweep.py @@ -495,7 +495,7 @@ def execute_single_sample(self, sample_id: int): results = self.build_outputs(model, run_stats) else: # Catch any Exception for recourse - results, success = self.handle_error(model) + results = self.handle_error(model) _log.info(f"Sample {sample_id} finished.") @@ -697,8 +697,8 @@ def handle_error(self, 
model): Output of handle_solver_error callback """ if self.config.handle_solver_error is None: - # No recourse specified, so success=False and results=None - return None, False + # No recourse specified, so results=None + return None else: args = self.config.handle_solver_error_arguments if args is None: diff --git a/idaes/core/util/tests/test_model_diagnostics.py b/idaes/core/util/tests/test_model_diagnostics.py index 16cc4de6fb..1c0bdacc59 100644 --- a/idaes/core/util/tests/test_model_diagnostics.py +++ b/idaes/core/util/tests/test_model_diagnostics.py @@ -2375,6 +2375,51 @@ def ca_with_results(self): return ca + @pytest.mark.unit + def test_report_convergence_summary(self): + stream = StringIO() + + ca = IpoptConvergenceAnalysis( + model=ConcreteModel(), + ) + + ca._psweep._results = { + 0: { + "success": True, + "results": { + "iters_in_restoration": 1, + "iters_w_regularization": 0, + "numerical_issues": 10, + }, + }, + 1: { + "success": True, + "results": { + "iters_in_restoration": 0, + "iters_w_regularization": 5, + "numerical_issues": 5, + }, + }, + 2: { + "success": False, + "results": { + "iters_in_restoration": 0, + "iters_w_regularization": 0, + "numerical_issues": 0, + }, + }, + } + + ca.report_convergence_summary(stream) + + expected = """Successes: 2, Failures 1 (66.66666666666667%) +Runs with Restoration: 1 +Runs with Regularization: 1 +Runs with Numerical Issues: 2 +""" + + assert stream.getvalue() == expected + @pytest.mark.component + def test_to_dict(self, ca_with_results): outdict = ca_with_results.to_dict() diff --git a/idaes/core/util/tests/test_parameter_sweep.py b/idaes/core/util/tests/test_parameter_sweep.py index 329b7c40f2..a851903e94 100644 --- a/idaes/core/util/tests/test_parameter_sweep.py +++ b/idaes/core/util/tests/test_parameter_sweep.py @@ -957,7 +957,7 @@ def test_handle_error_none(self): results = psweep.handle_error(model) - assert results is None @pytest.mark.unit + def test_handle_error(self): 
@@ -1121,7 +1121,7 @@ def solve(model, *args, **kwargs): raise Exception("Test exception") def recourse(model): - return "foo", "bar" + return "foo" spec2 = ParameterSweepSpecification() spec2.set_sampling_method(UniformSampling) @@ -1138,7 +1138,7 @@ def recourse(model): results, success, error = psweep.execute_single_sample(1) assert results == "foo" - assert success == "bar" + assert not success assert error == "Test exception" @pytest.fixture(scope="class")