
Tweaks to convergence tester #1377

Merged · 4 commits · Mar 21, 2024
41 changes: 41 additions & 0 deletions idaes/core/util/model_diagnostics.py
@@ -3177,6 +3177,47 @@ def assert_baseline_comparison(
if any(len(v) != 0 for v in diffs.values()):
raise AssertionError("Convergence analysis does not match baseline")

def report_convergence_summary(self, stream=None):
    """
    Report a brief summary of the model convergence run.

    Args:
        stream: Optional output stream to print results to.

    Returns:
        None
    """
    if stream is None:
        stream = sys.stdout

    successes = 0
    failures = 0
    runs_w_restoration = 0
    runs_w_regularization = 0
    runs_w_num_iss = 0

    for v in self.results.values():
        # Tally successes/failures and runs that required restoration,
        # regularization, or reported numerical issues
        if v["success"]:
            successes += 1
        else:
            failures += 1

        if v["results"]["iters_in_restoration"] > 0:
            runs_w_restoration += 1
        if v["results"]["iters_w_regularization"] > 0:
            runs_w_regularization += 1
        if v["results"]["numerical_issues"] > 0:
            runs_w_num_iss += 1

    stream.write(
        f"Successes: {successes}, Failures {failures} ({100*successes/(successes+failures)}%)\n"
    )
    stream.write(f"Runs with Restoration: {runs_w_restoration}\n")
    stream.write(f"Runs with Regularization: {runs_w_regularization}\n")
    stream.write(f"Runs with Numerical Issues: {runs_w_num_iss}\n")

def to_dict(self):
"""
Serialize specification and current results to dict form
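For context, a minimal, hypothetical usage sketch of the new summary method (not part of this diff). It injects results directly through the private `_psweep._results` attribute, exactly as the unit test below does; in a real workflow the results would be populated by running the convergence analysis.

```python
# Hypothetical usage sketch of report_convergence_summary (placeholder code,
# not part of this PR). Results are injected directly, mirroring the unit test.
from io import StringIO

from pyomo.environ import ConcreteModel
from idaes.core.util.model_diagnostics import IpoptConvergenceAnalysis

ca = IpoptConvergenceAnalysis(model=ConcreteModel())
ca._psweep._results = {
    0: {
        "success": True,
        "results": {
            "iters_in_restoration": 1,
            "iters_w_regularization": 0,
            "numerical_issues": 0,
        },
    },
    1: {
        "success": False,
        "results": {
            "iters_in_restoration": 0,
            "iters_w_regularization": 0,
            "numerical_issues": 1,
        },
    },
}

# Summary goes to sys.stdout by default, or to any text stream
ca.report_convergence_summary()

buf = StringIO()
ca.report_convergence_summary(stream=buf)
assert "Successes: 1" in buf.getvalue()
```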
6 changes: 3 additions & 3 deletions idaes/core/util/parameter_sweep.py
@@ -495,7 +495,7 @@ def execute_single_sample(self, sample_id: int):
        results = self.build_outputs(model, run_stats)
    else:
        # Catch any Exception for recourse
-       results, success = self.handle_error(model)
+       results = self.handle_error(model)

_log.info(f"Sample {sample_id} finished.")

@@ -697,8 +697,8 @@ def handle_error(self, model):
Output of handle_solver_error callback
"""
    if self.config.handle_solver_error is None:
-       # No recourse specified, so success=False and results=None
-       return None, False
+       # No recourse specified, so results=None
+       return None
    else:
        args = self.config.handle_solver_error_arguments
        if args is None:
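A brief, hypothetical sketch of the updated recourse contract (not part of this diff): a user-supplied `handle_solver_error` callback now returns only a results object, and `execute_single_sample` records the sample as unsuccessful on its own rather than taking a success flag back from the callback.

```python
# Hypothetical recourse callback under the new single-return contract
# (placeholder code; the function name and returned dict are illustrative).
def fallback_results(model):
    # Return whatever partial outputs are still meaningful for the failed
    # sample; execute_single_sample reports success=False by itself.
    return {"status": "solver raised an exception", "objective": None}

# Assumed wiring via the config option referenced in handle_error above:
# psweep.config.handle_solver_error = fallback_results
```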
45 changes: 45 additions & 0 deletions idaes/core/util/tests/test_model_diagnostics.py
@@ -2375,6 +2375,51 @@ def ca_with_results(self):

return ca

@pytest.mark.unit
def test_report_convergence_summary(self):
    stream = StringIO()

    ca = IpoptConvergenceAnalysis(
        model=ConcreteModel(),
    )

    ca._psweep._results = {
        0: {
            "success": True,
            "results": {
                "iters_in_restoration": 1,
                "iters_w_regularization": 0,
                "numerical_issues": 10,
            },
        },
        1: {
            "success": True,
            "results": {
                "iters_in_restoration": 0,
                "iters_w_regularization": 5,
                "numerical_issues": 5,
            },
        },
        2: {
            "success": False,
            "results": {
                "iters_in_restoration": 0,
                "iters_w_regularization": 0,
                "numerical_issues": 0,
            },
        },
    }

    ca.report_convergence_summary(stream)

    expected = """Successes: 2, Failures 1 (66.66666666666667%)
Runs with Restoration: 1
Runs with Regularization: 1
Runs with Numerical Issues: 2
"""

    assert stream.getvalue() == expected

@pytest.mark.component
def test_to_dict(self, ca_with_results):
outdict = ca_with_results.to_dict()
6 changes: 3 additions & 3 deletions idaes/core/util/tests/test_parameter_sweep.py
@@ -957,7 +957,7 @@ def test_handle_error_none(self):

results = psweep.handle_error(model)

-    assert results == (None, False)
+    assert results is None

@pytest.mark.unit
def test_handle_error(self):
@@ -1121,7 +1121,7 @@ def solve(model, *args, **kwargs):
raise Exception("Test exception")

def recourse(model):
return "foo", "bar"
return "foo"

spec2 = ParameterSweepSpecification()
spec2.set_sampling_method(UniformSampling)
@@ -1138,7 +1138,7 @@ def recourse(model):
results, success, error = psweep.execute_single_sample(1)

assert results == "foo"
assert success == "bar"
assert not success
assert error == "Test exception"

@pytest.fixture(scope="class")