
Merge pull request #244 from jmccreight/feat_prms_tests
Feat prms tests
jmccreight authored Oct 25, 2023
2 parents 21520bc + 0e4bfce commit 3038a5b
Showing 34 changed files with 1,251 additions and 819 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -62,6 +62,7 @@ generated/
# example output data and gis data
examples/0*/
examples/snow_errors
examples/compare_pws_prms
examples/runoff_errors
examples/pynhm_gis
examples/pywatershed_gis
20 changes: 15 additions & 5 deletions autotest/test_control_read.py
@@ -1,7 +1,9 @@
import pathlib as pl

import numpy as np
import pytest

from pywatershed.utils import ControlVariables
from pywatershed.utils import ControlVariables, compare_control_files
from utils import assert_or_print


@@ -19,11 +21,9 @@ def control_keys():
def test_control_read(domain, control_keys):
control_file = domain["control_file"]
print(f"parsing...'{control_file}'")

control = ControlVariables.load(control_file)

# check control data
answers = domain["test_ans"]["control_read"]

for key, value in answers.items():
if key in (
"start_time",
@@ -32,13 +32,23 @@ def test_control_read(domain, control_keys):
answers[key] = np.datetime64(value)
elif key in ("initial_deltat",):
answers[key] = np.timedelta64(int(value), "h")

results = {
key: val
for key, val in control.control.items()
if key in answers.keys()
}
assert_or_print(results, answers, print_ans=domain["print_ans"])

print(f"success parsing...'{control_file}'")
return


def test_control_compare():
common_dir = pl.Path("../test_data/common")
n_diffs = compare_control_files(
common_dir / "control.single_hru",
common_dir / "control.multi_hru",
silent=True,
)
assert n_diffs == 5
return
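
The new test_control_compare exercises compare_control_files, now exported from pywatershed.utils. As a quick orientation, here is a minimal standalone sketch of calling it outside pytest, assuming only the signature visible in the test above (two control-file paths plus a silent flag, with the number of differing entries returned); the behavior of silent=False printing each difference is an assumption.

# Minimal sketch, assuming the compare_control_files signature used in
# test_control_compare: two PRMS control-file paths and a `silent` flag,
# with the number of differing entries returned.
import pathlib as pl

from pywatershed.utils import compare_control_files

common_dir = pl.Path("../test_data/common")
n_diffs = compare_control_files(
    common_dir / "control.single_hru",
    common_dir / "control.multi_hru",
    silent=False,  # assumed: report each difference instead of suppressing output
)
print(f"{n_diffs} control entries differ between the two files")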
10 changes: 5 additions & 5 deletions autotest/test_model.py
@@ -305,18 +305,18 @@ def test_model(domain, model_args, tmp_path):
9: {
"PRMSChannel": {
"seg_outflow": {
"drb_2yr": 1517.232887980279,
"hru_1": 13.696918669514927,
"drb_2yr": 1553.1874672413599,
"hru_1": 13.696710376067216,
"ucb_2yr": 1694.5697712423928,
},
},
},
99: {
"PRMSChannel": {
"seg_outflow": {
"drb_2yr": 2350.499659332901,
"hru_1": 22.874414994530095,
"ucb_2yr": 733.2293013532435,
"drb_2yr": 2362.7940777644653,
"hru_1": 22.877787915898086,
"ucb_2yr": 733.0192586668293,
},
},
},
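
The updated regression answers in test_model.py live in a nested dictionary keyed by evaluation time step, then process class, then variable, then domain name. A hedged sketch of how such answers might be checked against a running model follows; the model.processes[...] accessor and the mean-over-segments reduction are assumptions for illustration, since the assertion code itself is outside this hunk.

# Hypothetical check of the nested answers structure shown above. The
# model.processes[...] accessor and the mean() reduction are assumed for
# illustration; the actual assertion in test_model is not part of this hunk.
import numpy as np

answers = {
    9: {"PRMSChannel": {"seg_outflow": {"drb_2yr": 1553.1874672413599}}},
    99: {"PRMSChannel": {"seg_outflow": {"drb_2yr": 2362.7940777644653}}},
}

def check_answers(model, domain_name, istep):
    for proc_name, var_answers in answers.get(istep, {}).items():
        for var_name, domain_answers in var_answers.items():
            if domain_name not in domain_answers:
                continue
            expected = domain_answers[domain_name]
            simulated = model.processes[proc_name][var_name].mean()  # assumed accessor
            assert np.isclose(simulated, expected, rtol=1.0e-5, atol=1.0e-5)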
136 changes: 41 additions & 95 deletions autotest/test_prms_atmosphere.py
@@ -1,35 +1,19 @@
from warnings import warn

import numpy as np
import pytest

from pywatershed.atmosphere.prms_atmosphere import PRMSAtmosphere
from pywatershed.base.adapter import adapter_factory
from pywatershed.base.control import Control
from pywatershed.base.parameters import Parameters
from pywatershed.parameters import PrmsParameters
from utils_compare import compare_in_memory, compare_netcdfs

params = ["params_sep", "params_one"]


# @pytest.fixture(scope="function", params=params)
# def control(domain, request):
# if request.param == "params_one":
# params = PrmsParameters.load(domain["param_file"])
# dis = None

# else:
# # channel needs both hru and seg dis files
# dis_hru_file = domain["dir"] / "parameters_dis_hru.nc"
# dis_data = Parameters.merge(
# Parameters.from_netcdf(dis_hru_file, encoding=False),
# )
# dis = {"dis_hru": dis_data}
# compare in memory (faster) or full output files? or both!
do_compare_output_files = False
do_compare_in_memory = True
rtol = 1.0e-5
atol = 1.0e-5 # why is this relatively low accuracy?

# param_file = domain["dir"] / "parameters_PRMSAtmosphere.nc"
# params = {"PRMSAtmosphere": PrmsParameters.from_netcdf(param_file)}

# return Control.load(domain["control_file"], params=params, dis=dis)
params = ["params_sep", "params_one"]


@pytest.fixture(scope="function")
@@ -55,31 +39,11 @@ def parameters(domain, request):


def test_compare_prms(domain, control, discretization, parameters, tmp_path):
comparison_var_names = PRMSAtmosphere.get_variables()

output_dir = domain["prms_output_dir"]
cbh_dir = domain["cbh_inputs"]["prcp"].parent.resolve()

# get the answer data
comparison_var_names = [
"tmaxf",
"tminf",
"hru_ppt",
"hru_rain",
"hru_snow",
"swrad",
"potet",
"transp_on",
"tmaxc",
"tavgc",
"tminc",
"prmx",
"pptmix",
"orad_hru",
]
ans = {}
for key in comparison_var_names:
nc_pth = output_dir / f"{key}.nc"
ans[key] = adapter_factory(nc_pth, variable_name=key, control=control)

input_variables = {}
for key in PRMSAtmosphere.get_inputs():
dir = ""
@@ -96,53 +60,35 @@ def test_compare_prms(domain, control, discretization, parameters, tmp_path):
netcdf_output_dir=tmp_path,
)

all_success = True
for istep in range(control.n_times):
control.advance()
atm.advance()
atm.calculate(1.0)

# compare along the way
for key, val in ans.items():
val.advance()

for key in ans.keys():
a1 = ans[key].current
a2 = atm[key].current

tol = 1e-5
if key == "swrad":
tol = 5e-4
warn(f"using tol = {tol} for variable {key}")
if key == "tavgc":
tol = 1e-5
warn(f"using tol = {tol} for variable {key}")

success_a = np.allclose(a2, a1, atol=tol, rtol=0.00)
success_r = np.allclose(a2, a1, atol=0.00, rtol=tol)
success = False
if (not success_a) and (not success_r):
diff = a2 - a1
diffratio = abs(diff / a2)
if (diffratio < 1e-6).all():
success = True
continue
all_success = False
diffmin = diff.min()
diffmax = diff.max()
abs_diff = abs(diff)
absdiffmax = abs_diff.max()
wh_absdiffmax = np.where(abs_diff)[0]
print(f"time step {istep}")
print(f"output variable {key}")
print(f"prms {a1.min()} {a1.max()}")
print(f"pywatershed {a2.min()} {a2.max()}")
print(f"diff {diffmin} {diffmax}")
print(f"absdiffmax {absdiffmax}")
print(f"wh_absdiffmax {wh_absdiffmax}")
assert success

atm.finalize()

if not all_success:
raise Exception("pywatershed results do not match prms results")
if do_compare_in_memory:
answers = {}
for var in comparison_var_names:
var_pth = output_dir / f"{var}.nc"
answers[var] = adapter_factory(
var_pth, variable_name=var, control=control
)

# check the advance/calculate the state
tmaxf_id = id(atm.tmaxf)

for ii in range(control.n_times):
control.advance()
atm.advance()
if ii == 0:
atm.output()
atm.calculate(1.0)

compare_in_memory(atm, answers, atol=atol, rtol=rtol)
assert id(atm.tmaxf) == tmaxf_id

if do_compare_output_files:
compare_netcdfs(
comparison_var_names,
tmp_path,
output_dir,
atol=atol,
rtol=rtol,
print_var_max_errs=False,
)

return
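
The rewritten atmosphere test drops the hand-rolled per-variable tolerance loop in favor of compare_in_memory and compare_netcdfs from utils_compare, a module not shown in this diff. As a rough sketch of what the in-memory comparison does at each time step — advance the answer adapters and compare current values within atol/rtol — something along these lines could be assumed; the real helper's signature and behavior may differ.

# Rough sketch of an in-memory comparison like the compare_in_memory call
# above; utils_compare is not shown in this diff, so the names and behavior
# here are assumptions based on how the tests invoke it.
import numpy as np

def compare_in_memory(process, answers, atol=1.0e-5, rtol=1.0e-5, skip_missing_ans=False):
    for var in process.get_variables():
        if var not in answers:
            if skip_missing_ans:
                continue
            raise KeyError(f"no answer data for variable '{var}'")
        answers[var].advance()  # step the answer adapter to the current time
        np.testing.assert_allclose(
            process[var], answers[var].current, atol=atol, rtol=rtol, err_msg=var
        )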
78 changes: 43 additions & 35 deletions autotest/test_prms_canopy.py
@@ -1,12 +1,17 @@
import pathlib as pl

import numpy as np
import pytest

from pywatershed.base.adapter import adapter_factory
from pywatershed.base.control import Control
from pywatershed.hydrology.prms_canopy import PRMSCanopy, has_prmscanopy_f
from pywatershed.parameters import Parameters, PrmsParameters
from utils_compare import compare_in_memory, compare_netcdfs

# compare in memory (faster) or full output files? or both!
do_compare_output_files = False
do_compare_in_memory = True
rtol = atol = 1.0e-5

calc_methods = ("numpy", "numba", "fortran")
params = ("params_sep", "params_one")
@@ -44,7 +49,6 @@ def test_compare_prms(
)

tmp_path = pl.Path(tmp_path)
output_dir = domain["prms_output_dir"]

# get the answer data
comparison_var_names = [
@@ -56,19 +60,20 @@ def test_compare_prms(
"hru_intcpstor",
"hru_intcpevap",
"intcp_changeover",
"intcp_form",
# "intcp_transp_on", # not a prms variable
"hru_intcpstor_change",
"hru_intcpstor_old",
]
ans = {}
for key in comparison_var_names:
nc_pth = output_dir / f"{key}.nc"
ans[key] = adapter_factory(nc_pth, variable_name=key, control=control)

# setup the canopy
output_dir = domain["prms_output_dir"]

input_variables = {}
for key in PRMSCanopy.get_inputs():
nc_pth = output_dir / f"{key}.nc"
input_variables[key] = nc_pth

cnp = PRMSCanopy(
canopy = PRMSCanopy(
control=control,
discretization=discretization,
parameters=parameters,
@@ -77,34 +82,37 @@ def test_compare_prms(
calc_method=calc_method,
)

all_success = True
if do_compare_output_files:
nc_parent = tmp_path / domain["domain_name"]
canopy.initialize_netcdf(nc_parent)

if do_compare_in_memory:
answers = {}
for var in comparison_var_names:
var_pth = output_dir / f"{var}.nc"
answers[var] = adapter_factory(
var_pth, variable_name=var, control=control
)

for istep in range(control.n_times):
control.advance()
cnp.advance()
cnp.calculate(1.0)

# compare along the way
atol = 1.0e-5
for key, val in ans.items():
val.advance()
for key in ans.keys():
a1 = ans[key].current
a2 = cnp[key]
success = np.isclose(a2, a1, atol=atol).all()
if not success:
all_success = False
diff = a1 - a2
diffmin = diff.min()
diffmax = diff.max()
print(f"time step {istep}")
print(f"output variable {key}")
print(f"prms {a1.min()} {a1.max()}")
print(f"pywatershed {a2.min()} {a2.max()}")
print(f"diff {diffmin} {diffmax}")

cnp.finalize()

if not all_success:
raise Exception("pywatershed results do not match prms results")
canopy.advance()
canopy.calculate(1.0)
canopy.output()
if do_compare_in_memory:
compare_in_memory(
canopy, answers, atol=atol, rtol=rtol, skip_missing_ans=True
)

canopy.finalize()

if do_compare_output_files:
compare_netcdfs(
comparison_var_names,
tmp_path / domain["domain_name"],
output_dir,
atol=atol,
rtol=rtol,
)

return
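
When do_compare_output_files is enabled, both rewritten tests write NetCDF output and defer to compare_netcdfs. Assuming one file per variable named "<var>.nc" in each directory, a file-based comparison of that shape could look roughly like the following sketch using xarray; it is not the actual utils_compare implementation.

# Sketch of a file-based comparison along the lines of the compare_netcdfs
# calls above, assuming one "<var>.nc" file per variable in each directory;
# the real helper in utils_compare may differ.
import numpy as np
import xarray as xr

def compare_netcdfs(var_names, result_dir, answer_dir, atol=1.0e-5, rtol=1.0e-5):
    for var in var_names:
        result = xr.open_dataarray(result_dir / f"{var}.nc")
        answer = xr.open_dataarray(answer_dir / f"{var}.nc")
        np.testing.assert_allclose(
            result.values, answer.values, atol=atol, rtol=rtol, err_msg=var
        )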
