added percent bias score (pbias) #639

Merged: 8 commits, Sep 5, 2024
1 change: 1 addition & 0 deletions docs/api.md
@@ -22,6 +22,7 @@
.. autofunction:: scores.continuous.flip_flop_index_proportion_exceeding
.. autofunction:: scores.continuous.correlation.pearsonr
.. autofunction:: scores.continuous.multiplicative_bias
.. autofunction:: scores.continuous.additive_bias_percentage
.. autofunction:: scores.continuous.isotonic_fit
.. autofunction:: scores.continuous.consistent_expectile_score
.. autofunction:: scores.continuous.consistent_quantile_score
4 changes: 4 additions & 0 deletions docs/included.md
@@ -13,6 +13,10 @@
- [API](api.md#scores.continuous.additive_bias)
- [Tutorial](project:./tutorials/Additive_and_multiplicative_bias.md)
- [Mean Error (WWRP/WGNE Joint Working Group on Forecast Verification Research)](https://www.cawcr.gov.au/projects/verification/#meanerror)
* - Additive Bias Percentage
- [API](api.md#scores.continuous.additive_bias_percentage)
- [Tutorial](project:./tutorials/Additive_and_multiplicative_bias.md)
  - [Percent Bias (hydroGOF R package)](https://search.r-project.org/CRAN/refmans/hydroGOF/html/pbias.html)
* - Consistent Expectile Score
- [API](api.md#scores.continuous.consistent_expectile_score)
- [Tutorial](project:./tutorials/Consistent_Scores.md)
2 changes: 2 additions & 0 deletions src/scores/continuous/__init__.py
@@ -16,6 +16,7 @@
from scores.continuous.quantile_loss_impl import quantile_score
from scores.continuous.standard_impl import (
additive_bias,
additive_bias_percentage,
mae,
mean_error,
mse,
@@ -44,6 +45,7 @@
"additive_bias",
"mean_error",
"multiplicative_bias",
"additive_bias_percentage",
"isotonic_fit",
"consistent_expectile_score",
"consistent_huber_score",
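For context, a quick check (not part of this PR; it assumes `scores` is installed with these changes) that the new name is exported alongside the existing bias functions:

```python
# Illustrative import check only -- not part of the PR diff.
from scores.continuous import additive_bias, additive_bias_percentage

# Both callables are exposed from scores.continuous after this change.
print(additive_bias.__name__, additive_bias_percentage.__name__)
```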
54 changes: 54 additions & 0 deletions src/scores/continuous/standard_impl.py
@@ -341,3 +341,57 @@ def multiplicative_bias(
fcst, obs = broadcast_and_match_nan(fcst, obs)
multi_bias = fcst.mean(dim=reduce_dims) / obs.mean(dim=reduce_dims)
return multi_bias


def additive_bias_percentage(
fcst: XarrayLike,
obs: XarrayLike,
*,
reduce_dims: Optional[FlexibleDimensionTypes] = None,
preserve_dims: Optional[FlexibleDimensionTypes] = None,
weights: Optional[XarrayLike] = None,
) -> XarrayLike:
"""
Calculates the additive bias percentage (percent bias), which is the ratio of the additive bias (mean error) to the mean of the observations, expressed as a percentage.

It is defined as

.. math::
\\text{Additive bias percentage} = 100 \\cdot \\frac{\\sum_{i=1}^{N}(x_i - y_i)}{\\sum_{i=1}^{N} y_i}
\\text{where } x = \\text{the forecast, and } y = \\text{the observation}


See "pbias" section at https://search.r-project.org/CRAN/refmans/hydroGOF/html/pbias.html for more information

Args:
fcst: Forecast or predicted variables.
obs: Observed variables.
reduce_dims: Optionally specify which dimensions to reduce when
calculating the additive bias percentage. All other dimensions will be preserved.
preserve_dims: Optionally specify which dimensions to preserve when
calculating the additive bias percentage. All other dimensions will be reduced. As a
special case, 'all' will allow all dimensions to be preserved. In
this case, the result will be in the same shape/dimensionality
as the forecast, and the errors will be the error at each
point (i.e. single-value comparison against observed), and the
forecast and observed dimensions must match precisely.
weights: Optionally provide an array for weighted averaging (e.g. by area, by latitude,
by population, custom)

Returns:
An xarray object with the additive bias percentage of a forecast.
"""
reduce_dims = scores.utils.gather_dimensions(
fcst.dims, obs.dims, reduce_dims=reduce_dims, preserve_dims=preserve_dims
)
fcst = scores.functions.apply_weights(fcst, weights=weights)
obs = scores.functions.apply_weights(obs, weights=weights)

# Need to broadcast and match NaNs so that the fcst mean and obs mean are for the
# same points
fcst, obs = broadcast_and_match_nan(fcst, obs)
error = fcst - obs

pbias = 100 * error.mean(dim=reduce_dims) / obs.mean(dim=reduce_dims)
return pbias
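For reference, a minimal usage sketch of the new function; the data values below are illustrative and are not taken from this PR:

```python
# Minimal usage sketch (illustrative values, assuming scores is installed with this change).
import numpy as np
import xarray as xr
import scores

fcst = xr.DataArray(np.array([1.0, 2.0, 3.0]), dims="time")
obs = xr.DataArray(np.array([2.0, 2.0, 4.0]), dims="time")

# mean(fcst - obs) = -2/3 and mean(obs) = 8/3, so the score is 100 * (-2/3) / (8/3) = -25.0,
# i.e. the forecast underestimates the observations by 25% on average.
pbias = scores.continuous.additive_bias_percentage(fcst, obs)
print(float(pbias))  # -25.0
```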
70 changes: 70 additions & 0 deletions tests/continuous/test_standard.py
@@ -661,6 +661,34 @@ def test_rmse_angular():
EXP_DS_BIAS1 = xr.Dataset({"a": EXP_BIAS1, "b": -EXP_BIAS1})
EXP_DS_BIAS2 = xr.Dataset({"a": EXP_BIAS4, "b": EXP_BIAS5})

## for additive_bias_percentage
EXP_PBIAS1 = xr.DataArray(
np.array([-50, -100.0, (0.5 / 3 + 0.5 / 3) / (-0.5 / 3) * 100]),
dims=("space"),
coords=[
("space", ["w", "x", "y"]),
],
)
EXP_PBIAS2 = xr.DataArray(
np.array([100.0, np.inf, (0.5 / 3 + 0.5 / 3) / (-0.5 / 3) * 100]),
dims=("space"),
coords=[
("space", ["w", "x", "y"]),
],
)

EXP_PBIAS3 = xr.DataArray(
np.array([-50.0, -100.0, -75.0]),
dims=("space"),
coords=[
("space", ["w", "x", "y"]),
],
)

EXP_PBIAS4 = xr.DataArray(np.array(-13 / 15.5 * 100))

EXP_DS_PBIAS1 = xr.Dataset({"a": EXP_PBIAS1, "b": EXP_PBIAS2})


@pytest.mark.parametrize(
("fcst", "obs", "reduce_dims", "preserve_dims", "weights", "expected"),
@@ -745,3 +773,45 @@ def test_multiplicative_bias_dask():
result = result.compute()
assert isinstance(result.data, np.ndarray)
xr.testing.assert_equal(result, EXP_BIAS6)


@pytest.mark.parametrize(
("fcst", "obs", "reduce_dims", "preserve_dims", "weights", "expected"),
[
# Check preserve dim arg
(DA1_BIAS, DA2_BIAS, None, "space", None, EXP_PBIAS1),
# Check divide by zero returns a np.inf
(DA2_BIAS, DA1_BIAS, None, "space", None, EXP_PBIAS2),
# Check weighting works
(DA1_BIAS, DA3_BIAS, None, "space", BIAS_WEIGHTS, EXP_PBIAS3),
# Check reduce dim arg
(DA1_BIAS, DA2_BIAS, "time", None, None, EXP_PBIAS1),
# Reduce all
(DA1_BIAS, DA2_BIAS, None, None, None, EXP_PBIAS4),
# Test with Dataset
(DS_BIAS1, DS_BIAS2, None, "space", None, EXP_DS_PBIAS1),
],
)
def test_additive_bias_percentage(fcst, obs, reduce_dims, preserve_dims, weights, expected):
"""
Tests continuous.additive_bias_percentage
"""
result = scores.continuous.additive_bias_percentage(
fcst, obs, reduce_dims=reduce_dims, preserve_dims=preserve_dims, weights=weights
)
xr.testing.assert_allclose(result, expected, rtol=1e-10, atol=1e-10)
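As an aside, a small sketch (relying on standard NumPy float division semantics, not on anything added in this PR) of the divide-by-zero behaviour exercised by the parametrized case above:

```python
# Illustrative only: when the mean observation is zero, NumPy float division
# yields np.inf (with a RuntimeWarning) rather than raising an error.
import numpy as np
import xarray as xr
import scores

fcst = xr.DataArray(np.array([1.0, 1.0]), dims="time")
obs = xr.DataArray(np.array([-1.0, 1.0]), dims="time")  # mean(obs) == 0

result = scores.continuous.additive_bias_percentage(fcst, obs)
print(float(result))  # inf
```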


def test_additive_bias_percentage_dask():
"""
Tests that continuous.additive_bias_percentage works with Dask
"""
fcst = DA1_BIAS.chunk()
obs = DA3_BIAS.chunk()
weights = BIAS_WEIGHTS.chunk()
result = scores.continuous.additive_bias_percentage(fcst, obs, preserve_dims="space", weights=weights)
assert isinstance(result.data, dask.array.Array)
result = result.compute()
assert isinstance(result.data, np.ndarray)
xr.testing.assert_equal(result, EXP_PBIAS3)
76 changes: 66 additions & 10 deletions tutorials/Additive_and_multiplicative_bias.ipynb

Large diffs are not rendered by default.
