From 19bbf6ddc391ba27aea6ae4a1b0e5704736a21f2 Mon Sep 17 00:00:00 2001
From: Juan Orduz
Date: Thu, 2 May 2024 22:29:06 +0200
Subject: [PATCH] Pass conv mode to adstock functions (#665)

---
 pymc_marketing/mmm/transformers.py | 48 ++++++++++++++++++++++++++---
 tests/mmm/test_transformers.py     | 49 ++++++++++++++++++++++++------
 2 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/pymc_marketing/mmm/transformers.py b/pymc_marketing/mmm/transformers.py
index 131a0cca..67db1381 100644
--- a/pymc_marketing/mmm/transformers.py
+++ b/pymc_marketing/mmm/transformers.py
@@ -139,7 +139,12 @@ def batched_convolution(
 
 
 def geometric_adstock(
-    x, alpha: float = 0.0, l_max: int = 12, normalize: bool = False, axis: int = 0
+    x,
+    alpha: float = 0.0,
+    l_max: int = 12,
+    normalize: bool = False,
+    axis: int = 0,
+    mode: ConvMode = ConvMode.After,
 ):
     R"""Geometric adstock transformation.
 
@@ -189,6 +194,17 @@
         Maximum duration of carryover effect.
     normalize : bool, by default False
         Whether to normalize the weights.
+    axis : int, by default 0
+        The axis of ``x`` along which to apply the convolution.
+    mode : ConvMode, optional
+        The convolution mode determines how the convolution is applied at the boundaries
+        of the input signal, denoted as "x." The default mode is ConvMode.After.
+
+        - ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
+        - ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
+            similar to the wow factor.
+        - ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
+            where the effect overlaps with both preceding and succeeding elements.
 
     Returns
     -------
@@ -203,7 +219,7 @@
 
     w = pt.power(pt.as_tensor(alpha)[..., None], pt.arange(l_max, dtype=x.dtype))
     w = w / pt.sum(w, axis=-1, keepdims=True) if normalize else w
-    return batched_convolution(x, w, axis=axis, mode=ConvMode.After)
+    return batched_convolution(x, w, axis=axis, mode=mode)
 
 
 def delayed_adstock(
@@ -213,6 +229,7 @@
     l_max: int = 12,
     normalize: bool = False,
     axis: int = 0,
+    mode: ConvMode = ConvMode.After,
 ):
     R"""Delayed adstock transformation.
 
@@ -259,6 +276,17 @@
         Maximum duration of carryover effect.
     normalize : bool, by default False
         Whether to normalize the weights.
+    axis : int, by default 0
+        The axis of ``x`` along which to apply the convolution.
+    mode : ConvMode, optional
+        The convolution mode determines how the convolution is applied at the boundaries
+        of the input signal, denoted as "x." The default mode is ConvMode.After.
+
+        - ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
+        - ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
+            similar to the wow factor.
+        - ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
+            where the effect overlaps with both preceding and succeeding elements.
 
     Returns
     -------
@@ -275,7 +303,7 @@
         (pt.arange(l_max, dtype=x.dtype) - pt.as_tensor(theta)[..., None]) ** 2,
     )
     w = w / pt.sum(w, axis=-1, keepdims=True) if normalize else w
-    return batched_convolution(x, w, axis=axis, mode=ConvMode.After)
+    return batched_convolution(x, w, axis=axis, mode=mode)
 
 
 def weibull_adstock(
@@ -284,6 +312,7 @@
     k=1,
     l_max: int = 12,
     axis: int = 0,
+    mode: ConvMode = ConvMode.After,
     type: WeibullType | str = WeibullType.PDF,
 ):
     R"""Weibull Adstocking Transformation.
@@ -349,6 +378,17 @@
         Shape parameter of the Weibull distribution. Must be positive.
     l_max : int, by default 12
         Maximum duration of carryover effect.
+    axis : int, by default 0
+        The axis of ``x`` along which to apply the convolution.
+    mode : ConvMode, optional
+        The convolution mode determines how the convolution is applied at the boundaries
+        of the input signal, denoted as "x." The default mode is ConvMode.After.
+
+        - ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
+        - ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
+            similar to the wow factor.
+        - ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
+            where the effect overlaps with both preceding and succeeding elements.
     type : WeibullType or str, by default WeibullType.PDF
         Type of Weibull adstock transformation to be applied (PDF or CDF).
 
@@ -374,7 +414,7 @@
         w = pt.cumprod(padded_w, axis=-1)
     else:
        raise ValueError(f"Wrong WeibullType: {type}, expected of WeibullType")
-    return batched_convolution(x, w, axis=axis)
+    return batched_convolution(x, w, axis=axis, mode=mode)
 
 
 def logistic_saturation(x, lam: npt.NDArray[np.float_] | float = 0.5):
diff --git a/tests/mmm/test_transformers.py b/tests/mmm/test_transformers.py
index 00e9cdd4..1b430731 100644
--- a/tests/mmm/test_transformers.py
+++ b/tests/mmm/test_transformers.py
@@ -104,9 +104,14 @@ def test_batched_convolution_broadcasting():
 
 
 class TestsAdstockTransformers:
-    def test_geometric_adstock_x_zero(self):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_geometric_adstock_x_zero(self, mode):
         x = np.zeros(shape=(100))
-        y = geometric_adstock(x=x, alpha=0.2)
+        y = geometric_adstock(x=x, alpha=0.2, mode=mode)
         np.testing.assert_array_equal(x=x, y=y.eval())
 
     @pytest.mark.parametrize(
@@ -127,9 +132,14 @@ def test_geometric_adstock_good_alpha(self, x, alpha, l_max):
         assert y_np[1] == x[1] + alpha * x[0]
         assert y_np[2] == x[2] + alpha * x[1] + (alpha**2) * x[0]
 
-    def test_delayed_adstock_output_type(self):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_delayed_adstock_output_type(self, mode):
         x = np.ones(shape=(100))
-        y = delayed_adstock(x=x, alpha=0.5, theta=6, l_max=7)
+        y = delayed_adstock(x=x, alpha=0.5, theta=6, l_max=7, mode=mode)
         assert isinstance(y, TensorVariable)
         assert isinstance(y.eval(), np.ndarray)
 
@@ -138,23 +148,35 @@ def test_delayed_adstock_x_zero(self):
         x = np.zeros(shape=(100))
         y = delayed_adstock(x=x, alpha=0.2, theta=2, l_max=4)
         np.testing.assert_array_equal(x=x, y=y.eval())
 
-    def test_geometric_adstock_vectorized(self, dummy_design_matrix):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_geometric_adstock_vectorized(self, dummy_design_matrix, mode):
         x = dummy_design_matrix.copy()
         x_tensor = pt.as_tensor_variable(x)
         alpha = [0.9, 0.33, 0.5, 0.1, 0.0]
         alpha_tensor = pt.as_tensor_variable(alpha)
-        y_tensor = geometric_adstock(x=x_tensor, alpha=alpha_tensor, l_max=12, axis=0)
+        y_tensor = geometric_adstock(
+            x=x_tensor, alpha=alpha_tensor, l_max=12, axis=0, mode=mode
+        )
         y = y_tensor.eval()
         y_tensors = [
-            geometric_adstock(x=x[:, i], alpha=alpha[i], l_max=12)
+            geometric_adstock(x=x[:, i], alpha=alpha[i], l_max=12, mode=mode)
             for i in range(x.shape[1])
         ]
         ys = np.concatenate([y_t.eval()[..., None] for y_t in y_tensors], axis=1)
         assert y.shape == x.shape
         np.testing.assert_almost_equal(actual=y, desired=ys, decimal=12)
 
-    def test_delayed_adstock_vectorized(self, dummy_design_matrix):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_delayed_adstock_vectorized(self, dummy_design_matrix, mode):
         x = dummy_design_matrix
         x_tensor = pt.as_tensor_variable(x)
         alpha = [0.9, 0.33, 0.5, 0.1, 0.0]
@@ -162,12 +184,19 @@
         theta = [0, 1, 2, 3, 4]
         theta_tensor = pt.as_tensor_variable(theta)
         y_tensor = delayed_adstock(
-            x=x_tensor, alpha=alpha_tensor, theta=theta_tensor, l_max=12, axis=0
+            x=x_tensor,
+            alpha=alpha_tensor,
+            theta=theta_tensor,
+            l_max=12,
+            axis=0,
+            mode=mode,
         )
         y = y_tensor.eval()
         y_tensors = [
-            delayed_adstock(x=x[:, i], alpha=alpha[i], theta=theta[i], l_max=12)
+            delayed_adstock(
+                x=x[:, i], alpha=alpha[i], theta=theta[i], l_max=12, mode=mode
+            )
             for i in range(x.shape[1])
         ]
         ys = np.concatenate([y_t.eval()[..., None] for y_t in y_tensors], axis=1)