Pass conv mode to adstock functions #665

Merged · 3 commits · May 2, 2024
48 changes: 44 additions & 4 deletions pymc_marketing/mmm/transformers.py
@@ -139,7 +139,12 @@


def geometric_adstock(
x, alpha: float = 0.0, l_max: int = 12, normalize: bool = False, axis: int = 0
x,
alpha: float = 0.0,
l_max: int = 12,
normalize: bool = False,
axis: int = 0,
mode: ConvMode = ConvMode.After,
):
R"""Geometric adstock transformation.

@@ -189,6 +194,17 @@
Maximum duration of carryover effect.
normalize : bool, by default False
Whether to normalize the weights.
axis : int
The axis of ``x`` along which to apply the convolution.
mode : ConvMode, optional
The convolution mode determines how the convolution is applied at the boundaries
of the input signal, denoted as "x". The default mode is ConvMode.After.

- ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
- ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
similar to the wow factor.
- ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
where the effect overlaps with both preceding and succeeding elements.

Returns
-------
@@ -203,7 +219,7 @@

w = pt.power(pt.as_tensor(alpha)[..., None], pt.arange(l_max, dtype=x.dtype))
w = w / pt.sum(w, axis=-1, keepdims=True) if normalize else w
return batched_convolution(x, w, axis=axis, mode=ConvMode.After)
return batched_convolution(x, w, axis=axis, mode=mode)

Codecov (codecov/patch) check warning: added line pymc_marketing/mmm/transformers.py#L222 was not covered by tests.
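Not part of the diff, but as a quick illustration of the forwarded `mode` argument on `geometric_adstock`: a minimal sketch using an impulse input (the parameter values are arbitrary assumptions for demonstration).

```python
import numpy as np
from pymc_marketing.mmm.transformers import ConvMode, geometric_adstock

# A single spend impulse in the middle of the series makes it easy to
# see where each mode places the carryover effect.
x = np.zeros(12)
x[5] = 100.0

for mode in (ConvMode.After, ConvMode.Before, ConvMode.Overlap):
    y = geometric_adstock(x=x, alpha=0.5, l_max=4, normalize=True, mode=mode)
    print(mode, np.round(y.eval(), 3))
# ConvMode.After decays the impulse forward in time (classic adstock),
# ConvMode.Before leads it, and ConvMode.Overlap spreads it in both directions.
```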


def delayed_adstock(
@@ -213,6 +229,7 @@
l_max: int = 12,
normalize: bool = False,
axis: int = 0,
mode: ConvMode = ConvMode.After,
):
R"""Delayed adstock transformation.

@@ -259,6 +276,17 @@
Maximum duration of carryover effect.
normalize : bool, by default False
Whether to normalize the weights.
axis : int
The axis of ``x`` along which to apply the convolution.
mode : ConvMode, optional
The convolution mode determines how the convolution is applied at the boundaries
of the input signal, denoted as "x". The default mode is ConvMode.After.

- ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
- ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
similar to the wow factor.
- ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
where the effect overlaps with both preceding and succeeding elements.

Returns
-------
@@ -275,7 +303,7 @@
(pt.arange(l_max, dtype=x.dtype) - pt.as_tensor(theta)[..., None]) ** 2,
)
w = w / pt.sum(w, axis=-1, keepdims=True) if normalize else w
return batched_convolution(x, w, axis=axis, mode=ConvMode.After)
return batched_convolution(x, w, axis=axis, mode=mode)

Codecov (codecov/patch) check warning: added line pymc_marketing/mmm/transformers.py#L306 was not covered by tests.
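Similarly, a hedged sketch (not from the diff) of `delayed_adstock` with the forwarded `mode`; the values below are illustrative only.

```python
import numpy as np
from pymc_marketing.mmm.transformers import ConvMode, delayed_adstock

x = np.zeros(12)
x[5] = 100.0

# theta shifts the peak of the carryover weights; mode only controls on
# which side(s) of the input the convolution is applied at the boundaries.
y_after = delayed_adstock(x=x, alpha=0.5, theta=2, l_max=6, mode=ConvMode.After)
y_overlap = delayed_adstock(x=x, alpha=0.5, theta=2, l_max=6, mode=ConvMode.Overlap)
print(np.round(y_after.eval(), 3))
print(np.round(y_overlap.eval(), 3))
```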


def weibull_adstock(
@@ -284,6 +312,7 @@
k=1,
l_max: int = 12,
axis: int = 0,
mode: ConvMode = ConvMode.After,
type: WeibullType | str = WeibullType.PDF,
):
R"""Weibull Adstocking Transformation.
@@ -349,6 +378,17 @@
Shape parameter of the Weibull distribution. Must be positive.
l_max : int, by default 12
Maximum duration of carryover effect.
axis : int
The axis of ``x`` along which to apply the convolution.
mode : ConvMode, optional
The convolution mode determines how the convolution is applied at the boundaries
of the input signal, denoted as "x". The default mode is ConvMode.After.

- ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
- ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
similar to the wow factor.
- ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
where the effect overlaps with both preceding and succeeding elements.
type : WeibullType or str, by default WeibullType.PDF
Type of Weibull adstock transformation to be applied (PDF or CDF).

@@ -374,7 +414,7 @@
w = pt.cumprod(padded_w, axis=-1)
else:
raise ValueError(f"Wrong WeibullType: {type}, expected of WeibullType")
return batched_convolution(x, w, axis=axis)
return batched_convolution(x, w, axis=axis, mode=mode)

Codecov (codecov/patch) check warning: added line pymc_marketing/mmm/transformers.py#L417 was not covered by tests.
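And a sketch (not from the diff) for `weibull_adstock`; the `lam`/`k` parameter names follow the upstream signature and the values are assumptions for demonstration.

```python
import numpy as np
from pymc_marketing.mmm.transformers import ConvMode, WeibullType, weibull_adstock

x = np.zeros(12)
x[5] = 100.0

# Both Weibull variants now accept the same mode argument, which is
# forwarded to batched_convolution just like in the other adstocks.
y_pdf = weibull_adstock(x=x, lam=2, k=1.5, l_max=6, mode=ConvMode.Overlap, type=WeibullType.PDF)
y_cdf = weibull_adstock(x=x, lam=2, k=1.5, l_max=6, mode=ConvMode.Overlap, type=WeibullType.CDF)
print(np.round(y_pdf.eval(), 3))
print(np.round(y_cdf.eval(), 3))
```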


def logistic_saturation(x, lam: npt.NDArray[np.float_] | float = 0.5):
49 changes: 39 additions & 10 deletions tests/mmm/test_transformers.py
@@ -104,9 +104,14 @@ def test_batched_convolution_broadcasting():


class TestsAdstockTransformers:
def test_geometric_adstock_x_zero(self):
@pytest.mark.parametrize(
argnames="mode",
argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
ids=["After", "Before", "Overlap"],
)
def test_geometric_adstock_x_zero(self, mode):
x = np.zeros(shape=(100))
y = geometric_adstock(x=x, alpha=0.2)
y = geometric_adstock(x=x, alpha=0.2, mode=mode)
np.testing.assert_array_equal(x=x, y=y.eval())

@pytest.mark.parametrize(
@@ -127,9 +132,14 @@ def test_geometric_adstock_good_alpha(self, x, alpha, l_max):
assert y_np[1] == x[1] + alpha * x[0]
assert y_np[2] == x[2] + alpha * x[1] + (alpha**2) * x[0]

def test_delayed_adstock_output_type(self):
@pytest.mark.parametrize(
argnames="mode",
argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
ids=["After", "Before", "Overlap"],
)
def test_delayed_adstock_output_type(self, mode):
x = np.ones(shape=(100))
y = delayed_adstock(x=x, alpha=0.5, theta=6, l_max=7)
y = delayed_adstock(x=x, alpha=0.5, theta=6, l_max=7, mode=mode)
assert isinstance(y, TensorVariable)
assert isinstance(y.eval(), np.ndarray)

@@ -138,36 +148,55 @@ def test_delayed_adstock_x_zero(self):
y = delayed_adstock(x=x, alpha=0.2, theta=2, l_max=4)
np.testing.assert_array_equal(x=x, y=y.eval())

def test_geometric_adstock_vectorized(self, dummy_design_matrix):
@pytest.mark.parametrize(
argnames="mode",
argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
ids=["After", "Before", "Overlap"],
)
def test_geometric_adstock_vectorized(self, dummy_design_matrix, mode):
x = dummy_design_matrix.copy()
x_tensor = pt.as_tensor_variable(x)
alpha = [0.9, 0.33, 0.5, 0.1, 0.0]
alpha_tensor = pt.as_tensor_variable(alpha)
y_tensor = geometric_adstock(x=x_tensor, alpha=alpha_tensor, l_max=12, axis=0)
y_tensor = geometric_adstock(
x=x_tensor, alpha=alpha_tensor, l_max=12, axis=0, mode=mode
)
y = y_tensor.eval()

y_tensors = [
geometric_adstock(x=x[:, i], alpha=alpha[i], l_max=12)
geometric_adstock(x=x[:, i], alpha=alpha[i], l_max=12, mode=mode)
for i in range(x.shape[1])
]
ys = np.concatenate([y_t.eval()[..., None] for y_t in y_tensors], axis=1)
assert y.shape == x.shape
np.testing.assert_almost_equal(actual=y, desired=ys, decimal=12)

def test_delayed_adstock_vectorized(self, dummy_design_matrix):
@pytest.mark.parametrize(
argnames="mode",
argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
ids=["After", "Before", "Overlap"],
)
def test_delayed_adstock_vectorized(self, dummy_design_matrix, mode):
x = dummy_design_matrix
x_tensor = pt.as_tensor_variable(x)
alpha = [0.9, 0.33, 0.5, 0.1, 0.0]
alpha_tensor = pt.as_tensor_variable(alpha)
theta = [0, 1, 2, 3, 4]
theta_tensor = pt.as_tensor_variable(theta)
y_tensor = delayed_adstock(
x=x_tensor, alpha=alpha_tensor, theta=theta_tensor, l_max=12, axis=0
x=x_tensor,
alpha=alpha_tensor,
theta=theta_tensor,
l_max=12,
axis=0,
mode=mode,
)
y = y_tensor.eval()

y_tensors = [
delayed_adstock(x=x[:, i], alpha=alpha[i], theta=theta[i], l_max=12)
delayed_adstock(
x=x[:, i], alpha=alpha[i], theta=theta[i], l_max=12, mode=mode
)
for i in range(x.shape[1])
]
ys = np.concatenate([y_t.eval()[..., None] for y_t in y_tensors], axis=1)
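Finally, a small (hypothetical) convenience for exercising only the new mode-parametrized cases locally; the `-k` expression matches the parametrize ids added above.

```python
# Run only the parametrized cases whose id contains "Overlap".
import pytest

pytest.main(["tests/mmm/test_transformers.py", "-k", "Overlap", "-q"])
```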