Commit 891f55f

juanitorduz authored and twiecki committed
Pass conv mode to adstock functions (#665)
1 parent fd58fcf · commit 891f55f

2 files changed: +83 −14 lines

pymc_marketing/mmm/transformers.py

Lines changed: 44 additions & 4 deletions
@@ -139,7 +139,12 @@ def batched_convolution(
 
 
 def geometric_adstock(
-    x, alpha: float = 0.0, l_max: int = 12, normalize: bool = False, axis: int = 0
+    x,
+    alpha: float = 0.0,
+    l_max: int = 12,
+    normalize: bool = False,
+    axis: int = 0,
+    mode: ConvMode = ConvMode.After,
 ):
     R"""Geometric adstock transformation.
 
@@ -189,6 +194,17 @@ def geometric_adstock(
         Maximum duration of carryover effect.
     normalize : bool, by default False
         Whether to normalize the weights.
+    axis : int
+        The axis of ``x`` along witch to apply the convolution
+    mode : ConvMode, optional
+        The convolution mode determines how the convolution is applied at the boundaries
+        of the input signal, denoted as "x." The default mode is ConvMode.Before.
+
+        - ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
+        - ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
+          similar to the wow factor.
+        - ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
+          where the effect overlaps with both preceding and succeeding elements.
 
     Returns
     -------
@@ -203,7 +219,7 @@ def geometric_adstock(
 
     w = pt.power(pt.as_tensor(alpha)[..., None], pt.arange(l_max, dtype=x.dtype))
     w = w / pt.sum(w, axis=-1, keepdims=True) if normalize else w
-    return batched_convolution(x, w, axis=axis, mode=ConvMode.After)
+    return batched_convolution(x, w, axis=axis, mode=mode)
 
 
 def delayed_adstock(
@@ -213,6 +229,7 @@
     l_max: int = 12,
     normalize: bool = False,
     axis: int = 0,
+    mode: ConvMode = ConvMode.After,
 ):
     R"""Delayed adstock transformation.
 
@@ -259,6 +276,17 @@
         Maximum duration of carryover effect.
     normalize : bool, by default False
         Whether to normalize the weights.
+    axis : int
+        The axis of ``x`` along witch to apply the convolution
+    mode : ConvMode, optional
+        The convolution mode determines how the convolution is applied at the boundaries
+        of the input signal, denoted as "x." The default mode is ConvMode.Before.
+
+        - ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
+        - ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
+          similar to the wow factor.
+        - ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
+          where the effect overlaps with both preceding and succeeding elements.
 
     Returns
     -------
@@ -275,7 +303,7 @@
         (pt.arange(l_max, dtype=x.dtype) - pt.as_tensor(theta)[..., None]) ** 2,
     )
     w = w / pt.sum(w, axis=-1, keepdims=True) if normalize else w
-    return batched_convolution(x, w, axis=axis, mode=ConvMode.After)
+    return batched_convolution(x, w, axis=axis, mode=mode)
 
 
 def weibull_adstock(
@@ -284,6 +312,7 @@
     k=1,
     l_max: int = 12,
     axis: int = 0,
+    mode: ConvMode = ConvMode.After,
     type: WeibullType | str = WeibullType.PDF,
 ):
     R"""Weibull Adstocking Transformation.
@@ -349,6 +378,17 @@
         Shape parameter of the Weibull distribution. Must be positive.
     l_max : int, by default 12
         Maximum duration of carryover effect.
+    axis : int
+        The axis of ``x`` along witch to apply the convolution
+    mode : ConvMode, optional
+        The convolution mode determines how the convolution is applied at the boundaries
+        of the input signal, denoted as "x." The default mode is ConvMode.Before.
+
+        - ConvMode.After: Applies the convolution with the "Adstock" effect, resulting in a trailing decay effect.
+        - ConvMode.Before: Applies the convolution with the "Excitement" effect, creating a leading effect
+          similar to the wow factor.
+        - ConvMode.Overlap: Applies the convolution with both "Pull-Forward" and "Pull-Backward" effects,
+          where the effect overlaps with both preceding and succeeding elements.
     type : WeibullType or str, by default WeibullType.PDF
         Type of Weibull adstock transformation to be applied (PDF or CDF).
 
@@ -374,7 +414,7 @@
         w = pt.cumprod(padded_w, axis=-1)
     else:
         raise ValueError(f"Wrong WeibullType: {type}, expected of WeibullType")
-    return batched_convolution(x, w, axis=axis)
+    return batched_convolution(x, w, axis=axis, mode=mode)
 
 
 def logistic_saturation(x, lam: npt.NDArray[np.float_] | float = 0.5):
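For context, a minimal usage sketch of the new mode argument. The function and enum names come from the diff above; the import path is assumed to mirror the module location pymc_marketing/mmm/transformers.py.

import numpy as np

from pymc_marketing.mmm.transformers import ConvMode, geometric_adstock

# A single unit of spend at t = 2; l_max bounds the carryover window.
x = np.zeros(20)
x[2] = 1.0

# ConvMode.After (the signature default): trailing decay, the classic adstock effect.
y_after = geometric_adstock(x=x, alpha=0.5, l_max=6, mode=ConvMode.After).eval()

# ConvMode.Before: the leading "excitement" effect described in the docstring.
y_before = geometric_adstock(x=x, alpha=0.5, l_max=6, mode=ConvMode.Before).eval()

# ConvMode.Overlap: the effect is spread over both preceding and succeeding periods.
y_overlap = geometric_adstock(x=x, alpha=0.5, l_max=6, mode=ConvMode.Overlap).eval()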

tests/mmm/test_transformers.py

Lines changed: 39 additions & 10 deletions
@@ -104,9 +104,14 @@ def test_batched_convolution_broadcasting():
 
 
 class TestsAdstockTransformers:
-    def test_geometric_adstock_x_zero(self):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_geometric_adstock_x_zero(self, mode):
         x = np.zeros(shape=(100))
-        y = geometric_adstock(x=x, alpha=0.2)
+        y = geometric_adstock(x=x, alpha=0.2, mode=mode)
         np.testing.assert_array_equal(x=x, y=y.eval())
 
     @pytest.mark.parametrize(
@@ -127,9 +132,14 @@ def test_geometric_adstock_good_alpha(self, x, alpha, l_max):
         assert y_np[1] == x[1] + alpha * x[0]
         assert y_np[2] == x[2] + alpha * x[1] + (alpha**2) * x[0]
 
-    def test_delayed_adstock_output_type(self):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_delayed_adstock_output_type(self, mode):
         x = np.ones(shape=(100))
-        y = delayed_adstock(x=x, alpha=0.5, theta=6, l_max=7)
+        y = delayed_adstock(x=x, alpha=0.5, theta=6, l_max=7, mode=mode)
         assert isinstance(y, TensorVariable)
         assert isinstance(y.eval(), np.ndarray)
 
@@ -138,36 +148,55 @@ def test_delayed_adstock_x_zero(self):
         y = delayed_adstock(x=x, alpha=0.2, theta=2, l_max=4)
         np.testing.assert_array_equal(x=x, y=y.eval())
 
-    def test_geometric_adstock_vectorized(self, dummy_design_matrix):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_geometric_adstock_vectorized(self, dummy_design_matrix, mode):
         x = dummy_design_matrix.copy()
         x_tensor = pt.as_tensor_variable(x)
         alpha = [0.9, 0.33, 0.5, 0.1, 0.0]
         alpha_tensor = pt.as_tensor_variable(alpha)
-        y_tensor = geometric_adstock(x=x_tensor, alpha=alpha_tensor, l_max=12, axis=0)
+        y_tensor = geometric_adstock(
+            x=x_tensor, alpha=alpha_tensor, l_max=12, axis=0, mode=mode
+        )
         y = y_tensor.eval()
 
         y_tensors = [
-            geometric_adstock(x=x[:, i], alpha=alpha[i], l_max=12)
+            geometric_adstock(x=x[:, i], alpha=alpha[i], l_max=12, mode=mode)
             for i in range(x.shape[1])
         ]
         ys = np.concatenate([y_t.eval()[..., None] for y_t in y_tensors], axis=1)
         assert y.shape == x.shape
         np.testing.assert_almost_equal(actual=y, desired=ys, decimal=12)
 
-    def test_delayed_adstock_vectorized(self, dummy_design_matrix):
+    @pytest.mark.parametrize(
+        argnames="mode",
+        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
+        ids=["After", "Before", "Overlap"],
+    )
+    def test_delayed_adstock_vectorized(self, dummy_design_matrix, mode):
         x = dummy_design_matrix
         x_tensor = pt.as_tensor_variable(x)
         alpha = [0.9, 0.33, 0.5, 0.1, 0.0]
         alpha_tensor = pt.as_tensor_variable(alpha)
         theta = [0, 1, 2, 3, 4]
         theta_tensor = pt.as_tensor_variable(theta)
         y_tensor = delayed_adstock(
-            x=x_tensor, alpha=alpha_tensor, theta=theta_tensor, l_max=12, axis=0
+            x=x_tensor,
+            alpha=alpha_tensor,
+            theta=theta_tensor,
+            l_max=12,
+            axis=0,
+            mode=mode,
         )
         y = y_tensor.eval()
 
         y_tensors = [
-            delayed_adstock(x=x[:, i], alpha=alpha[i], theta=theta[i], l_max=12)
+            delayed_adstock(
+                x=x[:, i], alpha=alpha[i], theta=theta[i], l_max=12, mode=mode
+            )
             for i in range(x.shape[1])
         ]
         ys = np.concatenate([y_t.eval()[..., None] for y_t in y_tensors], axis=1)
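The commit also threads mode through weibull_adstock, which the test diff above does not exercise. A hypothetical parametrized case in the same style is sketched below; the lam/k parameter names and the availability of weibull_adstock in this test module are assumptions, not shown in this diff.

    @pytest.mark.parametrize(
        argnames="mode",
        argvalues=[ConvMode.After, ConvMode.Before, ConvMode.Overlap],
        ids=["After", "Before", "Overlap"],
    )
    def test_weibull_adstock_x_zero(self, mode):
        # Convolving an all-zero series with any finite weights yields zeros,
        # regardless of the boundary mode.
        x = np.zeros(shape=(100))
        y = weibull_adstock(x=x, lam=1, k=1, l_max=10, mode=mode)
        np.testing.assert_array_equal(x=x, y=y.eval())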

0 commit comments