|
16 | 16 | from sklearn.linear_model import LinearRegression
|
17 | 17 | from sklearn.model_selection import (
|
18 | 18 | GroupKFold, KFold, LeaveOneOut, PredefinedSplit, ShuffleSplit,
|
19 |
| - train_test_split |
| 19 | + train_test_split, LeaveOneGroupOut, LeavePGroupsOut |
20 | 20 | )
|
21 | 21 | from sklearn.pipeline import Pipeline, make_pipeline
|
22 | 22 | from sklearn.preprocessing import OneHotEncoder
|
@@ -290,6 +290,30 @@ def test_predict_output_shape(
|
290 | 290 | assert y_pis.shape == (X.shape[0], 2, n_alpha)
|
291 | 291 |
|
292 | 292 |
|
@pytest.mark.parametrize(
    "cv, n_groups",
    [
        (LeaveOneGroupOut(), 5),
        (LeavePGroupsOut(2), 10),
    ],
)
def test_group_cv_fit_runs_regressor(cv, n_groups) -> None:
    """
    `_MapieRegressor` should accept group-based CV splitters
    (LeaveOneGroupOut, LeavePGroupsOut) without raising.
    """
    # Keep the per-group sample count in one place so `n_samples` and the
    # `groups` array cannot drift out of sync.
    samples_per_group = 30
    X, y = make_regression(
        n_samples=n_groups * samples_per_group,
        n_features=5,
        noise=0.1,
        random_state=42,
    )
    # One contiguous run of `samples_per_group` labels per group id.
    groups = np.repeat(np.arange(n_groups), samples_per_group)

    # Ensure `.fit` does not raise when a group-aware splitter is supplied.
    _MapieRegressor(cv=cv).fit(X, y, groups=groups)
| 316 | + |
293 | 317 | @pytest.mark.parametrize("delta", [0.6, 0.8])
|
294 | 318 | @pytest.mark.parametrize("n_calib", [10 + i for i in range(13)] + [50, 100])
|
295 | 319 | def test_coverage_validity(delta: float, n_calib: int) -> None:
|
|
0 commit comments