Skip to content

Commit 2a9492b

Browse files
raghavrv authored and jnothman committed
[MRG] MAINT Removing more deprecated stuff for v0.18 (scikit-learn#5528)
* MNT Removing more deprecated stuff for v0.18
* MNT+TST Remove support for loss='l2'
* MNT Remove support for precompute='auto' for ElasticNet
* FIX/TST precompute=auto should raise a generic message
1 parent 1e2123d commit 2a9492b

File tree

7 files changed

+27
-45
lines changed

7 files changed

+27
-45
lines changed

sklearn/feature_extraction/text.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -280,12 +280,6 @@ def _check_vocabulary(self):
280280
if len(self.vocabulary_) == 0:
281281
raise ValueError("Vocabulary is empty")
282282

283-
@property
284-
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
285-
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
286-
def fixed_vocabulary(self):
287-
return self.fixed_vocabulary_
288-
289283

290284
class HashingVectorizer(BaseEstimator, VectorizerMixin):
291285
"""Convert a collection of text documents to a matrix of token occurrences

sklearn/linear_model/coordinate_descent.py

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -462,7 +462,7 @@ def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
462462
positive)
463463
else:
464464
raise ValueError("Precompute should be one of True, False, "
465-
"'auto' or array-like")
465+
"'auto' or array-like. Got %r" % precompute)
466466
coef_, dual_gap_, eps_, n_iter_ = model
467467
coefs[..., i] = coef_
468468
dual_gaps[i] = dual_gap_
@@ -548,9 +548,8 @@ class ElasticNet(LinearModel, RegressorMixin):
548548
549549
precompute : True | False | array-like
550550
Whether to use a precomputed Gram matrix to speed up
551-
calculations. If set to ``'auto'`` let us decide. The Gram
552-
matrix can also be passed as argument. For sparse input
553-
this option is always ``True`` to preserve sparsity.
551+
calculations. The Gram matrix can also be passed as argument.
552+
For sparse input this option is always ``True`` to preserve sparsity.
554553
555554
max_iter : int, optional
556555
The maximum number of iterations
@@ -661,12 +660,10 @@ def fit(self, X, y, check_input=True):
661660
"well. You are advised to use the LinearRegression "
662661
"estimator", stacklevel=2)
663662

664-
if (isinstance(self.precompute, six.string_types) and
665-
self.precompute == 'auto'):
666-
warnings.warn("Setting precompute to 'auto', was found to be "
667-
"slower even when n_samples > n_features. Hence "
668-
"it will be removed in 0.18.",
669-
DeprecationWarning, stacklevel=2)
663+
if isinstance(self.precompute, six.string_types):
664+
raise ValueError('precompute should be one of True, False or'
665+
' array-like. Got %r' % self.precompute)
666+
670667
# We expect X and y to be already float64 Fortran ordered arrays
671668
# when bypassing checks
672669
if check_input:

sklearn/linear_model/tests/test_coordinate_descent.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
from sklearn.utils.testing import assert_true
1717
from sklearn.utils.testing import assert_greater
1818
from sklearn.utils.testing import assert_raises
19+
from sklearn.utils.testing import assert_raises_regex
1920
from sklearn.utils.testing import assert_warns
2021
from sklearn.utils.testing import assert_warns_message
2122
from sklearn.utils.testing import ignore_warnings
@@ -509,7 +510,12 @@ def test_precompute_invalid_argument():
509510
X, y, _, _ = build_dataset()
510511
for clf in [ElasticNetCV(precompute="invalid"),
511512
LassoCV(precompute="invalid")]:
512-
assert_raises(ValueError, clf.fit, X, y)
513+
assert_raises_regex(ValueError, ".*should be.*True.*False.*auto.*"
514+
"array-like.*Got 'invalid'", clf.fit, X, y)
515+
516+
# Precompute = 'auto' is not supported for ElasticNet
517+
assert_raises_regex(ValueError, ".*should be.*True.*False.*array-like.*"
518+
"Got 'auto'", ElasticNet(precompute='auto').fit, X, y)
513519

514520

515521
def test_warm_start_convergence():

sklearn/svm/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -547,7 +547,7 @@ def decision_function(self, X):
547547
dec = self._decision_function(X)
548548
if self.decision_function_shape is None and len(self.classes_) > 2:
549549
warnings.warn("The decision_function_shape default value will "
550-
"change from 'ovo' to 'ovr' in 0.18. This will change "
550+
"change from 'ovo' to 'ovr' in 0.19. This will change "
551551
"the shape of the decision function returned by "
552552
"SVC.", ChangedBehaviorWarning)
553553
if self.decision_function_shape == 'ovr' and len(self.classes_) > 2:

sklearn/svm/bounds.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -53,12 +53,6 @@ def l1_min_c(X, y, loss='squared_hinge', fit_intercept=True,
5353
l1_min_c: float
5454
minimum value for C
5555
"""
56-
57-
if loss == "l2":
58-
warn("loss='l2' will be impossible from 0.18 onwards."
59-
" Use loss='squared_hinge' instead.",
60-
DeprecationWarning)
61-
loss = "squared_hinge"
6256
if loss not in ('squared_hinge', 'log'):
6357
raise ValueError('loss type not in ("squared_hinge", "log", "l2")')
6458

sklearn/svm/classes.py

Lines changed: 8 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -188,16 +188,13 @@ def fit(self, X, y, sample_weight=None):
188188
Returns self.
189189
"""
190190
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
191-
loss_l = self.loss.lower()
192-
193191
msg = ("loss='%s' has been deprecated in favor of "
194192
"loss='%s' as of 0.16. Backward compatibility"
195193
" for the loss='%s' will be removed in %s")
196194

197-
# FIXME change loss_l --> self.loss after 0.18
198-
if loss_l in ('l1', 'l2'):
195+
if self.loss in ('l1', 'l2'):
199196
old_loss = self.loss
200-
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
197+
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(self.loss)
201198
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
202199
DeprecationWarning)
203200
# ---------------------------------------------------------------------
@@ -357,18 +354,15 @@ def fit(self, X, y, sample_weight=None):
357354
Returns self.
358355
"""
359356
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
360-
loss_l = self.loss.lower()
361-
362357
msg = ("loss='%s' has been deprecated in favor of "
363358
"loss='%s' as of 0.16. Backward compatibility"
364359
" for the loss='%s' will be removed in %s")
365360

366-
# FIXME change loss_l --> self.loss after 0.18
367-
if loss_l in ('l1', 'l2'):
361+
if self.loss in ('l1', 'l2'):
368362
old_loss = self.loss
369363
self.loss = {'l1': 'epsilon_insensitive',
370364
'l2': 'squared_epsilon_insensitive'
371-
}.get(loss_l)
365+
}.get(self.loss)
372366
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
373367
DeprecationWarning)
374368
# ---------------------------------------------------------------------
@@ -467,7 +461,7 @@ class SVC(BaseSVC):
467461
(n_samples, n_classes * (n_classes - 1) / 2).
468462
The default of None will currently behave as 'ovo' for backward
469463
compatibility and raise a deprecation warning, but will change 'ovr'
470-
in 0.18.
464+
in 0.19.
471465
472466
.. versionadded:: 0.17
473467
*decision_function_shape='ovr'* is recommended.
@@ -620,7 +614,7 @@ class NuSVC(BaseSVC):
620614
(n_samples, n_classes * (n_classes - 1) / 2).
621615
The default of None will currently behave as 'ovo' for backward
622616
compatibility and raise a deprecation warning, but will change 'ovr'
623-
in 0.18.
617+
in 0.19.
624618
625619
.. versionadded:: 0.17
626620
*decision_function_shape='ovr'* is recommended.
@@ -1038,8 +1032,8 @@ def fit(self, X, y=None, sample_weight=None, **params):
10381032
If X is not a C-ordered contiguous array it is copied.
10391033
10401034
"""
1041-
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
1042-
**params)
1035+
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)),
1036+
sample_weight=sample_weight, **params)
10431037
return self
10441038

10451039
def decision_function(self, X):

sklearn/svm/tests/test_bounds.py

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import nose
22
from nose.tools import assert_equal, assert_true
33
from sklearn.utils.testing import clean_warning_registry
4+
from sklearn.utils.testing import assert_raise_message
45
import warnings
56

67
import numpy as np
@@ -37,13 +38,9 @@ def test_l1_min_c():
3738
intercept_label))
3839
yield check
3940

40-
41-
def test_l2_deprecation():
42-
clean_warning_registry()
43-
with warnings.catch_warnings(record=True) as w:
44-
assert_equal(l1_min_c(dense_X, Y1, "l2"),
45-
l1_min_c(dense_X, Y1, "squared_hinge"))
46-
assert_equal(w[0].category, DeprecationWarning)
41+
# loss='l2' should raise ValueError
42+
assert_raise_message(ValueError, "loss type not in",
43+
l1_min_c, dense_X, Y1, "l2")
4744

4845

4946
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):

0 commit comments

Comments (0)