@@ -173,7 +173,9 @@ def test_default_configuration(self):

         auto = SimpleClassificationPipeline(random_state=1)

-        auto = auto.fit(X_train, Y_train)
+        with ignore_warnings(classifier_warnings):
+            auto = auto.fit(X_train, Y_train)
+
         predictions = auto.predict(X_test)

         acc = sklearn.metrics.accuracy_score(predictions, Y_test)
@@ -196,7 +198,9 @@ def test_default_configuration_multilabel(self):
         default = cs.get_default_configuration()
         classifier.set_hyperparameters(default)

-        classifier = classifier.fit(X_train, Y_train)
+        with ignore_warnings(classifier_warnings):
+            classifier = classifier.fit(X_train, Y_train)
+
         predictions = classifier.predict(X_test)

         acc = sklearn.metrics.accuracy_score(predictions, Y_test)
@@ -221,10 +225,12 @@ def test_default_configuration_iterative_fit(self):
             random_state=0
         )
         classifier.fit_transformer(X_train, Y_train)
-        for i in range(1, 11):
-            classifier.iterative_fit(X_train, Y_train)
-            n_estimators = classifier.steps[-1][-1].choice.estimator.n_estimators
-            self.assertEqual(n_estimators, i)
+
+        with ignore_warnings(classifier_warnings):
+            for i in range(1, 11):
+                classifier.iterative_fit(X_train, Y_train)
+                n_estimators = classifier.steps[-1][-1].choice.estimator.n_estimators
+                self.assertEqual(n_estimators, i)

     def test_repr(self):
         """Test that the default pipeline can be converted to its representation and
@@ -727,7 +733,9 @@ def test_predict_batched(self):

         # Multiclass
         X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
-        cls.fit(X_train, Y_train)
+
+        with ignore_warnings(classifier_warnings):
+            cls.fit(X_train, Y_train)

         X_test_ = X_test.copy()
         prediction_ = cls.predict_proba(X_test_)
@@ -759,7 +767,8 @@ def test_predict_batched_sparse(self):

         # Multiclass
         X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits', make_sparse=True)
-        cls.fit(X_train, Y_train)
+        with ignore_warnings(classifier_warnings):
+            cls.fit(X_train, Y_train)

         X_test_ = X_test.copy()
         prediction_ = cls.predict_proba(X_test_)
@@ -788,7 +797,8 @@ def test_predict_proba_batched(self):
         cls = SimpleClassificationPipeline(include={'classifier': ['sgd']})
         X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')

-        cls.fit(X_train, Y_train)
+        with ignore_warnings(classifier_warnings):
+            cls.fit(X_train, Y_train)

         X_test_ = X_test.copy()
         prediction_ = cls.predict_proba(X_test_)
@@ -808,7 +818,9 @@ def test_predict_proba_batched(self):
         X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
         Y_train = np.array(list([(list([1 if i != y else 0 for i in range(10)]))
                                  for y in Y_train]))
-        cls.fit(X_train, Y_train)
+
+        with ignore_warnings(classifier_warnings):
+            cls.fit(X_train, Y_train)

         X_test_ = X_test.copy()
         prediction_ = cls.predict_proba(X_test_)
@@ -842,7 +854,9 @@ def test_predict_proba_batched_sparse(self):
         X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits', make_sparse=True)
         X_test_ = X_test.copy()

-        cls.fit(X_train, Y_train)
+        with ignore_warnings(classifier_warnings):
+            cls.fit(X_train, Y_train)
+
         prediction_ = cls.predict_proba(X_test_)

         # The object behind the last step in the pipeline
@@ -861,10 +875,13 @@ def test_predict_proba_batched_sparse(self):
             include={'classifier': ['lda']}
         )
         X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits', make_sparse=True)
+
         X_test_ = X_test.copy()
         Y_train = np.array([[1 if i != y else 0 for i in range(10)] for y in Y_train])

-        cls.fit(X_train, Y_train)
+        with ignore_warnings(classifier_warnings):
+            cls.fit(X_train, Y_train)
+
         prediction_ = cls.predict_proba(X_test_)

         # The object behind the last step in the pipeline
@@ -889,7 +906,9 @@ def test_pipeline_clonability(self):
         X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')

         auto = SimpleClassificationPipeline()
-        auto = auto.fit(X_train, Y_train)
+
+        with ignore_warnings(classifier_warnings):
+            auto = auto.fit(X_train, Y_train)

         auto_clone = clone(auto)
         auto_clone_params = auto_clone.get_params()
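Every hunk above applies the same pattern: the `fit`/`iterative_fit` calls are wrapped in `with ignore_warnings(classifier_warnings):` so that expected fit-time warnings do not pollute the test output. The two helpers are defined elsewhere in the test suite and are not shown in this diff; the sketch below is a minimal, assumed implementation built on the standard `warnings` module, where the `(category, message)` tuple layout and the `ConvergenceWarning` entry are illustrative assumptions rather than the project's actual definitions.

```python
# Minimal sketch of the assumed helpers; the real definitions live in the
# test suite and may differ in names, types, and the warnings they cover.
import warnings
from contextlib import contextmanager
from typing import Iterator, List, Tuple, Type

from sklearn.exceptions import ConvergenceWarning

# Hypothetical list of warnings a classifier fit is allowed to emit.
classifier_warnings: List[Tuple[Type[Warning], str]] = [
    (ConvergenceWarning, r"Maximum number of iteration"),
    (UserWarning, r".*"),
]


@contextmanager
def ignore_warnings(to_ignore: List[Tuple[Type[Warning], str]]) -> Iterator[None]:
    """Suppress the listed (category, message-pattern) warnings inside the block."""
    with warnings.catch_warnings():
        for category, message in to_ignore:
            warnings.filterwarnings("ignore", message=message, category=category)
        yield
```

Under that assumption, each wrapped call behaves exactly like the unwrapped one, except that matching warnings raised during fitting are silenced for the duration of the `with` block.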