balance_cascade.py
"""Class to perform under-sampling using balace cascade."""

# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
#          Christos Aridas
# License: MIT

from collections import Counter

import numpy as np

from sklearn.base import ClassifierMixin
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import check_random_state, safe_indexing
from sklearn.model_selection import cross_val_predict

from .base import BaseEnsembleSampler
from ..utils import check_ratio

class BalanceCascade(BaseEnsembleSampler):
"""Create an ensemble of balanced sets by iteratively under-sampling the
imbalanced dataset using an estimator.
This method iteratively select subset and make an ensemble of the
different sets. The selection is performed using a specific classifier.
Read more in the :ref:`User Guide <ensemble_samplers>`.
Parameters
----------
ratio : str, dict, or callable, optional (default='auto')
Ratio to use for resampling the data set.
        - If ``str``, has to be one of: (i) ``'minority'``: resample the
          minority class; (ii) ``'majority'``: resample the majority class;
          (iii) ``'not minority'``: resample all classes apart from the
          minority class; (iv) ``'all'``: resample all classes; and (v)
          ``'auto'``: corresponds to ``'all'`` for over-sampling methods and
          ``'not minority'`` for under-sampling methods. The classes targeted
          will be over-sampled or under-sampled to achieve an equal number
          of samples as the majority or minority class.
- If ``dict``, the keys correspond to the targeted classes. The values
correspond to the desired number of samples.
- If callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples.
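
        For instance, ``ratio={1: 100}`` would keep 100 samples of class
        ``1`` in each subset (illustrative values).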

    return_indices : bool, optional (default=False)
Whether or not to return the indices of the samples randomly
selected from the majority class.

    random_state : int, RandomState instance or None, optional (default=None)
If int, ``random_state`` is the seed used by the random number
generator; If ``RandomState`` instance, random_state is the random
number generator; If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.

    n_max_subset : int or None, optional (default=None)
        Maximum number of subsets to generate. By default, subsets are
        generated until all the data from the training set have been
        selected, which could lead to a large number of subsets. A suitable
        value can otherwise be determined empirically.

    estimator : object, optional (default=KNeighborsClassifier())
        An estimator inheriting from :class:`sklearn.base.ClassifierMixin`
        and implementing a :func:`predict` method.

    Notes
    -----
    The method is described in [1]_.

    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.

    See :ref:`sphx_glr_auto_examples_ensemble_plot_balance_cascade.py`.

See also
--------
BalancedBaggingClassifier, EasyEnsemble

    References
    ----------
.. [1] X. Y. Liu, J. Wu and Z. H. Zhou, "Exploratory Undersampling for
Class-Imbalance Learning," in IEEE Transactions on Systems, Man, and
Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550,
April 2009.

    Examples
    --------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.ensemble import \
BalanceCascade # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> bc = BalanceCascade(random_state=42)
>>> X_res, y_res = bc.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res[0]))) \
# doctest: +ELLIPSIS
Resampled dataset shape Counter({...})
"""

    def __init__(self,
ratio='auto',
return_indices=False,
random_state=None,
n_max_subset=None,
estimator=None):
super(BalanceCascade, self).__init__(ratio=ratio)
self.random_state = random_state
self.return_indices = return_indices
self.estimator = estimator
self.n_max_subset = n_max_subset

    def fit(self, X, y):
        """Find the classes statistics before performing the sampling.

Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.

        y : array-like, shape (n_samples,)
Corresponding label for each sample in X.

        Returns
        -------
        self : object
            Return self.
"""
super(BalanceCascade, self).fit(X, y)
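        # ``ratio_`` maps each class targeted for resampling to the number of
        # samples to retain in every subset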
self.ratio_ = check_ratio(self.ratio, y, 'under-sampling')
return self

    def _validate_estimator(self):
"""Private function to create the classifier"""
if (self.estimator is not None and
isinstance(self.estimator, ClassifierMixin) and
hasattr(self.estimator, 'predict')):
self.estimator_ = self.estimator
elif self.estimator is None:
self.estimator_ = KNeighborsClassifier()
else:
raise ValueError('Invalid parameter `estimator`. Got {}.'.format(
type(self.estimator)))
self.logger.debug(self.estimator_)

    def _sample(self, X, y):
"""Resample the dataset.

        Parameters
        ----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.

        y : array-like, shape (n_samples,)
Corresponding label for each sample in X.

        Returns
        -------
        X_resampled : {ndarray, sparse matrix}, shape \
(n_subset, n_samples_new, n_features)
            The array containing the resampled data.

        y_resampled : ndarray, shape (n_subset, n_samples_new)
            The corresponding label of ``X_resampled``.

        idx_under : ndarray, shape (n_subset, n_samples_new)
            If ``return_indices`` is ``True``, an array containing the
            indices of the samples selected in each subset.

"""
self._validate_estimator()
random_state = check_random_state(self.random_state)
# array to know which samples are available to be taken
samples_mask = np.ones(y.shape, dtype=bool)
# where the different set will be stored
idx_under = []
n_subsets = 0
b_subset_search = True
while b_subset_search:
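            # each iteration builds one balanced subset from the samples that
            # are still available in the pool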
target_stats = Counter(safe_indexing(
y, np.flatnonzero(samples_mask)))
            # store the indices of the samples to under-sample
            index_under_sample = np.empty((0, ), dtype=int)
            # indices of the non-targeted classes, kept as-is at each round
            index_constant = np.empty((0, ), dtype=int)
for target_class in target_stats.keys():
if target_class in self.ratio_.keys():
n_samples = self.ratio_[target_class]
# extract the data of interest for this round from the
# current class
index_class = np.flatnonzero(y == target_class)
index_class_interest = index_class[samples_mask[
y == target_class]]
y_class = safe_indexing(y, index_class_interest)
                    # randomly select the desired number of samples
index_target_class = random_state.choice(
range(y_class.size), size=n_samples, replace=False)
index_under_sample = np.concatenate(
(index_under_sample,
index_class_interest[index_target_class]),
axis=0)
else:
index_constant = np.concatenate(
(index_constant,
np.flatnonzero(y == target_class)),
axis=0)
# store the set created
n_subsets += 1
subset_indices = np.concatenate((index_under_sample,
index_constant), axis=0)
idx_under.append(subset_indices)
# fit and predict using cross validation
X_subset = safe_indexing(X, subset_indices)
y_subset = safe_indexing(y, subset_indices)
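            # out-of-fold predictions give an unbiased view of which samples
            # the current classifier already handles correctly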
pred = cross_val_predict(self.estimator_, X_subset, y_subset)
# extract the prediction about the targeted classes only
pred_target = pred[:index_under_sample.size]
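            # core of the balance cascade: samples correctly classified by
            # the current estimator are removed from the pool and will not
            # appear in the following subsets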
index_classified = index_under_sample[
pred_target == safe_indexing(y_subset,
range(index_under_sample.size))]
samples_mask[index_classified] = False
# check the stopping criterion
if self.n_max_subset is not None:
if n_subsets == self.n_max_subset:
b_subset_search = False
# check that there is enough samples for another round
target_stats = Counter(safe_indexing(
y, np.flatnonzero(samples_mask)))
for target_class in self.ratio_.keys():
if target_stats[target_class] < self.ratio_[target_class]:
b_subset_search = False
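        # gather one (X, y) pair per generated subset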
X_resampled, y_resampled = [], []
for indices in idx_under:
X_resampled.append(safe_indexing(X, indices))
y_resampled.append(safe_indexing(y, indices))
if self.return_indices:
return (np.array(X_resampled), np.array(y_resampled),
np.array(idx_under))
else:
return np.array(X_resampled), np.array(y_resampled)
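

# ----------------------------------------------------------------------------
# Minimal usage sketch, not part of the original module. It assumes the file
# is executed inside the installed package (e.g. ``python -m
# imblearn.ensemble.balance_cascade``) so that the relative imports above
# resolve; the dataset parameters below are arbitrary.
if __name__ == '__main__':
    from sklearn.datasets import make_classification

    X_demo, y_demo = make_classification(n_classes=2, weights=[0.1, 0.9],
                                         n_samples=1000, random_state=0)
    bc = BalanceCascade(random_state=0, n_max_subset=3)
    X_res, y_res = bc.fit_sample(X_demo, y_demo)
    # each entry of ``X_res``/``y_res`` is one balanced subset
    for i, y_subset in enumerate(y_res):
        print('Subset {}: {}'.format(i, sorted(Counter(y_subset).items())))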