
Commit c6dbee3 (parent: 27c0b8a)

Internal change

PiperOrigin-RevId: 373487968
Change-Id: Ife7a5f2681c8e07f4c945fd3d7935b0fe9d8dfad

3 files changed: +1 addition, -312 deletions

tensorflow/python/keras/metrics.py

Lines changed: 0 additions & 17 deletions
@@ -973,8 +973,6 @@ def __init__(self,
     self.init_thresholds = thresholds
     self.thresholds = metrics_utils.parse_init_thresholds(
         thresholds, default_threshold=0.5)
-    self._evenly_distribute_thresholds = (
-        metrics_utils.evenly_distributed_thresholds(self.thresholds))
     self.accumulator = self.add_weight(
         'accumulator',
         shape=(len(self.thresholds),),
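The deleted lines above gated an optimized confusion-matrix update on whether the parsed thresholds form an evenly spaced grid. The metrics_utils.evenly_distributed_thresholds helper itself is not part of this diff, so the following is only a rough sketch of that kind of check under that assumption, not the actual implementation:

import numpy as np

def evenly_distributed_thresholds(thresholds):
  # Hypothetical sketch: True when the thresholds are a sorted, evenly spaced
  # grid such as [0.0, 0.25, 0.5, 0.75, 1.0]; a single threshold returns False.
  thresholds = np.asarray(thresholds, dtype=np.float64)
  if thresholds.size < 2:
    return False
  gaps = np.diff(thresholds)
  return bool(np.all(gaps > 0) and np.allclose(gaps, gaps[0]))

Under that assumption, the removed attribute simply cached this boolean so update_state could pick the optimized code path.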
@@ -998,7 +996,6 @@ def update_state(self, y_true, y_pred, sample_weight=None):
         y_true,
         y_pred,
         thresholds=self.thresholds,
-        evenly_distribute_thresholds=self._evenly_distribute_thresholds,
         sample_weight=sample_weight)

   def result(self):
@@ -1298,8 +1295,6 @@ def __init__(self,
     default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
     self.thresholds = metrics_utils.parse_init_thresholds(
         thresholds, default_threshold=default_threshold)
-    self._evenly_distribute_thresholds = (
-        metrics_utils.evenly_distributed_thresholds(self.thresholds))
     self.true_positives = self.add_weight(
         'true_positives',
         shape=(len(self.thresholds),),
@@ -1331,7 +1326,6 @@ def update_state(self, y_true, y_pred, sample_weight=None):
         y_true,
         y_pred,
         thresholds=self.thresholds,
-        evenly_distribute_thresholds=self._evenly_distribute_thresholds,
         top_k=self.top_k,
         class_id=self.class_id,
         sample_weight=sample_weight)
@@ -1427,8 +1421,6 @@ def __init__(self,
     default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
     self.thresholds = metrics_utils.parse_init_thresholds(
         thresholds, default_threshold=default_threshold)
-    self._evenly_distribute_thresholds = (
-        metrics_utils.evenly_distributed_thresholds(self.thresholds))
     self.true_positives = self.add_weight(
         'true_positives',
         shape=(len(self.thresholds),),
@@ -1460,7 +1452,6 @@ def update_state(self, y_true, y_pred, sample_weight=None):
         y_true,
         y_pred,
         thresholds=self.thresholds,
-        evenly_distribute_thresholds=self._evenly_distribute_thresholds,
         top_k=self.top_k,
         class_id=self.class_id,
         sample_weight=sample_weight)
@@ -1524,12 +1515,10 @@ def __init__(self,
     # Compute `num_thresholds` thresholds in [0, 1]
     if num_thresholds == 1:
       self.thresholds = [0.5]
-      self._evenly_distribute_thresholds = False
     else:
       thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                     for i in range(num_thresholds - 2)]
       self.thresholds = [0.0] + thresholds + [1.0]
-      self._evenly_distribute_thresholds = True

   def update_state(self, y_true, y_pred, sample_weight=None):
     """Accumulates confusion matrix statistics.
@@ -1554,7 +1543,6 @@ def update_state(self, y_true, y_pred, sample_weight=None):
         y_true,
         y_pred,
         thresholds=self.thresholds,
-        evenly_distribute_thresholds=self._evenly_distribute_thresholds,
         class_id=self.class_id,
         sample_weight=sample_weight)

@@ -2091,9 +2079,6 @@ def __init__(self,
       # If specified, use the supplied thresholds.
       self.num_thresholds = len(thresholds) + 2
       thresholds = sorted(thresholds)
-      self._evenly_distribute_thresholds = (
-          metrics_utils.evenly_distributed_thresholds(
-              np.array([0.0] + thresholds + [1.0])))
     else:
       if num_thresholds <= 1:
         raise ValueError('`num_thresholds` must be > 1.')
@@ -2103,7 +2088,6 @@ def __init__(self,
       self.num_thresholds = num_thresholds
       thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                     for i in range(num_thresholds - 2)]
-      self._evenly_distribute_thresholds = True

     # Add an endpoint "threshold" below zero and above one for either
     # threshold method to account for floating point imprecisions.
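The comment above refers to the sentinel thresholds that AUC appends around the interior grid. Purely as an illustration (the padding code itself is outside this hunk, and the use of backend.epsilon() here is an assumption about the surrounding Keras module), the final threshold array ends up looking like:

import numpy as np
from tensorflow.python.keras import backend

interior = [0.25, 0.5, 0.75]  # e.g. the grid produced for num_thresholds = 5
# One sentinel just below 0.0 and one just above 1.0 so predictions of exactly
# 0.0 or 1.0 fall unambiguously on one side of a threshold.
thresholds = np.array([0.0 - backend.epsilon()] + interior + [1.0 + backend.epsilon()])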
@@ -2256,7 +2240,6 @@ def update_state(self, y_true, y_pred, sample_weight=None):
         y_true,
         y_pred,
         self._thresholds,
-        evenly_distribute_thresholds=self._evenly_distribute_thresholds,
         sample_weight=sample_weight,
         multi_label=self.multi_label,
         label_weights=label_weights)
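The update_state call above only accumulates confusion-matrix counts; result() later reduces them to the final value. A minimal usage example of the public API (the expected output of roughly 0.75 is the standard Keras AUC docstring example):

import tensorflow as tf

auc = tf.keras.metrics.AUC(num_thresholds=3)
auc.update_state([0, 0, 1, 1], [0.0, 0.5, 0.3, 0.9])
print(auc.result().numpy())  # approximately 0.75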

tensorflow/python/keras/metrics_confusion_matrix_test.py

Lines changed: 0 additions & 104 deletions
@@ -33,11 +33,6 @@
 from tensorflow.python.platform import test
 from tensorflow.python.platform import tf_logging

-try:
-  import memory_profiler  # pylint:disable=g-import-not-at-top
-except ImportError:
-  memory_profiler = None
-

 @combinations.generate(combinations.combine(mode=['graph', 'eager']))
 class FalsePositivesTest(test.TestCase, parameterized.TestCase):
@@ -1781,104 +1776,5 @@ def test_reset_state(self):
     self.assertAllEqual(auc_obj.true_positives, np.zeros((5, 2)))


-class ThresholdsTest(test.TestCase, parameterized.TestCase):
-
-  @parameterized.parameters([
-      metrics.TruePositives(),
-      metrics.TrueNegatives(),
-      metrics.FalsePositives(),
-      metrics.FalseNegatives(),
-      metrics.Precision(),
-      metrics.Recall(),
-      metrics.SensitivityAtSpecificity(0.5),
-      metrics.SpecificityAtSensitivity(0.5),
-      metrics.PrecisionAtRecall(0.5),
-      metrics.RecallAtPrecision(0.5),
-      metrics.AUC()])
-  def test_with_default_thresholds(self, metric_obj):
-    # By default, the thresholds will be evenly distributed if there is more
-    # than one. When there is only one threshold, we expect
-    # _evenly_distribute_thresholds to be False.
-    expected = len(metric_obj.thresholds) > 1
-    self.assertEqual(metric_obj._evenly_distribute_thresholds, expected)
-
-  @parameterized.parameters([
-      metrics.TruePositives,
-      metrics.TrueNegatives,
-      metrics.FalsePositives,
-      metrics.FalseNegatives,
-      metrics.Precision,
-      metrics.Recall])
-  def test_with_manual_thresholds(self, metric_cls):
-    even_thresholds = [0.0, 0.25, 0.5, 0.75, 1.0]
-    metric_obj = metric_cls(thresholds=even_thresholds)
-    self.assertTrue(metric_obj._evenly_distribute_thresholds)
-
-    uneven_thresholds = [0.0, 0.45, 1.0]
-    metric_obj = metric_cls(thresholds=uneven_thresholds)
-    self.assertFalse(metric_obj._evenly_distribute_thresholds)
-
-  def test_manual_thresholds_auc(self):
-    # The AUC metric handles manual thresholds input differently (it adds
-    # 0.0 and 1.0 for the user).
-    even_thresholds = [0.25, 0.5, 0.75]
-    auc = metrics.AUC(thresholds=even_thresholds)
-    self.assertTrue(auc._evenly_distribute_thresholds)
-
-    # Test the config round trip used by model saving.
-    cloned = metrics.AUC.from_config(auc.get_config())
-    self.assertTrue(cloned._evenly_distribute_thresholds)
-
-    uneven_thresholds = [0.45,]
-    auc = metrics.AUC(thresholds=uneven_thresholds)
-    self.assertFalse(auc._evenly_distribute_thresholds)
-
-    cloned = metrics.AUC.from_config(auc.get_config())
-    self.assertFalse(cloned._evenly_distribute_thresholds)
-
-
-@combinations.generate(combinations.combine(mode=['eager']))
-class AUCMemoryTest(test.TestCase, parameterized.TestCase):
-  # This test measures the memory footprint of
-  # metrics_utils._update_confusion_matrix_variables_optimized().
-
-  def test_memory_usage(self):
-    if memory_profiler is None:
-      self.skipTest('Skip test since memory_profiler is not available.')
-
-    self.y_true = np.random.randint(2, size=(1024, 1024))
-    self.y_pred = np.random.rand(1024, 1024)
-
-    memory_usage_1 = memory_profiler.memory_usage((self.even_thresholds_auc))
-    memory_usage_2 = memory_profiler.memory_usage((self.uneven_thresholds_auc))
-    # memory_usage() returns a list of samples taken while the function runs.
-    # The pure memory consumption is approximately max(usage) - min(usage).
-    memory_usage_1 = max(memory_usage_1) - min(memory_usage_1)
-    memory_usage_2 = max(memory_usage_2) - min(memory_usage_2)
-
-    # The new approach is expected to have a memory footprint of O(T + N),
-    # while the old approach is O(T * N). With T = 200 thresholds, the ratio
-    # between 1 and 2 should be at least 50 (some room for other overhead).
-    self.assertLess(memory_usage_1 * 50, memory_usage_2)
-
-  def even_thresholds_auc(self):
-    auc = metrics.AUC(num_thresholds=200)
-    self.assertTrue(auc._evenly_distribute_thresholds)
-
-    auc(self.y_true, self.y_pred)
-
-  def uneven_thresholds_auc(self):
-    num_thresholds = 200
-    thresholds = [x / (num_thresholds - 1) for x in range(num_thresholds)]
-    thresholds[100] += 1 / 200
-    thresholds = thresholds[1:-1]
-
-    auc = metrics.AUC(thresholds=thresholds)
-    self.assertFalse(auc._evenly_distribute_thresholds)
-    self.assertEqual(auc.num_thresholds, num_thresholds)
-
-    auc(self.y_true, self.y_pred)
-
-
 if __name__ == '__main__':
   test.main()
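The deleted AUCMemoryTest relied on memory_profiler.memory_usage, which samples the process's resident memory (in MiB) while a callable runs. A standalone sketch of that measurement pattern, with a purely illustrative workload standing in for the AUC update:

import numpy as np
from memory_profiler import memory_usage  # requires the memory_profiler package

def allocate_big_buffer():
  # Illustrative workload: allocate roughly 80 MB and touch it.
  buf = np.zeros((10 * 1024 * 1024,), dtype=np.float64)
  buf += 1.0
  return buf.sum()

samples = memory_usage((allocate_big_buffer, (), {}))
# samples is a list of MiB readings taken while the function ran; the
# footprint of the call is approximately the spread of those readings.
print(max(samples) - min(samples))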
