From ea07906c6c7b7671d0153b5b1f96e1cdf5141600 Mon Sep 17 00:00:00 2001
From: Gabriel de Marmiesse
Date: Thu, 26 Mar 2020 13:31:57 +0100
Subject: [PATCH] Moved test out of run_all_in_graph_and_eager_mode in softshrink. (#1405)

* Moved test out of run_all_in_graph_and_eager_mode in softshrink.

See #1328

* Small fix.
---
 .../activations/softshrink_test.py | 29 +++++++++----------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/tensorflow_addons/activations/softshrink_test.py b/tensorflow_addons/activations/softshrink_test.py
index 7399755ea7..d7b327da29 100644
--- a/tensorflow_addons/activations/softshrink_test.py
+++ b/tensorflow_addons/activations/softshrink_test.py
@@ -16,7 +16,6 @@
 import sys
 
 import pytest
-from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
 
@@ -48,21 +47,6 @@ def test_softshrink(dtype):
     )
 
 
-@test_utils.run_all_in_graph_and_eager_modes
-class SoftshrinkTest(tf.test.TestCase, parameterized.TestCase):
-    @parameterized.named_parameters(("float32", np.float32), ("float64", np.float64))
-    def test_theoretical_gradients(self, dtype):
-        # Only test theoretical gradients for float32 and float64
-        # because of the instability of float16 while computing jacobian
-
-        # Softshrink is not continuous at `lower` and `upper`.
-        # Avoid these two points to make gradients smooth.
-        x = tf.constant([-2.0, -1.5, 0.0, 1.5, 2.0], dtype=dtype)
-
-        theoretical, numerical = tf.test.compute_gradient(softshrink, [x])
-        self.assertAllCloseAccordingToType(theoretical, numerical, atol=1e-4)
-
-
 @pytest.mark.parametrize("dtype", [np.float16, np.float32])
 def test_same_as_py_func(dtype):
     np.random.seed(1234)
@@ -89,5 +73,18 @@ def verify_funcs_are_equivalent(dtype):
     test_utils.assert_allclose_according_to_type(grad_native, grad_py)
 
 
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_theoretical_gradients(dtype):
+    # Only test theoretical gradients for float32 and float64
+    # because of the instability of float16 while computing jacobian
+
+    # Softshrink is not continuous at `lower` and `upper`.
+    # Avoid these two points to make gradients smooth.
+    x = tf.constant([-2.0, -1.5, 0.0, 1.5, 2.0], dtype=dtype)
+
+    theoretical, numerical = tf.test.compute_gradient(softshrink, [x])
+    test_utils.assert_allclose_according_to_type(theoretical, numerical, atol=1e-4)
+
+
 if __name__ == "__main__":
     sys.exit(pytest.main([__file__]))