Complete the black formatting migration. #1155

Merged: 1 commit, Feb 26, 2020
13 changes: 0 additions & 13 deletions pyproject.toml
@@ -14,18 +14,5 @@ exclude = '''
| build
| dist
)/
| tensorflow_addons/losses/__init__.py
| tensorflow_addons/losses/focal_loss.py
| tensorflow_addons/losses/giou_loss.py
| tensorflow_addons/losses/giou_loss_test.py
| tensorflow_addons/metrics/multilabel_confusion_matrix.py
| tensorflow_addons/metrics/r_square.py
| tensorflow_addons/optimizers/__init__.py
| tensorflow_addons/optimizers/lookahead.py
| tensorflow_addons/optimizers/moving_average.py
| tensorflow_addons/optimizers/weight_decay_optimizers.py
| tensorflow_addons/text/__init__.py
| tensorflow_addons/text/crf.py
| tensorflow_addons/text/crf_test.py
)
'''
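With these paths dropped from `exclude`, black now formats every remaining file under tensorflow_addons/losses, metrics, optimizers, and text. A minimal sketch (not part of this PR, and assuming black's documented Python API, `black.format_str` and `black.FileMode`) of checking that one of the newly un-excluded files is already compliant:

import pathlib

import black  # assumes the black package is installed

# Reformat the source in memory and compare it with what is on disk.
path = pathlib.Path("tensorflow_addons/losses/focal_loss.py")
source = path.read_text()
formatted = black.format_str(source, mode=black.FileMode())
print("already formatted" if formatted == source else "needs reformatting")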
15 changes: 12 additions & 3 deletions tensorflow_addons/losses/__init__.py
@@ -15,7 +15,10 @@
"""Additional losses that conform to Keras API."""

from tensorflow_addons.losses.contrastive import contrastive_loss, ContrastiveLoss
from tensorflow_addons.losses.focal_loss import sigmoid_focal_crossentropy, SigmoidFocalCrossEntropy
from tensorflow_addons.losses.focal_loss import (
sigmoid_focal_crossentropy,
SigmoidFocalCrossEntropy,
)
from tensorflow_addons.losses.giou_loss import giou_loss, GIoULoss
from tensorflow_addons.losses.lifted import lifted_struct_loss, LiftedStructLoss
from tensorflow_addons.losses.sparsemax_loss import sparsemax_loss, SparsemaxLoss
@@ -30,5 +33,11 @@
# Temporarily disable for windows
# Remove after: https://github.com/tensorflow/addons/issues/838
import os
if os.name != 'nt':
from tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss

if os.name != "nt":
from tensorflow_addons.losses.npairs import (
npairs_loss,
NpairsLoss,
npairs_multilabel_loss,
NpairsMultilabelLoss,
)
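The parenthesized imports above are black's way of keeping these re-exports under its default 88-character line length; the public names are unchanged. A short usage sketch (not part of the diff, with defaults taken from the class definitions below):

from tensorflow_addons.losses import SigmoidFocalCrossEntropy, GIoULoss

focal = SigmoidFocalCrossEntropy()  # defaults: from_logits=False, alpha=0.25, gamma=2.0
giou = GIoULoss(mode="giou")        # or mode="iou" for plain IoU loss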
42 changes: 22 additions & 20 deletions tensorflow_addons/losses/focal_loss.py
@@ -21,7 +21,7 @@
from typeguard import typechecked


@tf.keras.utils.register_keras_serializable(package='Addons')
@tf.keras.utils.register_keras_serializable(package="Addons")
class SigmoidFocalCrossEntropy(tf.keras.losses.Loss):
"""Implements the focal loss function.

@@ -66,27 +66,28 @@ class SigmoidFocalCrossEntropy(tf.keras.losses.Loss):
"""

@typechecked
def __init__(self,
from_logits: bool = False,
alpha: FloatTensorLike = 0.25,
gamma: FloatTensorLike = 2.0,
reduction: str = tf.keras.losses.Reduction.NONE,
name: str = 'sigmoid_focal_crossentropy'):
def __init__(
self,
from_logits: bool = False,
alpha: FloatTensorLike = 0.25,
gamma: FloatTensorLike = 2.0,
reduction: str = tf.keras.losses.Reduction.NONE,
name: str = "sigmoid_focal_crossentropy",
):
super().__init__(name=name, reduction=reduction)

self.from_logits = from_logits
self.alpha = alpha
self.gamma = gamma

def call(self,
y_true,
y_pred):
def call(self, y_true, y_pred):
return sigmoid_focal_crossentropy(
y_true,
y_pred,
alpha=self.alpha,
gamma=self.gamma,
from_logits=self.from_logits)
from_logits=self.from_logits,
)

def get_config(self):
config = {
@@ -98,13 +99,15 @@ def get_config(self):
return {**base_config, **config}


@tf.keras.utils.register_keras_serializable(package='Addons')
@tf.keras.utils.register_keras_serializable(package="Addons")
@tf.function
def sigmoid_focal_crossentropy(y_true: TensorLike,
y_pred: TensorLike,
alpha: FloatTensorLike = 0.25,
gamma: FloatTensorLike = 2.0,
from_logits: bool = False) -> tf.Tensor:
def sigmoid_focal_crossentropy(
y_true: TensorLike,
y_pred: TensorLike,
alpha: FloatTensorLike = 0.25,
gamma: FloatTensorLike = 2.0,
from_logits: bool = False,
) -> tf.Tensor:
"""
Args
y_true: true targets tensor.
@@ -117,8 +120,7 @@ def sigmoid_focal_crossentropy(y_true: TensorLike,
same shape as `y_true`; otherwise, it is scalar.
"""
if gamma and gamma < 0:
raise ValueError(
"Value of gamma should be greater than or equal to zero")
raise ValueError("Value of gamma should be greater than or equal to zero")

y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
@@ -138,7 +140,7 @@ def sigmoid_focal_crossentropy(y_true: TensorLike,

if alpha:
alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())
alpha_factor = (y_true * alpha + (1 - y_true) * (1 - alpha))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)

if gamma:
gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())
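For reference, a small usage sketch of the functional form whose signature was reflowed above (not part of the diff; the input values are illustrative):

import tensorflow as tf
from tensorflow_addons.losses import sigmoid_focal_crossentropy

y_true = tf.constant([[1.0], [0.0]])
y_pred = tf.constant([[0.9], [0.2]])  # probabilities, so from_logits=False
loss = sigmoid_focal_crossentropy(y_true, y_pred, alpha=0.25, gamma=2.0)
print(loss)  # one loss value per example; see the docstring above for shapes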
40 changes: 18 additions & 22 deletions tensorflow_addons/losses/giou_loss.py
@@ -21,7 +21,7 @@
from typeguard import typechecked


@tf.keras.utils.register_keras_serializable(package='Addons')
@tf.keras.utils.register_keras_serializable(package="Addons")
class GIoULoss(tf.keras.losses.Loss):
"""Implements the GIoU loss function.

@@ -50,32 +50,31 @@ class GIoULoss(tf.keras.losses.Loss):
Args:
mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
"""

@typechecked
def __init__(self,
mode: str = 'giou',
reduction: str = tf.keras.losses.Reduction.AUTO,
name: Optional[str] = 'giou_loss'):
if mode not in ['giou', 'iou']:
def __init__(
self,
mode: str = "giou",
reduction: str = tf.keras.losses.Reduction.AUTO,
name: Optional[str] = "giou_loss",
):
if mode not in ["giou", "iou"]:
raise ValueError("Value of mode should be 'iou' or 'giou'")
super().__init__(name=name, reduction=reduction)
self.mode = mode

def get_config(self):
base_config = super().get_config()
base_config['mode'] = self.mode
base_config["mode"] = self.mode
return base_config

def call(self,
y_true,
y_pred):
def call(self, y_true, y_pred):
return giou_loss(y_true, y_pred, mode=self.mode)


@tf.keras.utils.register_keras_serializable(package='Addons')
@tf.keras.utils.register_keras_serializable(package="Addons")
@tf.function
def giou_loss(y_true: TensorLike,
y_pred: TensorLike,
mode: str = 'giou') -> tf.Tensor:
def giou_loss(y_true: TensorLike, y_pred: TensorLike, mode: str = "giou") -> tf.Tensor:
"""
Args:
y_true: true targets tensor. The coordinates of the each bounding
@@ -87,7 +86,7 @@ def giou_loss(y_true: TensorLike,
Returns:
GIoU loss float `Tensor`.
"""
if mode not in ['giou', 'iou']:
if mode not in ["giou", "iou"]:
raise ValueError("Value of mode should be 'iou' or 'giou'")
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
@@ -98,9 +97,7 @@ def giou_loss(y_true: TensorLike,
return 1 - giou


def _calculate_giou(b1: TensorLike,
b2: TensorLike,
mode: str = 'giou') -> tf.Tensor:
def _calculate_giou(b1: TensorLike, b2: TensorLike, mode: str = "giou") -> tf.Tensor:
"""
Args:
b1: bounding box. The coordinates of the each bounding box in boxes are
@@ -112,7 +109,7 @@ def _calculate_giou(b1: TensorLike,
Returns:
GIoU loss float `Tensor`.
"""
zero = tf.convert_to_tensor(0., b1.dtype)
zero = tf.convert_to_tensor(0.0, b1.dtype)
b1_ymin, b1_xmin, b1_ymax, b1_xmax = tf.unstack(b1, 4, axis=-1)
b2_ymin, b2_xmin, b2_ymax, b2_xmax = tf.unstack(b2, 4, axis=-1)
b1_width = tf.maximum(zero, b1_xmax - b1_xmin)
@@ -132,7 +129,7 @@ def _calculate_giou(b1: TensorLike,

union_area = b1_area + b2_area - intersect_area
iou = tf.math.divide_no_nan(intersect_area, union_area)
if mode == 'iou':
if mode == "iou":
return iou

enclose_ymin = tf.minimum(b1_ymin, b2_ymin)
@@ -142,6 +139,5 @@ def _calculate_giou(b1: TensorLike,
enclose_width = tf.maximum(zero, enclose_xmax - enclose_xmin)
enclose_height = tf.maximum(zero, enclose_ymax - enclose_ymin)
enclose_area = enclose_width * enclose_height
giou = iou - tf.math.divide_no_nan(
(enclose_area - union_area), enclose_area)
giou = iou - tf.math.divide_no_nan((enclose_area - union_area), enclose_area)
return giou
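A short usage sketch of `giou_loss` (not part of the diff; the box coordinates and expected values are borrowed from the test file below):

import tensorflow as tf
from tensorflow_addons.losses import giou_loss

boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0]])  # [ymin, xmin, ymax, xmax]
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0]])
print(giou_loss(boxes1, boxes2))              # ~1.075 for this pair
print(giou_loss(boxes1, boxes2, mode="iou"))  # ~0.875, plain IoU loss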
90 changes: 44 additions & 46 deletions tensorflow_addons/losses/giou_loss_test.py
@@ -27,95 +27,93 @@ class GIoULossTest(tf.test.TestCase, parameterized.TestCase):
"""GIoU test class."""

def test_config(self):
gl_obj = GIoULoss(
reduction=tf.keras.losses.Reduction.NONE, name='giou_loss')
self.assertEqual(gl_obj.name, 'giou_loss')
gl_obj = GIoULoss(reduction=tf.keras.losses.Reduction.NONE, name="giou_loss")
self.assertEqual(gl_obj.name, "giou_loss")
self.assertEqual(gl_obj.reduction, tf.keras.losses.Reduction.NONE)

@parameterized.named_parameters(("float16", np.float16),
("float32", np.float32),
("float64", np.float64))
@parameterized.named_parameters(
("float16", np.float16), ("float32", np.float32), ("float64", np.float64)
)
def test_iou(self, dtype):
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=dtype)
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
dtype=dtype)
expected_result = tf.constant([0.875, 1.], dtype=dtype)
loss = giou_loss(boxes1, boxes2, mode='iou')
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=dtype)
boxes2 = tf.constant(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]], dtype=dtype
)
expected_result = tf.constant([0.875, 1.0], dtype=dtype)
loss = giou_loss(boxes1, boxes2, mode="iou")
self.assertAllCloseAccordingToType(loss, expected_result)

@parameterized.named_parameters(("float16", np.float16),
("float32", np.float32),
("float64", np.float64))
@parameterized.named_parameters(
("float16", np.float16), ("float32", np.float32), ("float64", np.float64)
)
def test_giou_loss(self, dtype):
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=dtype)
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
dtype=dtype)
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=dtype)
boxes2 = tf.constant(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]], dtype=dtype
)
expected_result = tf.constant(
[1.07500000298023224, 1.9333333373069763], dtype=dtype)
[1.07500000298023224, 1.9333333373069763], dtype=dtype
)
loss = giou_loss(boxes1, boxes2)
self.assertAllCloseAccordingToType(loss, expected_result)

def test_with_integer(self):
boxes1 = tf.constant([[4, 3, 7, 5], [5, 6, 10, 7]], dtype=tf.int32)
boxes2 = tf.constant([[3, 4, 6, 8], [14, 14, 15, 15]], dtype=tf.int32)
expected_result = tf.constant(
[1.07500000298023224, 1.9333333373069763], dtype=tf.float32)
[1.07500000298023224, 1.9333333373069763], dtype=tf.float32
)
loss = giou_loss(boxes1, boxes2)
self.assertAllCloseAccordingToType(loss, expected_result)

@parameterized.named_parameters(("float16", np.float16),
("float32", np.float32),
("float64", np.float64))
@parameterized.named_parameters(
("float16", np.float16), ("float32", np.float32), ("float64", np.float64)
)
def test_different_shapes(self, dtype):
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=dtype)
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=dtype)
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0]], dtype=dtype)
tf.expand_dims(boxes1, -2)
tf.expand_dims(boxes2, 0)
expected_result = tf.constant([1.07500000298023224, 1.366071],
dtype=dtype)
expected_result = tf.constant([1.07500000298023224, 1.366071], dtype=dtype)
loss = giou_loss(boxes1, boxes2)
self.assertAllCloseAccordingToType(loss, expected_result)

boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=dtype)
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=dtype)
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0]], dtype=dtype)
tf.expand_dims(boxes1, 0)
tf.expand_dims(boxes2, -2)
expected_result = tf.constant([1.07500000298023224, 1.366071],
dtype=dtype)
expected_result = tf.constant([1.07500000298023224, 1.366071], dtype=dtype)
loss = giou_loss(boxes1, boxes2)
self.assertAllCloseAccordingToType(loss, expected_result)

@parameterized.named_parameters(("float16", np.float16),
("float32", np.float32),
("float64", np.float64))
@parameterized.named_parameters(
("float16", np.float16), ("float32", np.float32), ("float64", np.float64)
)
def test_one_bbox(self, dtype):
boxes1 = tf.constant([4.0, 3.0, 7.0, 5.0], dtype=dtype)
boxes2 = tf.constant([3.0, 4.0, 6.0, 8.0], dtype=dtype)
expected_result = tf.constant(1.07500000298023224, dtype=dtype)
loss = giou_loss(boxes1, boxes2)
self.assertAllCloseAccordingToType(loss, expected_result)

@parameterized.named_parameters(("float16", np.float16),
("float32", np.float32),
("float64", np.float64))
@parameterized.named_parameters(
("float16", np.float16), ("float32", np.float32), ("float64", np.float64)
)
def test_keras_model(self, dtype):
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=dtype)
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
dtype=dtype)
boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=dtype)
boxes2 = tf.constant(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]], dtype=dtype
)
expected_result = tf.constant(
[1.07500000298023224, 1.9333333373069763], dtype=dtype)
[1.07500000298023224, 1.9333333373069763], dtype=dtype
)
model = tf.keras.Sequential()
model.compile(
optimizer='adam',
loss=GIoULoss(reduction=tf.keras.losses.Reduction.NONE))
optimizer="adam", loss=GIoULoss(reduction=tf.keras.losses.Reduction.NONE)
)
loss = model.evaluate(boxes1, boxes2, batch_size=2, steps=1)
self.assertAllCloseAccordingToType(loss, expected_result)


if __name__ == '__main__':
if __name__ == "__main__":
tf.test.main()