Making TF MobileBert model compliant with AMP (huggingface#10259)
* Fix AMP

* Trigger CI

* Rework cast
jplu authored Feb 19, 2021
1 parent 2fc6284 commit fb56bf2
Showing 2 changed files with 15 additions and 16 deletions.
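
For context: under TensorFlow's automatic mixed precision (AMP), Keras layers compute in float16 while variables stay in float32, so any tensor hard-coded to tf.float32 (as `dk` and the extended attention mask were) no longer matches the dtype of the surrounding activations and the ops that combine them fail. Below is a minimal sketch, with made-up shapes and stand-in tensors (not the model's actual code path), of the failure mode and of the "follow the activations' dtype" cast this commit applies throughout:

    import tensorflow as tf

    # Enable automatic mixed precision: Keras layers compute in float16.
    tf.keras.mixed_precision.set_global_policy("mixed_float16")

    # Stand-ins for the query/key activations, which are float16 under AMP.
    query_layer = tf.random.normal((1, 4, 8, 16), dtype=tf.float16)
    key_layer = tf.random.normal((1, 4, 8, 16), dtype=tf.float16)

    attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)

    # Old code: dk = tf.cast(..., tf.float32) produced a float32 scalar, and
    # dividing the float16 scores by it raises a dtype mismatch error.
    # New code: cast to the dtype of the tensor it will be combined with.
    dk = tf.cast(tf.shape(key_layer)[-1], dtype=attention_scores.dtype)
    attention_scores = attention_scores / tf.math.sqrt(dk)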
27 changes: 15 additions & 12 deletions src/transformers/models/mobilebert/modeling_tf_mobilebert.py
@@ -251,11 +251,12 @@ def call(
         attention_scores = tf.matmul(
             query_layer, key_layer, transpose_b=True
         )  # (batch size, num_heads, seq_len_q, seq_len_k)
-        dk = tf.cast(shape_list(key_layer)[-1], tf.float32)  # scale attention_scores
+        dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype)  # scale attention_scores
         attention_scores = attention_scores / tf.math.sqrt(dk)

         if attention_mask is not None:
-            # Apply the attention mask is (precomputed for all layers in TFBertModel call() function)
+            # Apply the attention mask is (precomputed for all layers in TFMobileBertModel call() function)
+            attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
             attention_scores = attention_scores + attention_mask

         # Normalize the attention scores to probabilities.
@@ -726,6 +727,14 @@ def call(
         if inputs["token_type_ids"] is None:
             inputs["token_type_ids"] = tf.fill(input_shape, 0)

+        embedding_output = self.embeddings(
+            inputs["input_ids"],
+            inputs["position_ids"],
+            inputs["token_type_ids"],
+            inputs["inputs_embeds"],
+            training=inputs["training"],
+        )
+
         # We create a 3D attention mask from a 2D tensor mask.
         # Sizes are [batch_size, 1, 1, to_seq_length]
         # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
@@ -738,9 +747,10 @@
         # positions we want to attend and -10000.0 for masked positions.
         # Since we are adding it to the raw scores before the softmax, this is
         # effectively the same as removing these entirely.
-
-        extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)
-        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+        extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+        one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+        ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+        extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)

         # Prepare head mask if needed
         # 1.0 in head_mask indicate we keep the head
@@ -752,13 +762,6 @@ def call(
         else:
             inputs["head_mask"] = [None] * self.num_hidden_layers

-        embedding_output = self.embeddings(
-            inputs["input_ids"],
-            inputs["position_ids"],
-            inputs["token_type_ids"],
-            inputs["inputs_embeds"],
-            training=inputs["training"],
-        )
         encoder_outputs = self.encoder(
             embedding_output,
             extended_attention_mask,
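The remaining hunks move the embedding lookup ahead of the mask computation (so `embedding_output.dtype` is available) and rebuild the additive mask from constants in that dtype rather than hard-coded float32 literals; -10000.0 is still representable in float16, so the "(1 - mask) * -10000.0" trick keeps working under AMP. A standalone sketch of that arithmetic, using a hypothetical helper name and a 2D mask (not the layer's actual signature):

    import tensorflow as tf

    def extended_attention_mask(attention_mask, compute_dtype):
        # Hypothetical standalone version of the reworked mask arithmetic.
        mask = tf.cast(attention_mask[:, tf.newaxis, tf.newaxis, :], dtype=compute_dtype)
        one_cst = tf.constant(1.0, dtype=compute_dtype)
        ten_thousand_cst = tf.constant(-10000.0, dtype=compute_dtype)
        # 0.0 for positions to attend to, -10000.0 for masked positions.
        return tf.multiply(tf.subtract(one_cst, mask), ten_thousand_cst)

    mask = tf.constant([[1, 1, 1, 0]])
    print(extended_attention_mask(mask, tf.float16))  # last position -> -10000.0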
4 changes: 0 additions & 4 deletions tests/test_modeling_tf_mobilebert.py
@@ -310,10 +310,6 @@ def test_saved_model_creation(self):
         # This test is too long (>30sec) and makes fail the CI
         pass

-    def test_mixed_precision(self):
-        # TODO JP: Make MobileBert float16 compliant
-        pass
-
     @slow
     def test_model_from_pretrained(self):
         # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
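On the test side, the override that skipped test_mixed_precision ("TODO JP: Make MobileBert float16 compliant") is deleted, so the shared mixed-precision test from the common TF test suite runs again for MobileBert. A rough, hypothetical smoke test of the same behaviour (not the repo's actual test code):

    import tensorflow as tf
    from transformers import MobileBertConfig, TFMobileBertModel

    tf.keras.mixed_precision.set_global_policy("mixed_float16")

    model = TFMobileBertModel(MobileBertConfig())  # randomly initialised, default config
    input_ids = tf.constant([[101, 2023, 2003, 1037, 3231, 102]])
    outputs = model(input_ids)  # should no longer hit a float32/float16 mismatch

    print(outputs.last_hidden_state.dtype)  # typically float16 under the mixed policy

    tf.keras.mixed_precision.set_global_policy("float32")  # restore the default policy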
