
Commit ac58937

[Attention Mask] Refactor all encoder-decoder attention mask (#27086)
* [FA2 Bart] Add FA2 to all Bart-like
* better
* Refactor attention mask
* remove all customized attention logic
* format
* mass rename
* replace _expand_mask
* replace _expand_mask
* mass rename
* add pt files
* mass replace & rename
* mass replace & rename
* mass replace & rename
* mass replace & rename
* Update src/transformers/models/idefics/modeling_idefics.py
* fix more
* clean more
* fix more
* make style
* fix again
* finish
* finish
* finish
* finish
* finish
* finish
* finish
* finish
* finish
* finish
* Apply suggestions from code review
* Apply suggestions from code review

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* small fix mistral
* finish
* finish
* finish
* finish

---------

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
1 parent 29c74f5 commit ac58937
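In short, the commit replaces the per-model mask helpers (`_expand_mask`, `_make_causal_mask`, `_prepare_decoder_attention_mask`) with shared utilities in `modeling_attn_mask_utils`. A minimal sketch of the new call-site pattern, with illustrative tensors and names, assuming a `transformers` build that includes this commit:

import torch

from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask

# 2D padding mask: (batch_size, key_value_length); 1 = attend, 0 = padding
attention_mask = torch.tensor([[1, 1, 1, 0]])
inputs_embeds = torch.randn(1, 4, 16)

# Before this commit a model-local helper was used, e.g.:
#     attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
# After the refactor, models call the shared helper instead:
attention_mask_4d = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
print(attention_mask_4d.shape)  # torch.Size([1, 1, 4, 4])

The causal counterpart, `_prepare_4d_causal_attention_mask`, is defined in the new module shown below.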

55 files changed, +647 -2879 lines changed

src/transformers/modeling_attn_mask_utils.py

Lines changed: 247 additions & 0 deletions
@@ -0,0 +1,247 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union

import torch


class AttentionMaskConverter:
    """
    A utility attention mask class that allows one to:
        - Create a causal 4d mask
        - Create a causal 4d mask with slided window
        - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length,
          key_value_length) that can be multiplied with attention scores

    Parameters:
        is_causal (`bool`):
            Whether the attention mask should be a uni-directional (causal) or bi-directional mask.

        sliding_window (`int`, *optional*):
            Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer.
    """

    def __init__(self, is_causal: bool, sliding_window: Optional[int] = None):
        self.is_causal = is_causal
        self.sliding_window = sliding_window

        if self.sliding_window is not None and self.sliding_window <= 0:
            raise ValueError(
                f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`"
            )

    def to_causal_4d(
        self,
        batch_size: int,
        query_length: int,
        key_value_length: int,
        dtype: torch.dtype = torch.float32,
        device: Union[torch.device, "str"] = "cpu",
    ) -> torch.Tensor:
        """
        Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative
        bias to upper right hand triangular matrix (causal mask).
        """
        if not self.is_causal:
            raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.")

        # If shape is not cached, create a new causal mask and cache it
        input_shape = (batch_size, query_length)
        past_key_values_length = key_value_length - query_length

        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        causal_4d_mask = None
        if input_shape[-1] > 1 or self.sliding_window is not None:
            causal_4d_mask = self._make_causal_mask(
                input_shape,
                dtype,
                device=device,
                past_key_values_length=past_key_values_length,
                sliding_window=self.sliding_window,
            )

        return causal_4d_mask

    def to_4d(
        self,
        attention_mask_2d: torch.Tensor,
        query_length: int,
        key_value_length: Optional[int] = None,
        dtype: torch.dtype = torch.float32,
    ) -> torch.Tensor:
        """
        Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length,
        key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is
        causal, a causal mask will be added.
        """
        input_shape = (attention_mask_2d.shape[0], query_length)

        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        causal_4d_mask = None
        if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal:
            if key_value_length is None:
                raise ValueError(
                    "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask."
                )

            past_key_values_length = key_value_length - query_length
            causal_4d_mask = self._make_causal_mask(
                input_shape,
                dtype,
                device=attention_mask_2d.device,
                past_key_values_length=past_key_values_length,
                sliding_window=self.sliding_window,
            )
        elif self.sliding_window is not None:
            raise NotImplementedError("Sliding window is currently only implemented for causal masking")

        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(
            attention_mask_2d.device
        )
        expanded_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask

        return expanded_4d_mask

    @staticmethod
    def _make_causal_mask(
        input_ids_shape: torch.Size,
        dtype: torch.dtype,
        device: torch.device,
        past_key_values_length: int = 0,
        sliding_window: Optional[int] = None,
    ):
        """
        Make causal mask used for bi-directional self-attention.
        """
        bsz, tgt_len = input_ids_shape
        mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
        mask_cond = torch.arange(mask.size(-1), device=device)
        mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)

        mask = mask.to(dtype)

        if past_key_values_length > 0:
            mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)

        # add lower triangular sliding window mask if necessary
        if sliding_window is not None:
            diagonal = past_key_values_length - sliding_window + 1

            context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)
            mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min)

        return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)

    @staticmethod
    def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
        """
        Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
        """
        bsz, src_len = mask.size()
        tgt_len = tgt_len if tgt_len is not None else src_len

        expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

        inverted_mask = 1.0 - expanded_mask

        return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)


def _prepare_4d_causal_attention_mask(
    attention_mask: Optional[torch.Tensor],
    input_shape: Union[torch.Size, Tuple, List],
    inputs_embeds: torch.Tensor,
    past_key_values_length: int,
    sliding_window: Optional[int] = None,
):
    """
    Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
    `(batch_size, key_value_length)`

    Args:
        attention_mask (`torch.Tensor` or `None`):
            A 2D attention mask of shape `(batch_size, key_value_length)`
        input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
            The input shape should be a tuple that defines `(batch_size, query_length)`.
        inputs_embeds (`torch.Tensor`):
            The embedded inputs as a torch Tensor.
        past_key_values_length (`int`):
            The length of the key value cache.
        sliding_window (`int`, *optional*):
            If the model uses windowed attention, a sliding window should be passed.
    """
    attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)

    key_value_length = input_shape[-1] + past_key_values_length

    # 4d mask is passed through the layers
    if attention_mask is not None:
        attention_mask = attn_mask_converter.to_4d(
            attention_mask, input_shape[-1], key_value_length, dtype=inputs_embeds.dtype
        )
    else:
        attention_mask = attn_mask_converter.to_causal_4d(
            input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )

    return attention_mask


def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
    `(batch_size, key_value_length)`

    Args:
        mask (`torch.Tensor` or `None`):
            A 2D attention mask of shape `(batch_size, key_value_length)`
        dtype (`torch.dtype`):
            The torch dtype the created mask shall have.
        tgt_len (`int`):
            The target length or query length the created mask shall have.
    """
    return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)


def _create_4d_causal_attention_mask(
    input_shape: Union[torch.Size, Tuple, List],
    dtype: torch.dtype,
    device: torch.device,
    past_key_values_length: int = 0,
    sliding_window: Optional[int] = None,
):
    """
    Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)`

    Args:
        input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
            The input shape should be a tuple that defines `(batch_size, query_length)`.
        dtype (`torch.dtype`):
            The torch dtype the created mask shall have.
        device (`int`):
            The torch device the created mask shall have.
        sliding_window (`int`, *optional*):
            If the model uses windowed attention, a sliding window should be passed.
    """
    attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)

    key_value_length = past_key_values_length + input_shape[-1]
    attention_mask = attn_mask_converter.to_causal_4d(
        input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device
    )

    return attention_mask
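A short usage sketch of the new module (not part of the diff; assumes the `transformers` version that ships this file), exercising both the padding-plus-causal path and the sliding-window path:

import torch

from transformers.modeling_attn_mask_utils import AttentionMaskConverter

# Padding + causal: batch of 1, 4 query tokens, last key position is padding
converter = AttentionMaskConverter(is_causal=True)
mask_4d = converter.to_4d(
    torch.tensor([[1, 1, 1, 0]]), query_length=4, key_value_length=4, dtype=torch.float32
)
print(mask_4d.shape)  # torch.Size([1, 1, 4, 4])

# Sliding-window causal mask built directly from shapes (no 2D mask needed)
sw_converter = AttentionMaskConverter(is_causal=True, sliding_window=2)
sw_mask = sw_converter.to_causal_4d(
    batch_size=1, query_length=4, key_value_length=4, dtype=torch.float32, device="cpu"
)
print(sw_mask.shape)  # torch.Size([1, 1, 4, 4]); each query attends to at most 2 keys

Both helpers return additive biases: 0.0 where attention is allowed and roughly the dtype minimum on future or padded positions, so the result can be added directly to raw attention scores.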

src/transformers/models/autoformer/modeling_autoformer.py

Lines changed: 5 additions & 58 deletions
@@ -26,6 +26,7 @@
 from torch import nn

 from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
 from ...modeling_outputs import (
     BaseModelOutput,
     ModelOutput,
@@ -357,39 +358,6 @@ def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.
     return -input.log_prob(target)


-# Copied from transformers.models.bart.modeling_bart._make_causal_mask
-def _make_causal_mask(
-    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
-):
-    """
-    Make causal mask used for bi-directional self-attention.
-    """
-    bsz, tgt_len = input_ids_shape
-    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
-    mask_cond = torch.arange(mask.size(-1), device=device)
-    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
-    mask = mask.to(dtype)
-
-    if past_key_values_length > 0:
-        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
-    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
-
-
-# Copied from transformers.models.bart.modeling_bart._expand_mask
-def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
-    """
-    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
-    """
-    bsz, src_len = mask.size()
-    tgt_len = tgt_len if tgt_len is not None else src_len
-
-    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
-    inverted_mask = 1.0 - expanded_mask
-
-    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
-
-
 # Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Autoformer
 class AutoformerSinusoidalPositionalEmbedding(nn.Embedding):
     """This module produces sinusoidal positional embeddings of any length."""
@@ -1176,7 +1144,7 @@ def forward(
         # expand attention_mask
         if attention_mask is not None:
             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
+            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

         encoder_states = () if output_hidden_states else None
         all_attentions = () if output_attentions else None
@@ -1262,29 +1230,6 @@ def __init__(self, config: AutoformerConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
-        # create causal mask
-        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-        combined_attention_mask = None
-        if input_shape[-1] > 1:
-            combined_attention_mask = _make_causal_mask(
-                input_shape,
-                inputs_embeds.dtype,
-                device=inputs_embeds.device,
-                past_key_values_length=past_key_values_length,
-            ).to(inputs_embeds.device)
-
-        if attention_mask is not None:
-            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
-                inputs_embeds.device
-            )
-            combined_attention_mask = (
-                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
-            )
-
-        return combined_attention_mask
-
     def forward(
         self,
         trend: Optional[torch.Tensor] = None,
@@ -1374,7 +1319,9 @@ def forward(
         # expand encoder attention mask
         if encoder_hidden_states is not None and encoder_attention_mask is not None:
             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
+            encoder_attention_mask = _prepare_4d_attention_mask(
+                encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+            )

         hidden_states = self.value_embedding(inputs_embeds)
         embed_pos = self.embed_positions(
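The hunks shown above switch the encoder-side (cross-attention) mask to `_prepare_4d_attention_mask`; the deleted `_prepare_decoder_attention_mask` is presumably superseded by the shared causal helper inside the decoder's `forward`, which is not visible in the truncated diff. A hedged, standalone sketch of that pattern (shapes and variable names are illustrative, not lines from this commit):

import torch

from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask

batch_size, tgt_len, d_model = 2, 6, 32
inputs_embeds = torch.randn(batch_size, tgt_len, d_model)
decoder_attention_mask = torch.ones(batch_size, tgt_len, dtype=torch.long)
past_key_values_length = 0

# Stands in for the removed per-model _prepare_decoder_attention_mask: one call
# builds the combined causal + padding 4D mask.
attention_mask_4d = _prepare_4d_causal_attention_mask(
    decoder_attention_mask, (batch_size, tgt_len), inputs_embeds, past_key_values_length
)
print(attention_mask_4d.shape)  # torch.Size([2, 1, 6, 6])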
