In eval mode (`model.eval()`):
- `is_causal_flag = self.training`
- `self.training` is `False`, so `is_causal_flag = False` and the causal mask is silently dropped, letting the model attend to the full context at inference, unlike during training.

A sketch of one possible fix follows the code below.
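For context, `self.training` is just the mode flag that `train()` / `eval()` toggle on every `nn.Module`, which is why the mask disappears at inference. A minimal check (any module works):

```python
import torch.nn as nn

# training is a plain attribute toggled by train()/eval()
layer = nn.Linear(8, 8)
print(layer.training)   # True  -> is_causal=self.training applies the causal mask
layer.eval()
print(layer.training)   # False -> is_causal=self.training silently drops the mask
```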
```python
import torch.nn as nn
import torch.nn.functional as F

# RotaryPositionalEmbedding is defined elsewhere in the codebase.

class MultiHeadCrossAttentionWithRoPE(nn.Module):
    def __init__(self, d_model, n_heads, attn_dropout_p=0.0, resid_dropout=0.0):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.head_dim = d_model // n_heads

        self.q_proj = nn.Linear(d_model, d_model)
        self.k_proj = nn.Linear(d_model, d_model)
        self.v_proj = nn.Linear(d_model, d_model)
        self.out_proj = nn.Linear(d_model, d_model)

        self.rotary = RotaryPositionalEmbedding(self.head_dim)
        self.attn_dropout_p = attn_dropout_p
        self.resid_dropout = nn.Dropout(resid_dropout)

    def forward(self, query, key, value, key_padding_mask=None):
        batch_size, q_len, _ = query.shape
        _, seq_len, _ = key.shape

        # Project and split into heads: (B, n_heads, len, head_dim)
        q = self.q_proj(query).view(batch_size, q_len, self.n_heads, self.head_dim).transpose(1, 2)
        k = self.k_proj(key).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)
        v = self.v_proj(value).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)

        q, k = self.rotary(q, k)

        # Expand the key padding mask to (B, n_heads, q_len, seq_len)
        if key_padding_mask is not None:
            attn_mask = key_padding_mask.unsqueeze(1).unsqueeze(2)
            attn_mask = attn_mask.expand(-1, self.n_heads, q_len, -1)
        else:
            attn_mask = None

        # BUG: the causal flag follows the train/eval mode rather than the model design.
        is_causal_flag = self.training

        attn_output = F.scaled_dot_product_attention(
            q, k, v,
            attn_mask=attn_mask,
            dropout_p=self.attn_dropout_p if self.training else 0.0,
            is_causal=is_causal_flag
        )

        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, q_len, self.d_model)
        return self.resid_dropout(self.out_proj(attn_output))
```
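A minimal sketch of one possible fix (the `ToyAttention` module and its `is_causal` argument below are illustrative, not part of this repo): make the causal flag a constructor or `forward()` argument instead of deriving it from `self.training`, so that `train()` / `eval()` only affect dropout.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyAttention(nn.Module):
    """Toy single-head attention showing the pattern: the causal flag is fixed
    at construction time, so train()/eval() only changes dropout behaviour."""
    def __init__(self, d_model, is_causal=False, attn_dropout_p=0.0):
        super().__init__()
        self.qkv = nn.Linear(d_model, 3 * d_model)
        self.is_causal = is_causal            # part of the model design, not the mode
        self.attn_dropout_p = attn_dropout_p

    def forward(self, x):
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        return F.scaled_dot_product_attention(
            q, k, v,
            dropout_p=self.attn_dropout_p if self.training else 0.0,  # dropout may follow the mode
            is_causal=self.is_causal,                                  # masking should not
        )

x = torch.randn(2, 5, 16)
m = ToyAttention(16, is_causal=True)
m.eval()
print(m(x).shape)   # torch.Size([2, 5, 16]); the causal mask is still applied in eval mode
```

The same change applies to `MultiHeadCrossAttentionWithRoPE`: whether this cross-attention should be causally masked at all is a design decision, but whatever the choice, it should not flip with the train/eval mode.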