
Commit

remove unnecessary kwargs from JambaAttentionDecoderLayer and JambaMambaDecoderLayer
tomeras91 committed Mar 31, 2024
1 parent 59d832a commit ce8b476
Showing 1 changed file with 0 additions and 2 deletions: src/transformers/models/jamba/modeling_jamba.py
@@ -1242,7 +1242,6 @@ def forward(
         output_attentions: Optional[bool] = False,
         output_router_logits: Optional[bool] = False,
         use_cache: Optional[bool] = False,
-        **kwargs,
     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
         if "padding_mask" in kwargs:
             warnings.warn(
@@ -1321,7 +1320,6 @@ def forward(
         output_attentions: Optional[bool] = False,
         output_router_logits: Optional[bool] = False,
         use_cache: Optional[bool] = False,
-        **kwargs,
     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
         if "padding_mask" in kwargs:
             warnings.warn(
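
For reference, here is a minimal runnable sketch of the pattern this commit applies: dropping an unused **kwargs catch-all from a decoder layer's forward signature so that only the keyword arguments the layer actually handles are accepted. ToyDecoderLayer and its shapes are hypothetical; this is not the Jamba implementation.

# Minimal sketch (hypothetical ToyDecoderLayer, not the Jamba code): a forward()
# that lists only the keyword arguments it actually uses, instead of swallowing
# extras with an unused **kwargs.
from typing import Optional, Tuple

import torch
from torch import nn


class ToyDecoderLayer(nn.Module):
    def __init__(self, hidden_size: int = 16):
        super().__init__()
        self.proj = nn.Linear(hidden_size, hidden_size)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        # No **kwargs: unexpected keyword arguments now raise a TypeError
        # instead of being silently ignored.
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        hidden_states = self.proj(hidden_states)
        # Placeholder: a real layer would return its attention weights here.
        attn_weights = torch.empty(0) if output_attentions else None
        return hidden_states, attn_weights


layer = ToyDecoderLayer()
out, _ = layer(torch.randn(1, 4, 16), use_cache=False)
print(out.shape)  # torch.Size([1, 4, 16])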
