add back self.max_position_embeddings = config.max_position_embeddings (huggingface#33550)

* add back self.max_position_embeddings = config.max_position_embeddings

* fix-copies
chengchengpei authored and amyeroberts committed Oct 2, 2024
1 parent 905948c commit cb26ace
Showing 2 changed files with 2 additions and 0 deletions.
1 change: 1 addition & 0 deletions src/transformers/models/qwen2/modeling_qwen2.py
@@ -310,6 +310,7 @@ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
         self.head_dim = self.hidden_size // self.num_heads
         self.num_key_value_heads = config.num_key_value_heads
         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+        self.max_position_embeddings = config.max_position_embeddings
         self.rope_theta = config.rope_theta
         self.is_causal = True
         self.attention_dropout = config.attention_dropout
1 change: 1 addition & 0 deletions src/transformers/models/qwen2_moe/modeling_qwen2_moe.py
@@ -388,6 +388,7 @@ def __init__(self, config: Qwen2MoeConfig, layer_idx: Optional[int] = None):
         self.head_dim = self.hidden_size // self.num_heads
         self.num_key_value_heads = config.num_key_value_heads
         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+        self.max_position_embeddings = config.max_position_embeddings
         self.rope_theta = config.rope_theta
         self.is_causal = True
         self.attention_dropout = config.attention_dropout
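The change itself is a one-line restoration of a config-derived attribute on the attention modules. The sketch below illustrates the kind of downstream code this keeps working: inspection code that reads max_position_embeddings directly from each attention layer rather than from the config. The checkpoint name is an illustrative assumption, not part of the commit.

    # Minimal sketch (assumption: the "Qwen/Qwen2-0.5B" checkpoint is reachable
    # via the Hub or a local cache). It reads the attribute this commit re-adds.
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B")

    # Each decoder layer's attention module now exposes max_position_embeddings
    # again; without the re-added line this attribute access raises AttributeError.
    attn = model.model.layers[0].self_attn
    assert attn.max_position_embeddings == model.config.max_position_embeddings
    print(attn.max_position_embeddings)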
