
Add some changes to make ds-inference work with MoE model #1

Draft: wants to merge 5 commits into base: master

Changes from 1 commit
add tp-support for mixtral
RezaYazdaniAminabadi committed Jan 25, 2024
commit 073fc2345b834e9216aae4c2c2198c7c2eecf76d

src/transformers/modeling_utils.py: 3 changes (2 additions & 1 deletion)
@@ -2475,7 +2475,8 @@ def save_pretrained(
             if safe_serialization:
                 # At some point we will need to deal better with save_function (used for TPU and other distributed
                 # joyfulness), but for now this enough.
-                safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"})
+                safe_save_file(shard, os.path.join(save_directory,
+                    f"{shard_file.split('.')[0]}-tp_{(torch.distributed.get_rank() if torch.distributed.is_initialized() else 0):0>2d}.safetensors"), metadata={"format": "pt"})
             else:
                 save_function(shard, os.path.join(save_directory, shard_file))

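A note on the shard-saving change above: with tensor parallelism each rank holds only its slice of the weights, so every rank writes its own safetensors shard tagged with a zero-padded rank id. Below is a minimal sketch of that naming scheme, reusing the same f-string as the patch; the helper name tp_shard_path is hypothetical and not part of the diff.

import os

import torch.distributed as dist


def tp_shard_path(save_directory: str, shard_file: str) -> str:
    # Fall back to rank 0 when torch.distributed has not been initialized.
    rank = dist.get_rank() if dist.is_initialized() else 0
    base = shard_file.split(".")[0]  # e.g. "model-00001-of-00019"
    return os.path.join(save_directory, f"{base}-tp_{rank:0>2d}.safetensors")


# Example: tp_shard_path("out", "model-00001-of-00019.safetensors")
# returns "out/model-00001-of-00019-tp_00.safetensors" on rank 0.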

src/transformers/models/mixtral/configuration_mixtral.py: 2 changes (2 additions & 0 deletions)
@@ -135,6 +135,7 @@ def __init__(
         output_router_logits=False,
         router_aux_loss_coef=0.001,
         moe_layer_frequency=1,
+        tp_size=1,
         **kwargs,
     ):
         self.vocab_size = vocab_size
@@ -162,6 +163,7 @@ def __init__(
         self.output_router_logits = output_router_logits
         self.router_aux_loss_coef = router_aux_loss_coef
         self.moe_layer_frequency = moe_layer_frequency
+        self.tp_size = tp_size
         super().__init__(
             pad_token_id=pad_token_id,
             bos_token_id=bos_token_id,
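
A note on the configuration change above: tp_size defaults to 1, so existing configs keep the stock, unsharded layer shapes; sharding only kicks in when a caller sets a larger value. A minimal usage sketch with this patch applied; the dimensions are the usual Mixtral-8x7B values and are shown only for illustration.

from transformers import MixtralConfig

config = MixtralConfig(
    hidden_size=4096,
    intermediate_size=14336,
    num_attention_heads=32,
    num_key_value_heads=8,
    tp_size=2,  # shard attention and expert weights across 2 tensor-parallel ranks
)

# The sharded Linear shapes in modeling_mixtral.py only make sense when the head
# counts and the FFN width divide evenly across ranks.
assert config.num_attention_heads % config.tp_size == 0
assert config.num_key_value_heads % config.tp_size == 0
assert config.intermediate_size % config.tp_size == 0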

src/transformers/models/mixtral/modeling_mixtral.py: 14 changes (7 additions & 7 deletions)
@@ -270,10 +270,10 @@ def __init__(self, config: MixtralConfig, layer_idx: Optional[int] = None):
                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                 f" and `num_heads`: {self.num_heads})."
             )
-        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
-        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
-        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+        self.q_proj = nn.Linear(self.hidden_size, self.num_heads // config.tp_size * self.head_dim, bias=False)
+        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads // config.tp_size * self.head_dim, bias=False)
+        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads // config.tp_size * self.head_dim, bias=False)
+        self.o_proj = nn.Linear(self.num_heads // config.tp_size * self.head_dim, self.hidden_size, bias=False)

         self.rotary_emb = MixtralRotaryEmbedding(
             self.head_dim,
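
A note on the attention change above: each rank now keeps num_heads // tp_size query heads and num_key_value_heads // tp_size key/value heads, i.e. q_proj/k_proj/v_proj are sharded column-parallel and o_proj row-parallel in the usual Megatron-style layout; the matching all-reduce over o_proj's output is not part of this diff and is presumably handled by the inference engine. A quick shape check with illustrative Mixtral-8x7B sizes (not code from the patch):

hidden_size = 4096
num_heads = 32
num_key_value_heads = 8
head_dim = hidden_size // num_heads  # 128
tp_size = 2

# Heads must divide evenly across tensor-parallel ranks for this sharding to work.
assert num_heads % tp_size == 0 and num_key_value_heads % tp_size == 0

q_out = num_heads // tp_size * head_dim             # 2048 output features per rank
kv_out = num_key_value_heads // tp_size * head_dim  # 512 output features per rank
o_in = num_heads // tp_size * head_dim              # 2048 input features per rank
print(q_out, kv_out, o_in)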

@@ -762,9 +762,9 @@ def __init__(self, config: MixtralConfig):
         self.ffn_dim = config.intermediate_size
         self.hidden_dim = config.hidden_size

-        self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
-        self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
-        self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+        self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim // config.tp_size, bias=False)
+        self.w2 = nn.Linear(self.ffn_dim // config.tp_size, self.hidden_dim, bias=False)
+        self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim // config.tp_size, bias=False)

         self.act_fn = ACT2FN[config.hidden_act]

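A note on the expert change above: each expert's w1/w3 are sharded on their output dimension and w2 on its input dimension, so every rank computes a partial expert output that already has the full hidden size. A minimal sketch of one sharded expert with illustrative sizes and tp_size=2; this is not code from the patch, and the cross-rank all-reduce of the partial result is left out.

import torch
import torch.nn as nn

hidden_dim, ffn_dim, tp_size = 4096, 14336, 2
assert ffn_dim % tp_size == 0

w1 = nn.Linear(hidden_dim, ffn_dim // tp_size, bias=False)  # column-parallel
w3 = nn.Linear(hidden_dim, ffn_dim // tp_size, bias=False)  # column-parallel
w2 = nn.Linear(ffn_dim // tp_size, hidden_dim, bias=False)  # row-parallel

x = torch.randn(1, hidden_dim)
# Same SwiGLU-style forward as the Mixtral expert MLP; each rank produces a
# partial sum over its slice of the FFN dimension.
partial = w2(nn.functional.silu(w1(x)) * w3(x))
print(partial.shape)  # torch.Size([1, 4096])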