Upgrade transformers version to 4.36.0 (vllm-project#2046)
WoosukKwon authored Dec 12, 2023
1 parent f3e024b commit cb3f30c
Showing 3 changed files with 5 additions and 5 deletions.
requirements-rocm.txt: 2 changes (1 addition, 1 deletion)
@@ -10,7 +10,7 @@ numpy
tokenizers>=0.15.0
huggingface_hub<0.18,>=0.16.4
einops # Required for phi-1_5
-transformers >= 4.34.0 # Required for Mistral.
+transformers >= 4.36.0 # Required for Mixtral.
fastapi
uvicorn[standard]
pydantic == 1.10.13 # Required for OpenAI server.
requirements.txt: 2 changes (1 addition, 1 deletion)
@@ -7,7 +7,7 @@ sentencepiece # Required for LLaMA tokenizer.
numpy
einops # Required for phi-1_5
torch >= 2.1.1
-transformers >= 4.34.0 # Required for Mistral.
+transformers >= 4.36.0 # Required for Mixtral.
xformers >= 0.0.23 # Required for CUDA 12.1.
fastapi
uvicorn[standard]
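For reference, a minimal check (not part of this commit) that a local environment satisfies the new pin; packaging is already a dependency of transformers, so the import below is assumed to be available:

import transformers
from packaging import version

# Mixtral support (and the MixtralConfig import used in mixtral.py below) first shipped in 4.36.0.
assert version.parse(transformers.__version__) >= version.parse("4.36.0"), \
    "Mixtral requires transformers >= 4.36.0"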
vllm/model_executor/models/mixtral.py: 6 changes (3 additions, 3 deletions)
@@ -29,7 +29,7 @@
import torch.nn.functional as F

from torch import nn
-from transformers import MistralConfig
+from transformers import MixtralConfig

try:
import megablocks.ops as ops
@@ -395,7 +395,7 @@ class MixtralDecoderLayer(nn.Module):

def __init__(
self,
-config: MistralConfig,
+config: MixtralConfig,
) -> None:
super().__init__()
self.hidden_size = config.hidden_size
@@ -443,7 +443,7 @@ class MixtralForCausalLM(nn.Module):

def __init__(
self,
-config: MistralConfig,
+config: MixtralConfig,
linear_method: Optional[LinearMethodBase] = None,
) -> None:
super().__init__()
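Illustrative sketch (not part of the diff) of what the import swap buys: MixtralConfig exists only in transformers >= 4.36.0 and carries the mixture-of-experts fields that MistralConfig lacks; the attribute names and defaults shown are taken from the 4.36.0 release and are for orientation only.

from transformers import MixtralConfig  # replaces the previous MistralConfig import

config = MixtralConfig()               # defaults follow Mixtral-8x7B
print(config.num_local_experts)        # experts per MoE layer (8 by default)
print(config.num_experts_per_tok)      # experts routed per token (2 by default)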
