[Misc] Remove vLLM patch of BaichuanTokenizer (vllm-project#8921)
DarkLight1337 authored Sep 28, 2024
1 parent d8673b7 commit 86812a9
Showing 3 changed files with 3 additions and 273 deletions.
16 changes: 1 addition & 15 deletions vllm/transformers_utils/tokenizer.py
@@ -11,8 +11,7 @@
 from vllm.envs import VLLM_USE_MODELSCOPE
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
-from vllm.transformers_utils.tokenizers import (BaichuanTokenizer,
-                                                MistralTokenizer)
+from vllm.transformers_utils.tokenizers import MistralTokenizer
 from vllm.transformers_utils.utils import check_gguf_file
 from vllm.utils import make_async
 
@@ -139,19 +138,6 @@ def get_tokenizer(
             raise RuntimeError(err_msg) from e
         else:
             raise e
-    except AttributeError as e:
-        if "BaichuanTokenizer" in str(e):
-            # This is for the error "'BaichuanTokenizer' object has no
-            # attribute 'sp_model'".
-            tokenizer = BaichuanTokenizer.from_pretrained(
-                tokenizer_name,
-                *args,
-                trust_remote_code=trust_remote_code,
-                revision=revision,
-                **kwargs,
-            )
-        else:
-            raise e
 
     # NOTE: We can remove this after https://github.com/THUDM/ChatGLM3/issues/1324
     if type(tokenizer).__name__ in ("ChatGLMTokenizer",
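The deleted except-branch existed only to catch the "'BaichuanTokenizer' object has no attribute 'sp_model'" error raised by older remote Baichuan tokenizer code and to fall back to vLLM's vendored BaichuanTokenizer. With the patch removed, Baichuan tokenizers load through the standard Hugging Face path. A minimal sketch of that path, assuming a recent transformers release; the model name is an illustrative example, not taken from this diff:

# Sketch only: expected load path after this commit, relying on the model's
# own remote tokenizer code instead of vLLM's removed BaichuanTokenizer copy.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Chat",  # example model name (assumption)
    trust_remote_code=True,            # Baichuan ships a custom tokenizer class
)
print(tokenizer.encode("hello"))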
5 changes: 2 additions & 3 deletions vllm/transformers_utils/tokenizers/__init__.py
@@ -1,4 +1,3 @@
-from vllm.transformers_utils.tokenizers.baichuan import BaichuanTokenizer
-from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer
+from .mistral import MistralTokenizer
 
-__all__ = ["BaichuanTokenizer", "MistralTokenizer"]
+__all__ = ["MistralTokenizer"]
255 changes: 0 additions & 255 deletions vllm/transformers_utils/tokenizers/baichuan.py

This file was deleted.
