[Misc][Bugfix] Disable guided decoding for mistral tokenizer (#8521)
Author: ywang96 · Sep 17, 2024
Parent: 1c1bb38 · Commit: ee2bcea
1 changed file with 23 additions and 0 deletions.
vllm/model_executor/guided_decoding/__init__.py (+23, −0)
@@ -6,6 +6,7 @@
 from vllm.model_executor.guided_decoding.guided_fields import (
     GuidedDecodingRequest)
 from vllm.sampling_params import LogitsProcessor
+from vllm.transformers_utils.tokenizer import MistralTokenizer
 
 
 async def get_guided_decoding_logits_processor(
@@ -15,12 +16,23 @@ async def get_guided_decoding_logits_processor(
     request = _adapt_request_for_tool_use(request)
 
     if guided_decoding_backend == 'outlines':
+        if isinstance(tokenizer, MistralTokenizer):
+            raise NotImplementedError(
+                "Guided decoding with 'outlines' is currently not supported "
+                "for Mistral tokenizer. Please consider contributing to the "
+                "'outlines' project if you are interested in this feature.")
         # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193
         from vllm.model_executor.guided_decoding.outlines_decoding import (  # noqa
             get_outlines_guided_decoding_logits_processor)
         return await get_outlines_guided_decoding_logits_processor(
             request, tokenizer)
     if guided_decoding_backend == 'lm-format-enforcer':
+        if isinstance(tokenizer, MistralTokenizer):
+            raise NotImplementedError(
+                "Guided decoding with 'lm-format-enforcer' is currently not "
+                "supported for Mistral tokenizer. Please consider contributing "
+                "to the 'lm-format-enforcer' project if you are interested "
+                "in this feature.")
         from vllm.model_executor.guided_decoding.lm_format_enforcer_decoding import (  # noqa
             get_lm_format_enforcer_guided_decoding_logits_processor)
         return await get_lm_format_enforcer_guided_decoding_logits_processor(
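The same isinstance guard is inlined in all four backend branches, two per entry point. Purely as an illustrative sketch (not part of this commit, and the helper name is hypothetical), the check could be hoisted into one function:

from vllm.transformers_utils.tokenizer import MistralTokenizer


def _reject_mistral_tokenizer(backend: str, tokenizer) -> None:
    # Fail fast before any backend import: neither 'outlines' nor
    # 'lm-format-enforcer' can consume vLLM's Mistral tokenizer yet.
    if isinstance(tokenizer, MistralTokenizer):
        raise NotImplementedError(
            f"Guided decoding with '{backend}' is currently not supported "
            f"for Mistral tokenizer. Please consider contributing to the "
            f"'{backend}' project if you are interested in this feature.")

Each branch would then reduce to a single _reject_mistral_tokenizer(guided_decoding_backend, tokenizer) call ahead of its lazy import. The second hunk below applies the identical guard to the local (offline) entry point.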
@@ -37,12 +49,23 @@ def get_local_guided_decoding_logits_processor(
     # request = _adapt_request_for_tool_use(request)
 
     if guided_decoding_backend == 'outlines':
+        if isinstance(tokenizer, MistralTokenizer):
+            raise NotImplementedError(
+                "Guided decoding with 'outlines' is currently not supported "
+                "for Mistral tokenizer. Please consider contributing to the "
+                "'outlines' project if you are interested in this feature.")
         # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193
         from vllm.model_executor.guided_decoding.outlines_decoding import (  # noqa
             get_local_outlines_guided_decoding_logits_processor)
         return get_local_outlines_guided_decoding_logits_processor(
             guided_options, tokenizer)
     if guided_decoding_backend == 'lm-format-enforcer':
+        if isinstance(tokenizer, MistralTokenizer):
+            raise NotImplementedError(
+                "Guided decoding with 'lm-format-enforcer' is currently not "
+                "supported for Mistral tokenizer. Please consider contributing "
+                "to the 'lm-format-enforcer' project if you are interested "
+                "in this feature.")
         from vllm.model_executor.guided_decoding.lm_format_enforcer_decoding import (  # noqa
             get_local_lm_format_enforcer_guided_decoding_logits_processor)
         return get_local_lm_format_enforcer_guided_decoding_logits_processor(
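A minimal caller-side sketch of the new behavior on the offline path. Everything below is assumed from the surrounding diff rather than verified: the argument order of get_local_guided_decoding_logits_processor, the MistralTokenizer.from_pretrained constructor, the guided_regex field on GuidedDecodingRequest, and the model name are all illustrative.

from vllm.model_executor.guided_decoding import (
    get_local_guided_decoding_logits_processor)
from vllm.model_executor.guided_decoding.guided_fields import (
    GuidedDecodingRequest)
from vllm.transformers_utils.tokenizer import MistralTokenizer

# Hypothetical setup: any model that ships a Mistral-format tokenizer.
tokenizer = MistralTokenizer.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.3")
options = GuidedDecodingRequest(guided_regex=r"\d{4}-\d{2}-\d{2}")

try:
    # Assumed argument order: (backend, tokenizer, guided_options).
    get_local_guided_decoding_logits_processor(
        "lm-format-enforcer", tokenizer, options)
except NotImplementedError as exc:
    print(exc)  # surfaces the message added in this commit

With either backend string, the guard raises before the backend's lazy import runs, so callers get this explicit message instead of a backend-internal failure.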
