Commit acbe6e3

Author: rshaw@neuralmagic.com
Commit message: use logger.error directly
1 parent: 7c5b564

File tree (4 files changed: +9 −4 lines)

    vllm/engine/llm_engine.py
    vllm/entrypoints/openai/serving_completion.py
    vllm/transformers_utils/tokenizer_group/tokenizer_group.py
    vllm/v1/engine/async_llm.py

vllm/engine/llm_engine.py

Lines changed: 1 addition & 0 deletions

@@ -616,6 +616,7 @@ def _add_processed_request(
             decoder_inputs = processed_inputs
             encoder_inputs = None
 
+        print(f"{decoder_inputs=}")
         seq = Sequence(seq_id, decoder_inputs, block_size, eos_token_id,
                        lora_request, prompt_adapter_request)
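
The debug prints added throughout this commit use Python 3.8's f-string "=" specifier, which renders the expression text followed by the repr of its value. A minimal sketch of the output format (the sample value is made up for illustration):

# Python 3.8+: f"{expr=}" prints the expression and its repr.
decoder_inputs = {"prompt": "Hello", "prompt_token_ids": [9906]}
print(f"{decoder_inputs=}")
# -> decoder_inputs={'prompt': 'Hello', 'prompt_token_ids': [9906]}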

vllm/entrypoints/openai/serving_completion.py

Lines changed: 3 additions & 0 deletions

@@ -105,13 +105,16 @@ async def create_completion(
 
             tokenizer = await self.engine_client.get_tokenizer(lora_request)
 
+            print(f"{request.prompt=}")
             request_prompts, engine_prompts = await self._preprocess_completion(
                 request,
                 tokenizer,
                 request.prompt,
                 truncate_prompt_tokens=request.truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
+            print(f"{request_prompts=}")
+            print(f"{engine_prompts=}")
         except ValueError as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))
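
The three prints bracket the prompt-preprocessing step. A hypothetical trace for a plain string prompt (the exact structures depend on vLLM's preprocessing; the values below are assumptions, not taken from the diff):

# Hypothetical shapes only -- illustrating what the prints surface.
request_prompt = "Hello, my name is"   # request.prompt as received
request_prompts = ["Hello, my name is"]   # normalized per-request prompts
engine_prompts = [{"prompt_token_ids": [15496, 11, 616, 1438, 318]}]   # tokenized form
print(f"{request_prompt=}")
print(f"{request_prompts=}")
print(f"{engine_prompts=}")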

vllm/transformers_utils/tokenizer_group/tokenizer_group.py

Lines changed: 2 additions & 0 deletions

@@ -57,7 +57,9 @@ def encode(self,
                request_id: Optional[str] = None,
                lora_request: Optional[LoRARequest] = None) -> List[int]:
         tokenizer = self.get_lora_tokenizer(lora_request)
+        print(f"{prompt=}")
         ret = tokenizer.encode(prompt)
+        print(f"{ret=}")
         self._raise_if_input_too_long(ret, lora_request)
         return ret
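
A runnable approximation of what the two prints report, using a Hugging Face tokenizer directly (the model name is arbitrary; the instrumented method calls the same encode under the hood, per the diff):

# Standalone sketch: encode a prompt to token ids, as the
# instrumented TokenizerGroup.encode does internally.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
prompt = "Hello, my name is"
ret = tokenizer.encode(prompt)
print(f"{prompt=}")   # prompt='Hello, my name is'
print(f"{ret=}")      # e.g. ret=[15496, 11, 616, 1438, 318]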

vllm/v1/engine/async_llm.py

Lines changed: 3 additions & 4 deletions

@@ -18,7 +18,7 @@
 from vllm.transformers_utils.tokenizer import AnyTokenizer
 from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs
 from vllm.usage.usage_lib import UsageContext
-from vllm.utils import get_exception_traceback, kill_process_tree
+from vllm.utils import kill_process_tree
 from vllm.v1.engine.core_client import EngineCoreClient
 from vllm.v1.engine.detokenizer import Detokenizer
 from vllm.v1.engine.processor import Processor

@@ -293,9 +293,8 @@ async def _run_output_handler(self):
                 # 4) Abort any requests that finished due to stop strings.
                 await self.engine_core.abort_requests_async(reqs_to_abort)
 
-        except Exception:
-            traceback = get_exception_traceback()
-            logger.error("EngineCore hit an exception: %s", traceback)
+        except Exception as e:
+            logger.error(e)
             kill_process_tree(os.getpid())
 
     async def abort(self, request_id: str) -> None:
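
This last hunk is the change the commit message names: the handler now logs the exception object directly instead of formatting a traceback string first. A minimal stdlib-only sketch of the behavioral difference (logger name and message are illustrative):

# logger.error(e) records only str(e); passing exc_info=True (or using
# logger.exception) would also record the traceback.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("vllm.v1.engine")

try:
    raise RuntimeError("EngineCore hit an exception")
except Exception as e:
    logger.error(e)                    # message only, as in the diff
    # logger.error(e, exc_info=True)   # variant that also keeps the traceback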
