Skip to content

Commit 3cdc576

Browse files
jeejeelee and Isotr0py
authored
[Misc] Delete redundant code (#16530)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com> Co-authored-by: Isotr0py <mozf@mail2.sysu.edu.cn>
1 parent 68bb122 commit 3cdc576

File tree

2 files changed

+0
-14
lines changed

2 files changed

+0
-14
lines changed

examples/offline_inference/vision_language.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1096,13 +1096,6 @@ def main(args):
10961096
}
10971097
llm = LLM(**engine_args)
10981098

1099-
# To maintain code compatibility in this script, we add LoRA here.
1100-
# You can also add LoRA using:
1101-
# llm.generate(prompts, lora_request=lora_request,...)
1102-
if req_data.lora_requests:
1103-
for lora_request in req_data.lora_requests:
1104-
llm.llm_engine.add_lora(lora_request=lora_request)
1105-
11061099
# Don't want to check the flag multiple times, so just hijack `prompts`.
11071100
prompts = req_data.prompts if args.use_different_prompt_per_request else [
11081101
req_data.prompts[0]

examples/offline_inference/vision_language_multi_image.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -695,13 +695,6 @@ def run_chat(model: str, question: str, image_urls: list[str],
695695
engine_args = asdict(req_data.engine_args) | {"seed": seed}
696696
llm = LLM(**engine_args)
697697

698-
# To maintain code compatibility in this script, we add LoRA here.
699-
# You can also add LoRA using:
700-
# llm.generate(prompts, lora_request=lora_request,...)
701-
if req_data.lora_requests:
702-
for lora_request in req_data.lora_requests:
703-
llm.llm_engine.add_lora(lora_request=lora_request)
704-
705698
sampling_params = SamplingParams(temperature=0.0,
706699
max_tokens=256,
707700
stop_token_ids=req_data.stop_token_ids)

0 commit comments

Comments (0)