
Commit 9c62db0

DamonFoolywang96 and Roger Wang authored
[Model] Support Qwen-VL and Qwen-VL-Chat models with text-only inputs (#5710)
Co-authored-by: Roger Wang <ywang@roblox.com>
1 parent cf90ae0 commit 9c62db0

File tree

1 file changed: +10 −0 lines changed

vllm/model_executor/models/qwen.py

Lines changed: 10 additions & 0 deletions
@@ -28,6 +28,7 @@
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.sequence import SamplerOutput
+from vllm.utils import print_warning_once
 
 
 class QWenMLP(nn.Module):
@@ -288,6 +289,15 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
                 # Skip loading extra bias for GPTQ models.
                 if name.endswith(".bias") and name not in params_dict:
                     continue
+                # Skip loading visual weights to support Qwen-VL models
+                # in cases with text-only inputs
+                # TODO: add support for Qwen-VL
+                if (name not in params_dict
+                        and name.startswith("transformer.visual.")):
+                    print_warning_once(
+                        "Only text inputs are allowed. Images won't be handled "
+                        "until Qwen-VL models are fully supported.")
+                    continue
                 param = params_dict[name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)
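For context, the added branch only affects checkpoint loading: when a Qwen-VL or Qwen-VL-Chat checkpoint is loaded into the text-only Qwen model, its transformer.visual.* tensors have no matching entry in params_dict, so they are dropped with a one-time warning instead of raising a KeyError. Below is a minimal, self-contained sketch of that filtering behaviour. The filter_text_only_weights helper and the checkpoint names are illustrative, not part of vLLM, and print_warning_once is approximated with a local stand-in so the snippet runs on its own.

# Standalone sketch (not part of the commit) of the weight-name filtering
# the diff above adds to load_weights. Names below are illustrative examples.
from typing import Dict, Iterable

_warned = set()

def print_warning_once(msg: str) -> None:
    # Local stand-in for vllm.utils.print_warning_once: emit each message once.
    if msg not in _warned:
        _warned.add(msg)
        print("WARNING:", msg)

def filter_text_only_weights(
        weight_names: Iterable[str],
        params_dict: Dict[str, object]) -> Iterable[str]:
    """Yield only the checkpoint weight names the text-only model can load."""
    for name in weight_names:
        # Skip the visual tower of Qwen-VL checkpoints: those parameters do
        # not exist in the text-only model, so params_dict[name] would fail.
        if name not in params_dict and name.startswith("transformer.visual."):
            print_warning_once(
                "Only text inputs are allowed. Images won't be handled "
                "until Qwen-VL models are fully supported.")
            continue
        yield name

# Example: a Qwen-VL checkpoint mixes language-model and visual weights.
checkpoint = [
    "transformer.h.0.attn.c_attn.weight",
    "transformer.visual.conv1.weight",  # vision tower, skipped
    "lm_head.weight",
]
text_only_params = {
    "transformer.h.0.attn.c_attn.weight": None,
    "lm_head.weight": None,
}
print(list(filter_text_only_weights(checkpoint, text_only_params)))
# -> ['transformer.h.0.attn.c_attn.weight', 'lm_head.weight']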

0 commit comments
