
Commit 60b6ff7

DarkLight1337 authored and prashantgupta24 committed
[Model][Bugfix] Implicit model flags and reenable Phi-3-Vision (vllm-project#5896)
1 parent 1aa6d22 commit 60b6ff7

14 files changed: +26 −32 lines

vllm/model_executor/models/baichuan.py

Lines changed: 0 additions & 2 deletions
@@ -295,8 +295,6 @@ def forward(
 
 
 class BaiChuanBaseForCausalLM(nn.Module, SupportsLoRA):
-    supports_lora = True
-
     packed_modules_mapping = {
         "W_pack": ["W_pack"],
         "gate_up_proj": [

vllm/model_executor/models/chatglm.py

Lines changed: 0 additions & 2 deletions
@@ -325,8 +325,6 @@ def forward(
 
 
 class ChatGLMForCausalLM(nn.Module, SupportsLoRA):
-    supports_lora = True
-
     packed_modules_mapping = {
         "query_key_value": ["query_key_value"],
         "dense_h_to_4h": ["dense_h_to_4h"]

vllm/model_executor/models/gemma.py

Lines changed: 0 additions & 2 deletions
@@ -291,8 +291,6 @@ def forward(
 
 
 class GemmaForCausalLM(nn.Module, SupportsLoRA):
-    supports_lora = True
-
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",

vllm/model_executor/models/gpt_bigcode.py

Lines changed: 0 additions & 2 deletions
@@ -233,8 +233,6 @@ def forward(
 
 
 class GPTBigCodeForCausalLM(nn.Module, SupportsLoRA):
-    supports_lora = True
-
     packed_modules_mapping = {"c_attn": ["c_attn"]}
 
     supported_lora_modules = ["c_fc", "c_proj", "wte", "lm_head", "c_attn"]

vllm/model_executor/models/interfaces.py

Lines changed: 16 additions & 2 deletions
@@ -13,7 +13,14 @@
 class SupportsVision(Protocol):
     """The interface required for all vision language models (VLMs)."""
 
-    supports_vision: ClassVar[Literal[True]]
+    supports_vision: ClassVar[Literal[True]] = True
+    """
+    A flag that indicates this model supports vision inputs.
+
+    Note:
+        There is no need to redefine this flag if this class is in the
+        MRO of your model class.
+    """
 
     def __init__(self, *, vlm_config: VisionLanguageConfig) -> None:
         ...
@@ -52,7 +59,14 @@ def supports_vision(
 class SupportsLoRA(Protocol):
     """The interface required for all models that support LoRA."""
 
-    supports_lora: ClassVar[Literal[True]]
+    supports_lora: ClassVar[Literal[True]] = True
+    """
+    A flag that indicates this model supports LoRA.
+
+    Note:
+        There is no need to redefine this flag if this class is in the
+        MRO of your model class.
+    """
 
     packed_modules_mapping: ClassVar[Dict[str, List[str]]]
     supported_lora_modules: ClassVar[List[str]]
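
The two interface hunks above are what make the per-model deletions in this commit safe: because each Protocol now assigns the flag a class-level default (= True) rather than merely declaring its type, any model class that lists SupportsLoRA or SupportsVision among its bases inherits the flag through the MRO and no longer needs to restate it. A minimal, self-contained sketch of the pattern, using toy names rather than vLLM's actual classes (the supports_lora helper below is a hypothetical duck-typed check, shown only for illustration):

from typing import ClassVar, Literal, Protocol


class SupportsLoRA(Protocol):
    # Class-level default: the flag becomes implicit for every subclass.
    supports_lora: ClassVar[Literal[True]] = True


class ToyModel(SupportsLoRA):
    # No explicit `supports_lora = True` here; it resolves via the MRO.
    pass


def supports_lora(model_cls: type) -> bool:
    # Hypothetical duck-typed query, for illustration only.
    return getattr(model_cls, "supports_lora", False) is True


assert ToyModel.supports_lora is True
assert supports_lora(ToyModel)

This is why each model file in this commit simply drops its explicit assignment: the attribute lookup still succeeds, now resolving to the default defined on the protocol class.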

vllm/model_executor/models/llama.py

Lines changed: 0 additions & 2 deletions
@@ -299,8 +299,6 @@ def forward(
 
 
 class LlamaForCausalLM(nn.Module, SupportsLoRA):
-    supports_lora = True
-
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",

vllm/model_executor/models/llava.py

Lines changed: 0 additions & 2 deletions
@@ -88,8 +88,6 @@ class LlavaImageFeatureInputs(TypedDict):
 @MULTIMODAL_REGISTRY.register_dummy_data(get_dummy_image_data)
 class LlavaForConditionalGeneration(nn.Module, SupportsVision):
 
-    supports_vision = True
-
     def __init__(self,
                  config: LlavaConfig,
                  vlm_config: VisionLanguageConfig,

vllm/model_executor/models/llava_next.py

Lines changed: 0 additions & 2 deletions
@@ -108,8 +108,6 @@ def _image_pixel_processor(
 @MULTIMODAL_REGISTRY.register_dummy_data(_get_dummy_image_data)
 class LlavaNextForConditionalGeneration(nn.Module, SupportsVision):
 
-    supports_vision = True
-
     def __init__(self,
                  config: LlavaNextConfig,
                  vlm_config: VisionLanguageConfig,

vllm/model_executor/models/minicpm.py

Lines changed: 0 additions & 2 deletions
@@ -392,8 +392,6 @@ def forward(
 
 
 class MiniCPMForCausalLM(nn.Module, SupportsLoRA):
-    supports_lora = True
-
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",

vllm/model_executor/models/mixtral.py

Lines changed: 0 additions & 2 deletions
@@ -475,8 +475,6 @@ def forward(
 
 
 class MixtralForCausalLM(nn.Module, SupportsLoRA):
-    supports_lora = True
-
     fall_back_to_pt_during_load = False
 
     packed_modules_mapping = {
