1 parent db5ec52 commit f0cc0e6
vllm/model_executor/models/phi3v.py
@@ -18,7 +18,6 @@
 import torch
 import torch.nn as nn
 from transformers import CLIPVisionConfig, CLIPVisionModel, PretrainedConfig
-from transformers.utils import logging
 
 from vllm.attention import AttentionMetadata
 from vllm.config import CacheConfig, VisionLanguageConfig
@@ -35,8 +34,6 @@
 from vllm.multimodal.image import get_dummy_image_data
 from vllm.sequence import SamplerOutput
 
-logger = logging.get_logger(__name__)
-
 _KEYS_TO_MODIFY_MAPPING = {
     "model.vision_embed_tokens": "vision_embed_tokens",
 }
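
The commit drops the `transformers.utils.logging` import and the module-level `logger` from phi3v.py; since no other hunk touches a `logger` call site, the logger appears to have been unused. Should logging be needed in this module later, a minimal sketch of the pattern used elsewhere in the vLLM codebase (assuming the `init_logger` helper from vllm/logger.py) would be:

# Sketch, not part of this commit: vLLM's in-tree logging helper,
# assumed here as the idiomatic replacement for transformers' logger.
from vllm.logger import init_logger

logger = init_logger(__name__)
logger.info("phi3v module initialized")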