Skip to content

Commit

Permalink
add pipeline mapping to models
Browse files Browse the repository at this point in the history
  • Loading branch information
yonigozlan committed Oct 15, 2024
1 parent 66efd2a commit 4ac2d1f
Show file tree
Hide file tree
Showing 20 changed files with 49 additions and 7 deletions.
1 change: 1 addition & 0 deletions src/transformers/models/donut/processing_donut.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ def __call__(
# For backward compatibility
legacy = kwargs.pop("legacy", True)
if legacy:
            # With `add_special_tokens=True`, the performance of donut is degraded when working with both images and text.
logger.warning_once(
"Legacy behavior is being used. The new behavior with legacy=False will be enabled in the future."
"In the new behavior, if both images and text are provided, the default value of `add_special_tokens` "
Expand Down
1 change: 1 addition & 0 deletions tests/models/blip/test_modeling_blip.py
Original file line number Diff line number Diff line change
Expand Up @@ -436,6 +436,7 @@ class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"feature-extraction": BlipModel,
"image-to-text": BlipForConditionalGeneration,
"visual-question-answering": BlipForQuestionAnswering,
"image-text-to-text": BlipForConditionalGeneration,
}
if is_torch_available()
else {}
Expand Down
1 change: 1 addition & 0 deletions tests/models/blip_2/test_modeling_blip_2.py
Original file line number Diff line number Diff line change
Expand Up @@ -705,6 +705,7 @@ class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, GenerationTesterMixi
"feature-extraction": Blip2Model,
"image-to-text": Blip2ForConditionalGeneration,
"visual-question-answering": Blip2ForConditionalGeneration,
"image-text-to-text": Blip2ForConditionalGeneration,
}
if is_torch_available()
else {}
Expand Down
1 change: 1 addition & 0 deletions tests/models/chameleon/test_modeling_chameleon.py
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,7 @@ class ChameleonModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
{
"feature-extraction": ChameleonModel,
"text-generation": ChameleonForConditionalGeneration,
"image-text-to-text": ChameleonForConditionalGeneration,
}
if is_torch_available()
else {}
Expand Down
4 changes: 3 additions & 1 deletion tests/models/fuyu/test_modeling_fuyu.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,9 @@ def prepare_config_and_inputs_for_common(self):
@require_torch
class FuyuModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (FuyuForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = {"text-generation": FuyuForCausalLM} if is_torch_available() else {}
pipeline_model_mapping = (
{"text-generation": FuyuForCausalLM, "image-text-to-text": FuyuForCausalLM} if is_torch_available() else {}
)

test_head_masking = False
test_pruning = False
Expand Down
7 changes: 6 additions & 1 deletion tests/models/git/test_modeling_git.py
Original file line number Diff line number Diff line change
Expand Up @@ -401,7 +401,12 @@ class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
all_model_classes = (GitModel, GitForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (GitForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": GitModel, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM}
{
"feature-extraction": GitModel,
"image-to-text": GitForCausalLM,
"text-generation": GitForCausalLM,
"image-text-to-text": GitForCausalLM,
}
if is_torch_available()
else {}
)
Expand Down
6 changes: 5 additions & 1 deletion tests/models/idefics/test_modeling_idefics.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,7 +332,11 @@ def test_eager_matches_sdpa_generate(self):
@require_torch
class IdeficsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (IdeficsModel, IdeficsForVisionText2Text) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": IdeficsModel} if is_torch_available() else {}
pipeline_model_mapping = (
{"feature-extraction": IdeficsModel, "image-text-to-text": IdeficsForVisionText2Text}
if is_torch_available()
else {}
)
test_pruning = False
test_headmasking = False
test_torchscript = False
Expand Down
1 change: 1 addition & 0 deletions tests/models/idefics2/test_modeling_idefics2.py
Original file line number Diff line number Diff line change
Expand Up @@ -336,6 +336,7 @@ class Idefics2ForConditionalGenerationModelTest(GenerationTesterMixin, ModelTest

all_model_classes = (Idefics2ForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (Idefics2ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-text-to-text": Idefics2ForConditionalGeneration} if is_torch_available() else {}
fx_compatible = False
test_pruning = False
test_resize_embeddings = True
Expand Down
1 change: 1 addition & 0 deletions tests/models/idefics3/test_modeling_idefics3.py
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,7 @@ class Idefics3ForConditionalGenerationModelTest(GenerationTesterMixin, ModelTest

all_model_classes = (Idefics3ForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (Idefics3ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-text-to-text": Idefics3ForConditionalGeneration} if is_torch_available() else {}
fx_compatible = False
test_pruning = False
test_resize_embeddings = True
Expand Down
1 change: 1 addition & 0 deletions tests/models/instructblip/test_modeling_instructblip.py
Original file line number Diff line number Diff line change
Expand Up @@ -454,6 +454,7 @@ def prepare_config_and_inputs_for_common(self):
@require_torch
class InstructBlipForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (InstructBlipForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-text-to-text": InstructBlipForConditionalGeneration}
fx_compatible = False
test_head_masking = False
test_pruning = False
Expand Down
6 changes: 5 additions & 1 deletion tests/models/kosmos2/test_modeling_kosmos2.py
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,11 @@ class Kosmos2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
all_model_classes = (Kosmos2Model, Kosmos2ForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (Kosmos2ForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": Kosmos2Model, "image-to-text": Kosmos2ForConditionalGeneration}
{
"feature-extraction": Kosmos2Model,
"image-to-text": Kosmos2ForConditionalGeneration,
"image-text-to-text": Kosmos2ForConditionalGeneration,
}
if is_torch_available()
else {}
)
Expand Down
6 changes: 5 additions & 1 deletion tests/models/llava/test_modeling_llava.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,11 @@ class LlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterM

all_model_classes = (LlavaForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (LlavaForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-to-text": LlavaForConditionalGeneration} if is_torch_available() else {}
pipeline_model_mapping = (
{"image-to-text": LlavaForConditionalGeneration, "image-text-to-text": LlavaForConditionalGeneration}
if is_torch_available()
else {}
)
test_pruning = False
test_head_masking = False

Expand Down
1 change: 1 addition & 0 deletions tests/models/llava_next/test_modeling_llava_next.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,7 @@ class LlavaNextForConditionalGenerationModelTest(ModelTesterMixin, GenerationTes

all_model_classes = (LlavaNextForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (LlavaNextForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-text-to-text": LlavaNextForConditionalGeneration} if is_torch_available() else {}
test_pruning = False
test_head_masking = False

Expand Down
3 changes: 3 additions & 0 deletions tests/models/llava_onevision/test_modeling_llava_onevision.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,9 @@ class LlavaOnevisionForConditionalGenerationModelTest(ModelTesterMixin, Generati

all_model_classes = (LlavaOnevisionForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (LlavaOnevisionForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-text-to-text": LlavaOnevisionForConditionalGeneration} if is_torch_available() else {}
)
test_pruning = False
test_head_masking = False

Expand Down
1 change: 1 addition & 0 deletions tests/models/mllama/test_modeling_mllama.py
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,7 @@ class MllamaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTester

all_model_classes = (MllamaForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (MllamaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-text-to-text": MllamaForConditionalGeneration} if is_torch_available() else {}
test_pruning = False
test_head_masking = False
test_torchscript = False
Expand Down
1 change: 1 addition & 0 deletions tests/models/paligemma/test_modeling_paligemma.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,6 +183,7 @@ class PaliGemmaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTes

all_model_classes = (PaliGemmaForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (PaliGemmaForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-text-to-text": PaliGemmaForConditionalGeneration}
fx_compatible = False
test_pruning = False
test_torchscript = False
Expand Down
6 changes: 5 additions & 1 deletion tests/models/pix2struct/test_modeling_pix2struct.py
Original file line number Diff line number Diff line change
Expand Up @@ -419,7 +419,11 @@ def prepare_config_and_inputs_for_common(self):
@require_torch
class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Pix2StructForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-to-text": Pix2StructForConditionalGeneration} if is_torch_available() else {}
pipeline_model_mapping = (
{"image-to-text": Pix2StructForConditionalGeneration, "image-text-to-text": Pix2StructForConditionalGeneration}
if is_torch_available()
else {}
)
fx_compatible = False
test_head_masking = False
test_pruning = False
Expand Down
1 change: 1 addition & 0 deletions tests/models/qwen2_vl/test_modeling_qwen2_vl.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,7 @@ class Qwen2VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCas

all_model_classes = (Qwen2VLForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (Qwen2VLForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-text-to-text": Qwen2VLForConditionalGeneration}
test_pruning = False
test_head_masking = False

Expand Down
6 changes: 5 additions & 1 deletion tests/models/udop/test_modeling_udop.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,11 @@ class UdopModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
else ()
)
all_generative_model_classes = (UdopForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": UdopModel} if is_torch_available() else {}
pipeline_model_mapping = (
{"feature-extraction": UdopModel, "image-text-to-text": UdopForConditionalGeneration}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_torchscript = False
Expand Down
1 change: 1 addition & 0 deletions tests/models/vipllava/test_modeling_vipllava.py
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,7 @@ class VipLlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTest

all_model_classes = (VipLlavaForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (VipLlavaForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-text-to-text": VipLlavaForConditionalGeneration} if is_torch_available() else {}
fx_compatible = False
test_pruning = False
test_resize_embeddings = True
Expand Down

0 comments on commit 4ac2d1f

Please sign in to comment.