diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py
index 55121f38c522c6..6bd2f4ae3bdafe 100644
--- a/tests/test_pipelines_common.py
+++ b/tests/test_pipelines_common.py
@@ -21,7 +21,7 @@
 from unittest import skipIf
 
 from transformers import FEATURE_EXTRACTOR_MAPPING, TOKENIZER_MAPPING, AutoFeatureExtractor, AutoTokenizer, pipeline
-from transformers.testing_utils import is_pipeline_test, require_torch
+from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
 
 
 logger = logging.getLogger(__name__)
@@ -104,6 +104,9 @@ def __repr__(self):
 class PipelineTestCaseMeta(type):
     def __new__(mcs, name, bases, dct):
         def gen_test(ModelClass, checkpoint, tiny_config, tokenizer_class, feature_extractor_class):
+            require_framework = require_tf if "TF" in tiny_config.__class__.__name__ else require_torch
+
+            @require_framework
             @skipIf(tiny_config is None, "TinyConfig does not exist")
             @skipIf(checkpoint is None, "checkpoint does not exist")
             def test(self):
@@ -111,7 +114,14 @@ def test(self):
                     tiny_config.is_encoder_decoder = False
                 if ModelClass.__name__.endswith("WithLMHead"):
                     tiny_config.is_decoder = True
-                model = ModelClass(tiny_config)
+                try:
+                    model = ModelClass(tiny_config)
+                except ImportError as e:
+                    # We need to ignore only detectron 2 import errors.
+                    if "detectron2" in str(e):
+                        self.skipTest(f"This model cannot be created: {e}")
+                    else:
+                        raise e
                 if hasattr(model, "eval"):
                     model = model.eval()
                 if tokenizer_class is not None:
diff --git a/tests/test_pipelines_feature_extraction.py b/tests/test_pipelines_feature_extraction.py
index d11bf061a3fdc4..b929db14ff07b0 100644
--- a/tests/test_pipelines_feature_extraction.py
+++ b/tests/test_pipelines_feature_extraction.py
@@ -14,7 +14,7 @@
 
 import unittest
 
-from transformers import MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, pipeline
+from transformers import MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, LxmertConfig, pipeline
 from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch
 
 from .test_pipelines_common import PipelineTestCaseMeta
@@ -68,6 +68,8 @@ def run_pipeline_test(self, model, tokenizer, feature_extractor):
         elif feature_extractor is not None:
             self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
             return
+        elif isinstance(model.config, LxmertConfig):
+            self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
         elif model.config.is_encoder_decoder:
             self.skipTest(
                 """encoder_decoder models are trickier for this pipeline.
diff --git a/tests/test_pipelines_question_answering.py b/tests/test_pipelines_question_answering.py
index 09d117dfc8cecf..5554976787f134 100644
--- a/tests/test_pipelines_question_answering.py
+++ b/tests/test_pipelines_question_answering.py
@@ -17,6 +17,7 @@
 from transformers import (
     MODEL_FOR_QUESTION_ANSWERING_MAPPING,
     TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+    LxmertConfig,
     QuestionAnsweringPipeline,
 )
 from transformers.data.processors.squad import SquadExample
@@ -36,6 +37,8 @@ def run_pipeline_test(self, model, tokenizer, feature_extractor):
             # This is an bimodal model, we need to find a more consistent way
             # to switch on those models.
             self.skipTest("We cannot handle multi modal question answering yet")
+        if isinstance(model.config, LxmertConfig):
+            self.skipTest("We cannot handle multi modal question answering yet")
         question_answerer = QuestionAnsweringPipeline(model, tokenizer)
 
         outputs = question_answerer(