
Commit fe7df00
Making all tests pass attempt #1.
Attempt #2.

Limit scope of error exception to detectron2

`Lxmert` is special.

Typo.
Narsil committed Sep 25, 2021
1 parent e1bc21b commit fe7df00
Showing 3 changed files with 18 additions and 3 deletions.
tests/test_pipelines_common.py (12 additions, 2 deletions)
@@ -21,7 +21,7 @@
 from unittest import skipIf
 
 from transformers import FEATURE_EXTRACTOR_MAPPING, TOKENIZER_MAPPING, AutoFeatureExtractor, AutoTokenizer, pipeline
-from transformers.testing_utils import is_pipeline_test, require_torch
+from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
 
 
 logger = logging.getLogger(__name__)
@@ -104,14 +104,24 @@ def __repr__(self):
 class PipelineTestCaseMeta(type):
     def __new__(mcs, name, bases, dct):
         def gen_test(ModelClass, checkpoint, tiny_config, tokenizer_class, feature_extractor_class):
+            require_framework = require_tf if "TF" in tiny_config.__class__.__name__ else require_torch
+
+            @require_framework
             @skipIf(tiny_config is None, "TinyConfig does not exist")
             @skipIf(checkpoint is None, "checkpoint does not exist")
             def test(self):
                 if ModelClass.__name__.endswith("ForCausalLM"):
                     tiny_config.is_encoder_decoder = False
                 if ModelClass.__name__.endswith("WithLMHead"):
                     tiny_config.is_decoder = True
-                model = ModelClass(tiny_config)
+                try:
+                    model = ModelClass(tiny_config)
+                except ImportError as e:
+                    # We need to ignore only detectron 2 import errors.
+                    if "detectron2" in str(e):
+                        self.skipTest(f"This model cannot be created: {e}")
+                    else:
+                        raise e
                 if hasattr(model, "eval"):
                     model = model.eval()
                 if tokenizer_class is not None:
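The first hunk above picks the framework skip decorator dynamically: a config class name containing "TF" routes the generated test through require_tf, everything else through require_torch. A minimal self-contained sketch of that dispatch; require_tf and require_torch below are simplified stand-ins for the real decorators in transformers.testing_utils, and TFTinyConfig is a hypothetical class name carrying the "TF" marker:

import unittest


def require_tf(test_case):
    # Simplified stand-in: skip unless TensorFlow is importable.
    try:
        import tensorflow  # noqa: F401

        return test_case
    except ImportError:
        return unittest.skip("test requires TensorFlow")(test_case)


def require_torch(test_case):
    # Simplified stand-in: skip unless PyTorch is importable.
    try:
        import torch  # noqa: F401

        return test_case
    except ImportError:
        return unittest.skip("test requires PyTorch")(test_case)


class TFTinyConfig:
    # Hypothetical config whose class name decides the framework.
    pass


tiny_config = TFTinyConfig()
# Same dispatch as in gen_test: the config class name decides the framework.
require_framework = require_tf if "TF" in tiny_config.__class__.__name__ else require_torch


class ExampleTest(unittest.TestCase):
    @require_framework
    def test_framework_specific(self):
        self.assertTrue(True)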
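The second hunk narrows the exception handling around model construction, per the commit message "Limit scope of error exception to detectron2": only an ImportError mentioning detectron2 (an optional dependency of some vision models such as LayoutLMv2) turns into a skip; any other ImportError still fails the test. A runnable sketch of the pattern, with build_model as a stand-in for ModelClass(tiny_config):

import unittest


def build_model(config):
    # Stand-in for ModelClass(tiny_config): some models raise ImportError
    # at construction time when detectron2 is not installed.
    raise ImportError("detectron2 is required but not installed")


class ExampleTest(unittest.TestCase):
    def test_model_can_be_constructed(self):
        try:
            model = build_model(config=None)
        except ImportError as e:
            if "detectron2" in str(e):
                # Missing optional dependency: skip, do not fail.
                self.skipTest(f"This model cannot be created: {e}")
            else:
                # Any other import failure is a real bug and must propagate.
                raise


if __name__ == "__main__":
    unittest.main()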
tests/test_pipelines_feature_extraction.py (3 additions, 1 deletion)
@@ -14,7 +14,7 @@
 
 import unittest
 
-from transformers import MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, pipeline
+from transformers import MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, LxmertConfig, pipeline
 from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch
 
 from .test_pipelines_common import PipelineTestCaseMeta
@@ -68,6 +68,8 @@ def run_pipeline_test(self, model, tokenizer, feature_extractor):
         elif feature_extractor is not None:
             self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
             return
+        elif isinstance(model.config, LxmertConfig):
+            self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
         elif model.config.is_encoder_decoder:
             self.skipTest(
                 """encoder_decoder models are trickier for this pipeline.
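This is why the commit message says "`Lxmert` is special": LXMERT consumes both text and visual features, so the skip is keyed on the config type rather than on the model class or feature extractor. A hedged sketch of that gating, with BimodalConfig as a dummy stand-in for LxmertConfig:

import unittest


class BimodalConfig:
    # Stand-in for LxmertConfig: a config type marking a text+vision model.
    pass


class FeatureExtractionLikeTest(unittest.TestCase):
    def _run_pipeline_test(self, config):
        if isinstance(config, BimodalConfig):
            # Mirrors the added elif branch above: bail out by config type.
            self.skipTest("This is a bimodal model, skipping for now.")

    def test_bimodal_model_is_skipped(self):
        self._run_pipeline_test(BimodalConfig())


if __name__ == "__main__":
    unittest.main()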
tests/test_pipelines_question_answering.py (3 additions, 0 deletions)
@@ -17,6 +17,7 @@
 from transformers import (
     MODEL_FOR_QUESTION_ANSWERING_MAPPING,
     TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+    LxmertConfig,
     QuestionAnsweringPipeline,
 )
 from transformers.data.processors.squad import SquadExample
@@ -36,6 +37,8 @@ def run_pipeline_test(self, model, tokenizer, feature_extractor):
             # This is an bimodal model, we need to find a more consistent way
             # to switch on those models.
             self.skipTest("We cannot handle multi modal question answering yet")
+        if isinstance(model.config, LxmertConfig):
+            self.skipTest("We cannot handle multi modal question answering yet")
         question_answerer = QuestionAnsweringPipeline(model, tokenizer)
 
         outputs = question_answerer(
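For reference, the call shape this test exercises: a QuestionAnsweringPipeline takes question/context pairs and returns a dict with score, start, end, and answer fields. A typical high-level usage via the standard transformers pipeline API (output values illustrative):

from transformers import pipeline

# pipeline("question-answering") builds a QuestionAnsweringPipeline, as the
# test above does explicitly with QuestionAnsweringPipeline(model, tokenizer).
question_answerer = pipeline("question-answering")

outputs = question_answerer(
    question="Where does Wolfgang live?",
    context="My name is Wolfgang and I live in Berlin.",
)
# e.g. {"score": 0.99, "start": 34, "end": 40, "answer": "Berlin"}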
