Move test model folders #17034

Merged · 33 commits · May 3, 2022

Changes from 1 commit

Commits (33)
babdc23  test - to be revert (ydshieh, Apr 27, 2022)
8ff0bde  temp change to generate new cache - to be reverted (ydshieh, Apr 27, 2022)
d09b62b  temp change to generate new cache - to be reverted (ydshieh, Apr 27, 2022)
75ff03c  clean-up (ydshieh, Apr 27, 2022)
ffbcad9  move test model folders (TODO: fix imports and others) (ydshieh, Apr 26, 2022)
7851bdb  fix (potentially partially) imports (in model test modules) (ydshieh, Apr 26, 2022)
fed0b1d  fix (potentially partially) imports (in tokenization test modules) (ydshieh, Apr 26, 2022)
c439b16  fix (potentially partially) imports (in feature extraction test modules) (ydshieh, Apr 26, 2022)
f35f1c5  fix import utils.test_modeling_tf_core (ydshieh, Apr 26, 2022)
2d801d2  fix path ../fixtures/ (ydshieh, Apr 26, 2022)
7d1d1a5  fix imports about generation.test_generation_flax_utils (ydshieh, Apr 26, 2022)
ab59261  fix more imports (ydshieh, Apr 26, 2022)
ae4827b  fix fixture path (ydshieh, Apr 26, 2022)
faba7b6  fix get_test_dir (ydshieh, Apr 26, 2022)
8c9600e  update module_to_test_file (ydshieh, Apr 26, 2022)
732ca46  fix get_tests_dir from wrong transformers.utils (ydshieh, Apr 26, 2022)
16e269e  update config.yml (CircleCI) (ydshieh, Apr 26, 2022)
6b52d1f  fix style (ydshieh, Apr 26, 2022)
e971915  remove missing imports (ydshieh, Apr 26, 2022)
4b0320e  update new model script (ydshieh, Apr 26, 2022)
8b470d0  update check_repo (ydshieh, Apr 26, 2022)
3dad271  update SPECIAL_MODULE_TO_TEST_MAP (ydshieh, Apr 26, 2022)
790f0d9  fix style (ydshieh, Apr 26, 2022)
c31b9c9  add __init__ (ydshieh, Apr 27, 2022)
39966aa  update self-scheduled (ydshieh, Apr 27, 2022)
c90b35c  fix add_new_model scripts (ydshieh, Apr 27, 2022)
579cdbf  check one way to get location back (ydshieh, Apr 27, 2022)
fa9cfd4  python setup.py build install (ydshieh, Apr 27, 2022)
021ae85  fix import in test auto (ydshieh, May 1, 2022)
825581f  update self-scheduled.yml (ydshieh, May 1, 2022)
b83abd8  update slack notification script (ydshieh, May 1, 2022)
0e44f67  Add comments about artifact names (ydshieh, May 2, 2022)
9bc8e1a  fix for yolos (ydshieh, May 3, 2022)
fix (potentially partially) imports (in model test modules)
ydshieh committed May 3, 2022
commit 7851bdbe0796b593222bb406176ea8934e3a5d18
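For context (not part of the diff itself): this commit updates relative imports after the per-model test files moved from `tests/<model>/` to `tests/models/<model>/`. Shared helpers such as `test_configuration_common.py` still live directly under `tests/`, so from the new location they sit two package levels up instead of one, which is why `..` becomes `...`. A minimal illustrative sketch of that depth change, using hypothetical paths modeled on the files touched below:

```python
from pathlib import Path

# Hypothetical repo-relative paths, modeled on the files touched in this PR.
shared_helper = Path("tests/test_configuration_common.py")
old_test = Path("tests/albert/test_modeling_albert.py")         # before the move
new_test = Path("tests/models/albert/test_modeling_albert.py")  # after the move

def leading_dots(test_file: Path, helper: Path) -> str:
    """Dots a relative import of `helper` needs from `test_file`:
    one dot is the current package, each extra dot climbs one level."""
    return "." * (len(test_file.parent.parts) - len(helper.parent.parts) + 1)

print(leading_dots(old_test, shared_helper))  # ".."  -> from ..test_configuration_common import ConfigTester
print(leading_dots(new_test, shared_helper))  # "..." -> from ...test_configuration_common import ConfigTester
```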
@@ -20,7 +20,7 @@
from transformers import is_flax_available, {{cookiecutter.camelcase_modelname}}Config
from transformers.testing_utils import require_flax, slow

-from ..test_configuration_common import ConfigTester
+from ...test_configuration_common import ConfigTester
from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor

if is_flax_available():
@@ -345,7 +345,7 @@ def test_inference_masked_lm(self):
)
from transformers.testing_utils import require_sentencepiece, require_flax, require_tokenizers, slow

-from ..test_configuration_common import ConfigTester
+from ...test_configuration_common import ConfigTester
from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


@@ -20,7 +20,7 @@
from transformers import is_tf_available, {{cookiecutter.camelcase_modelname}}Config
from transformers.testing_utils import require_tf, slow

-from ..test_configuration_common import ConfigTester
+from ...test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


@@ -711,7 +711,7 @@ def test_inference_masked_lm(self):
)
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

-from ..test_configuration_common import ConfigTester
+from ...test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor


@@ -23,7 +23,7 @@
from transformers.testing_utils import require_torch, slow, torch_device

from transformers import {{cookiecutter.camelcase_modelname}}Config
-from ..test_configuration_common import ConfigTester
+from ...test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask


@@ -489,8 +489,8 @@ def test_inference_masked_lm(self):
from transformers.utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

-from ..test_configuration_common import ConfigTester
-from ..generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...generation.test_generation_utils import GenerationTesterMixin
from ..test_modeling_common import ModelTesterMixin, ids_tensor


Empty file added: tests/models/__init__.py
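A side note, not stated in the PR itself: the new empty `tests/models/__init__.py` makes `tests/models` an importable package, which the three-dot relative imports in the moved test modules rely on. A rough, hypothetical sanity check one could run from the repository root (assuming the layout introduced by this PR):

```python
import importlib.util

# Hypothetical check: confirm `tests.models` resolves as a package so that
# `from ...test_configuration_common import ConfigTester` can work inside
# tests/models/<model>/test_*.py modules.
try:
    spec = importlib.util.find_spec("tests.models")
    print("tests.models found:", spec is not None)
except ModuleNotFoundError:
    print("Run this from the repository root so the `tests` package is importable.")
```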
4 changes: 2 additions & 2 deletions tests/models/albert/test_modeling_albert.py
@@ -20,8 +20,8 @@
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask


if is_torch_available():
2 changes: 1 addition & 1 deletion tests/models/albert/test_modeling_flax_albert.py
@@ -19,7 +19,7 @@
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

-from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
4 changes: 2 additions & 2 deletions tests/models/albert/test_modeling_tf_albert.py
@@ -20,8 +20,8 @@
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
6 changes: 3 additions & 3 deletions tests/models/bart/test_modeling_bart.py
@@ -25,9 +25,9 @@
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


if is_torch_available():
2 changes: 1 addition & 1 deletion tests/models/bart/test_modeling_flax_bart.py
@@ -20,7 +20,7 @@
from transformers.testing_utils import require_flax, slow

from ..generation.test_generation_flax_utils import FlaxGenerationTesterMixin
-from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
4 changes: 2 additions & 2 deletions tests/models/bart/test_modeling_tf_bart.py
@@ -21,8 +21,8 @@
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ..utils.test_modeling_tf_core import TFCoreModelTesterMixin


4 changes: 2 additions & 2 deletions tests/models/beit/test_modeling_beit.py
@@ -26,8 +26,8 @@
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor


if is_torch_available():
4 changes: 2 additions & 2 deletions tests/models/beit/test_modeling_flax_beit.py
@@ -21,8 +21,8 @@
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
6 changes: 3 additions & 3 deletions tests/models/bert/test_modeling_bert.py
@@ -20,9 +20,9 @@
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
2 changes: 1 addition & 1 deletion tests/models/bert/test_modeling_flax_bert.py
@@ -19,7 +19,7 @@
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

-from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
4 changes: 2 additions & 2 deletions tests/models/bert/test_modeling_tf_bert.py
@@ -20,8 +20,8 @@
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ..utils.test_modeling_tf_core import TFCoreModelTesterMixin


6 changes: 3 additions & 3 deletions tests/models/bert_generation/test_modeling_bert_generation.py
@@ -19,9 +19,9 @@
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
4 changes: 2 additions & 2 deletions tests/models/big_bird/test_modeling_big_bird.py
@@ -22,8 +22,8 @@
from transformers.models.big_bird.tokenization_big_bird import BigBirdTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
2 changes: 1 addition & 1 deletion tests/models/big_bird/test_modeling_flax_big_bird.py
@@ -19,7 +19,7 @@
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

-from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
6 changes: 3 additions & 3 deletions tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
@@ -22,9 +22,9 @@
from transformers import BigBirdPegasusConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor


if is_torch_available():
6 changes: 3 additions & 3 deletions tests/models/blenderbot/test_modeling_blenderbot.py
@@ -21,9 +21,9 @@
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor


if is_torch_available():
2 changes: 1 addition & 1 deletion tests/models/blenderbot/test_modeling_flax_blenderbot.py
@@ -21,7 +21,7 @@
from transformers.testing_utils import jax_device, require_flax, slow

from ..generation.test_generation_flax_utils import FlaxGenerationTesterMixin
-from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
+from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
4 changes: 2 additions & 2 deletions tests/models/blenderbot/test_modeling_tf_blenderbot.py
@@ -20,8 +20,8 @@
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor


if is_tf_available():
@@ -21,9 +21,9 @@
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property

-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor


if is_torch_available():
@@ -21,7 +21,7 @@
from transformers.testing_utils import require_flax, slow

from ..generation.test_generation_flax_utils import FlaxGenerationTesterMixin
-from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
+from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
@@ -20,8 +20,8 @@
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor


if is_tf_available():
4 changes: 2 additions & 2 deletions tests/models/canine/test_modeling_canine.py
@@ -21,8 +21,8 @@
from transformers import CanineConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, _config_zero_init, global_rng, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, _config_zero_init, global_rng, ids_tensor, random_attention_mask


if is_torch_available():
4 changes: 2 additions & 2 deletions tests/models/clip/test_modeling_clip.py
@@ -35,8 +35,8 @@
)
from transformers.utils import is_torch_available, is_vision_available

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import (
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
2 changes: 1 addition & 1 deletion tests/models/clip/test_modeling_flax_clip.py
@@ -8,7 +8,7 @@
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow

-from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
4 changes: 2 additions & 2 deletions tests/models/clip/test_modeling_tf_clip.py
@@ -26,8 +26,8 @@
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_tf_available():
4 changes: 2 additions & 2 deletions tests/models/convbert/test_modeling_convbert.py
@@ -21,8 +21,8 @@
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
4 changes: 2 additions & 2 deletions tests/models/convbert/test_modeling_tf_convbert.py
@@ -19,8 +19,8 @@
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
4 changes: 2 additions & 2 deletions tests/models/convnext/test_modeling_convnext.py
@@ -22,8 +22,8 @@
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


if is_torch_available():
4 changes: 2 additions & 2 deletions tests/models/convnext/test_modeling_tf_convnext.py
@@ -22,8 +22,8 @@
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor


if is_tf_available():
6 changes: 3 additions & 3 deletions tests/models/ctrl/test_modeling_ctrl.py
@@ -18,9 +18,9 @@
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask


if is_torch_available():
4 changes: 2 additions & 2 deletions tests/models/ctrl/test_modeling_tf_ctrl.py
@@ -19,8 +19,8 @@
from transformers import CTRLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():