File tree Expand file tree Collapse file tree 3 files changed +15
-5
lines changed Expand file tree Collapse file tree 3 files changed +15
-5
lines changed Original file line number Diff line number Diff line change
1
+ import inspect
1
2
import os
2
3
import re
3
4
import shutil
@@ -144,6 +145,15 @@ def require_torch_and_cuda(test_case):
144
145
return test_case
145
146
146
147
148
def get_tests_dir(append_path=None):
    """
    Return the full path to the `tests` dir, so that the tests can be invoked from anywhere.

    Args:
        append_path: optional path fragment to join onto the tests dir
            (e.g. ``"fixtures"``); if ``None`` (the default) the tests dir
            itself is returned, which keeps existing callers working.

    Returns:
        Absolute path to the directory containing the *caller's* file,
        optionally extended with ``append_path``.
    """
    # inspect.stack()[1] is the caller's frame record; index [1] of the
    # record is the caller's filename (its __file__). This makes the
    # result relative to whichever test module invokes us, not to this
    # utility module.
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))
    if append_path:
        return os.path.join(tests_dir, append_path)
    return tests_dir
147
157
#
148
158
# Helper functions for dealing with testing text outputs
149
159
# The original code came from:
Original file line number Diff line number Diff line change 15
15
TransfoXLTokenizer ,
16
16
is_torch_available ,
17
17
)
18
- from transformers.testing_utils import require_torch
18
+ from transformers.testing_utils import get_tests_dir, require_torch
19
19
from transformers .tokenization_distilbert import DistilBertTokenizerFast
20
20
from transformers .tokenization_openai import OpenAIGPTTokenizerFast
21
21
from transformers .tokenization_roberta import RobertaTokenizerFast
@@ -42,7 +42,7 @@ class CommonFastTokenizerTest(unittest.TestCase):
42
42
TOKENIZERS_CLASSES = frozenset ([])
43
43
44
44
def setUp(self) -> None:
    """Load the shared sample-text fixture used by the tokenizer tests."""
    # Resolve the fixture relative to the tests dir so the suite can be
    # invoked from any working directory.
    sample_path = f"{get_tests_dir()}/fixtures/sample_text.txt"
    with open(sample_path, encoding="utf-8") as fixture_file:
        raw_text = fixture_file.read()
    # Collapse blank-line separators to single newlines and trim edges.
    self._data = raw_text.replace("\n\n", "\n").strip()
47
47
48
48
def test_all_tokenizers (self ):
Original file line number Diff line number Diff line change 4
4
import numpy as np
5
5
6
6
from transformers import AutoTokenizer , TrainingArguments , is_torch_available
7
- from transformers.testing_utils import require_torch
7
+ from transformers.testing_utils import get_tests_dir, require_torch
8
8
9
9
10
10
if is_torch_available ():
20
20
)
21
21
22
22
23
- PATH_SAMPLE_TEXT = "./tests/fixtures/sample_text.txt"
23
+ PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"
24
24
25
25
26
26
class RegressionDataset :
@@ -262,7 +262,7 @@ def test_trainer_eval_mrpc(self):
262
262
tokenizer = AutoTokenizer .from_pretrained (MODEL_ID )
263
263
model = AutoModelForSequenceClassification .from_pretrained (MODEL_ID )
264
264
data_args = GlueDataTrainingArguments (
265
- task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
265
+ task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
266
266
)
267
267
eval_dataset = GlueDataset (data_args , tokenizer = tokenizer , mode = "dev" )
268
268
You can’t perform that action at this time.
0 commit comments