Skip to content

Commit 1ec4537

Browse files
authored
gh-109162: libregrtest: add single.py and result.py (#109243)
* Add single.py and result.py files. * Move run_single_test() function and its helper functions to single.py. * Move remove_testfn(), abs_module_name() and normalize_test_name() to utils.py. * Move setup_support() to setup.py. * Move type hints like TestName to utils.py. * Rename runtest.py to runtests.py.
1 parent a939b65 commit 1ec4537

File tree

14 files changed

+722
-697
lines changed

14 files changed

+722
-697
lines changed

Lib/test/libregrtest/findtests.py

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
import os
2+
3+
from test.libregrtest.utils import StrPath, TestName, TestList
4+
5+
6+
# If these test directories are encountered recurse into them and treat each
# "test_*.py" file or each sub-directory as a separate test module. This can
# increase parallelism.
#
# Beware this can't generally be done for any directory with sub-tests as the
# __init__.py may do things which alter what tests are to be run.
SPLITTESTDIRS: set[TestName] = {
    "test_asyncio",
    "test_concurrent_futures",
    "test_multiprocessing_fork",
    "test_multiprocessing_forkserver",
    "test_multiprocessing_spawn",
}
19+
20+
21+
def findtestdir(path=None):
    """Return *path* if given (and non-empty), else the default test directory.

    The default is the parent directory of this package (Lib/test),
    falling back to the current directory if that is empty.
    """
    default = os.path.dirname(os.path.dirname(__file__)) or os.curdir
    return path or default
23+
24+
25+
def findtests(*, testdir: StrPath | None = None, exclude=(),
              split_test_dirs: set[TestName] = SPLITTESTDIRS,
              base_mod: str = "") -> TestList:
    """Return a list of all applicable test modules."""
    testdir = findtestdir(testdir)
    tests = []
    for entry in os.listdir(testdir):
        mod, ext = os.path.splitext(entry)
        # Only consider "test_*" entries which have not been excluded.
        if not mod.startswith("test_") or mod in exclude:
            continue
        if mod in split_test_dirs:
            # Recurse: each test file / sub-directory of this package
            # becomes its own test module (increases parallelism).
            subdir = os.path.join(testdir, mod)
            mod = f"{base_mod or 'test'}.{mod}"
            tests.extend(findtests(testdir=subdir, exclude=exclude,
                                   split_test_dirs=split_test_dirs,
                                   base_mod=mod))
        elif ext in (".py", ""):
            # A regular test module (.py) or a test package (no extension).
            tests.append(f"{base_mod}.{mod}" if base_mod else mod)
    return sorted(tests)
44+
45+
46+
def split_test_packages(tests, *, testdir: StrPath | None = None, exclude=(),
                        split_test_dirs=SPLITTESTDIRS):
    """Expand each test package listed in *split_test_dirs* into its sub-tests.

    Tests not listed in *split_test_dirs* are kept as-is; order is preserved.
    """
    testdir = findtestdir(testdir)
    expanded = []
    for name in tests:
        if name not in split_test_dirs:
            expanded.append(name)
            continue
        # Replace the package by the list of its individual sub-tests.
        subdir = os.path.join(testdir, name)
        expanded.extend(findtests(testdir=subdir, exclude=exclude,
                                  split_test_dirs=split_test_dirs,
                                  base_mod=name))
    return expanded

Lib/test/libregrtest/logger.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import os
22
import time
33

4-
from test.libregrtest.runtest import RunTests
4+
from test.libregrtest.runtests import RunTests
55
from test.libregrtest.utils import print_warning, MS_WINDOWS
66

77
if MS_WINDOWS:

Lib/test/libregrtest/main.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,17 +11,19 @@
1111
from test.support import os_helper
1212

1313
from test.libregrtest.cmdline import _parse_args, Namespace
14+
from test.libregrtest.findtests import findtests, split_test_packages
1415
from test.libregrtest.logger import Logger
15-
from test.libregrtest.runtest import (
16-
findtests, split_test_packages, run_single_test, abs_module_name,
17-
PROGRESS_MIN_TIME, State, RunTests, HuntRefleak,
18-
FilterTuple, TestList, StrJSON, TestName)
16+
from test.libregrtest.result import State
17+
from test.libregrtest.runtests import RunTests, HuntRefleak
1918
from test.libregrtest.setup import setup_tests, setup_test_dir
19+
from test.libregrtest.single import run_single_test, PROGRESS_MIN_TIME
2020
from test.libregrtest.pgo import setup_pgo_tests
2121
from test.libregrtest.results import TestResults
2222
from test.libregrtest.utils import (
23-
strip_py_suffix, count, format_duration, StrPath,
24-
printlist, get_build_info, get_temp_dir, get_work_dir, exit_timeout)
23+
StrPath, StrJSON, TestName, TestList, FilterTuple,
24+
strip_py_suffix, count, format_duration,
25+
printlist, get_build_info, get_temp_dir, get_work_dir, exit_timeout,
26+
abs_module_name)
2527

2628

2729
class Regrtest:

Lib/test/libregrtest/refleak.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,11 @@
1-
import os
21
import sys
32
import warnings
43
from inspect import isabstract
4+
55
from test import support
66
from test.support import os_helper
7-
from test.libregrtest.runtest import HuntRefleak
7+
8+
from test.libregrtest.runtests import HuntRefleak
89
from test.libregrtest.utils import clear_caches
910

1011
try:

Lib/test/libregrtest/result.py

Lines changed: 184 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,184 @@
1+
import dataclasses
2+
import json
3+
from typing import Any
4+
5+
from test.support import TestStats
6+
7+
from test.libregrtest.utils import (
8+
TestName, FilterTuple,
9+
format_duration, normalize_test_name, print_warning)
10+
11+
12+
# Avoid enum.Enum to reduce the number of imports when tests are run
class State:
    """String constants describing the outcome of a single test."""

    PASSED = "PASSED"
    FAILED = "FAILED"
    SKIPPED = "SKIPPED"
    UNCAUGHT_EXC = "UNCAUGHT_EXC"
    REFLEAK = "REFLEAK"
    ENV_CHANGED = "ENV_CHANGED"
    RESOURCE_DENIED = "RESOURCE_DENIED"
    INTERRUPTED = "INTERRUPTED"
    MULTIPROCESSING_ERROR = "MULTIPROCESSING_ERROR"
    DID_NOT_RUN = "DID_NOT_RUN"
    TIMEOUT = "TIMEOUT"

    @staticmethod
    def is_failed(state):
        """Return True if *state* counts as a test failure."""
        return state in (
            State.FAILED,
            State.UNCAUGHT_EXC,
            State.REFLEAK,
            State.MULTIPROCESSING_ERROR,
            State.TIMEOUT,
        )

    @staticmethod
    def has_meaningful_duration(state):
        """Return True if a duration makes sense for *state*.

        Consider that the duration is meaningless for these cases.
        For example, if a whole test file is skipped, its duration
        is unlikely to be the duration of executing its tests,
        but just the duration to execute code which skips the test.
        """
        return state not in (
            State.SKIPPED,
            State.RESOURCE_DENIED,
            State.INTERRUPTED,
            State.MULTIPROCESSING_ERROR,
            State.DID_NOT_RUN,
        )

    @staticmethod
    def must_stop(state):
        """Return True if *state* means the whole test run must be aborted."""
        return state in (
            State.INTERRUPTED,
            State.MULTIPROCESSING_ERROR,
        )
53+
54+
55+
@dataclasses.dataclass(slots=True)
56+
class TestResult:
57+
test_name: TestName
58+
state: str | None = None
59+
# Test duration in seconds
60+
duration: float | None = None
61+
xml_data: list[str] | None = None
62+
stats: TestStats | None = None
63+
64+
# errors and failures copied from support.TestFailedWithDetails
65+
errors: list[tuple[str, str]] | None = None
66+
failures: list[tuple[str, str]] | None = None
67+
68+
def is_failed(self, fail_env_changed: bool) -> bool:
69+
if self.state == State.ENV_CHANGED:
70+
return fail_env_changed
71+
return State.is_failed(self.state)
72+
73+
def _format_failed(self):
74+
if self.errors and self.failures:
75+
le = len(self.errors)
76+
lf = len(self.failures)
77+
error_s = "error" + ("s" if le > 1 else "")
78+
failure_s = "failure" + ("s" if lf > 1 else "")
79+
return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
80+
81+
if self.errors:
82+
le = len(self.errors)
83+
error_s = "error" + ("s" if le > 1 else "")
84+
return f"{self.test_name} failed ({le} {error_s})"
85+
86+
if self.failures:
87+
lf = len(self.failures)
88+
failure_s = "failure" + ("s" if lf > 1 else "")
89+
return f"{self.test_name} failed ({lf} {failure_s})"
90+
91+
return f"{self.test_name} failed"
92+
93+
def __str__(self) -> str:
94+
match self.state:
95+
case State.PASSED:
96+
return f"{self.test_name} passed"
97+
case State.FAILED:
98+
return self._format_failed()
99+
case State.SKIPPED:
100+
return f"{self.test_name} skipped"
101+
case State.UNCAUGHT_EXC:
102+
return f"{self.test_name} failed (uncaught exception)"
103+
case State.REFLEAK:
104+
return f"{self.test_name} failed (reference leak)"
105+
case State.ENV_CHANGED:
106+
return f"{self.test_name} failed (env changed)"
107+
case State.RESOURCE_DENIED:
108+
return f"{self.test_name} skipped (resource denied)"
109+
case State.INTERRUPTED:
110+
return f"{self.test_name} interrupted"
111+
case State.MULTIPROCESSING_ERROR:
112+
return f"{self.test_name} process crashed"
113+
case State.DID_NOT_RUN:
114+
return f"{self.test_name} ran no tests"
115+
case State.TIMEOUT:
116+
return f"{self.test_name} timed out ({format_duration(self.duration)})"
117+
case _:
118+
raise ValueError("unknown result state: {state!r}")
119+
120+
def has_meaningful_duration(self):
121+
return State.has_meaningful_duration(self.state)
122+
123+
def set_env_changed(self):
124+
if self.state is None or self.state == State.PASSED:
125+
self.state = State.ENV_CHANGED
126+
127+
def must_stop(self, fail_fast: bool, fail_env_changed: bool) -> bool:
128+
if State.must_stop(self.state):
129+
return True
130+
if fail_fast and self.is_failed(fail_env_changed):
131+
return True
132+
return False
133+
134+
def get_rerun_match_tests(self) -> FilterTuple | None:
135+
match_tests = []
136+
137+
errors = self.errors or []
138+
failures = self.failures or []
139+
for error_list, is_error in (
140+
(errors, True),
141+
(failures, False),
142+
):
143+
for full_name, *_ in error_list:
144+
match_name = normalize_test_name(full_name, is_error=is_error)
145+
if match_name is None:
146+
# 'setUpModule (test.test_sys)': don't filter tests
147+
return None
148+
if not match_name:
149+
error_type = "ERROR" if is_error else "FAIL"
150+
print_warning(f"rerun failed to parse {error_type} test name: "
151+
f"{full_name!r}: don't filter tests")
152+
return None
153+
match_tests.append(match_name)
154+
155+
if not match_tests:
156+
return None
157+
return tuple(match_tests)
158+
159+
def write_json(self, file) -> None:
160+
json.dump(self, file, cls=_EncodeTestResult)
161+
162+
@staticmethod
163+
def from_json(worker_json) -> 'TestResult':
164+
return json.loads(worker_json, object_hook=_decode_test_result)
165+
166+
167+
class _EncodeTestResult(json.JSONEncoder):
    """JSON encoder which knows how to serialize TestResult instances."""

    def default(self, o: Any) -> dict[str, Any]:
        if not isinstance(o, TestResult):
            # Defer to the base class, which raises TypeError.
            return super().default(o)
        # Tag the payload so _decode_test_result() can recognize it.
        data = dataclasses.asdict(o)
        data["__test_result__"] = o.__class__.__name__
        return data
175+
176+
177+
def _decode_test_result(data: dict[str, Any]) -> TestResult | dict[str, Any]:
    """json.loads() object hook: rebuild TestResult from tagged dicts.

    Dicts without the "__test_result__" tag are returned unchanged.
    """
    if "__test_result__" not in data:
        return data
    data.pop('__test_result__')
    # Nested stats were serialized as a plain dict; restore the TestStats.
    if data['stats'] is not None:
        data['stats'] = TestStats(**data['stats'])
    return TestResult(**data)

Lib/test/libregrtest/results.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
import sys
22
from test.support import TestStats
33

4-
from test.libregrtest.runtest import (
5-
TestName, TestTuple, TestList, FilterDict, State,
6-
TestResult, RunTests)
4+
from test.libregrtest.runtests import RunTests
5+
from test.libregrtest.result import State, TestResult
76
from test.libregrtest.utils import (
8-
printlist, count, format_duration, StrPath)
7+
StrPath, TestName, TestTuple, TestList, FilterDict,
8+
printlist, count, format_duration)
99

1010

1111
EXITCODE_BAD_TEST = 2

0 commit comments

Comments
 (0)