Skip to content

Commit

Permalink
[3.12] pythongh-109413: Add more type hints to libregrtest (pythonGH-126352)
Browse files Browse the repository at this point in the history

(cherry picked from commit bfc1d25)

Co-authored-by: sobolevn <mail@sobolevn.me>
  • Loading branch information
sobolevn committed Nov 4, 2024
1 parent 70f777d commit a450024
Show file tree
Hide file tree
Showing 13 changed files with 81 additions and 63 deletions.
14 changes: 8 additions & 6 deletions Lib/test/libregrtest/findtests.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os
import sys
import unittest
from collections.abc import Container

from test import support

Expand Down Expand Up @@ -34,7 +35,7 @@ def findtestdir(path: StrPath | None = None) -> StrPath:
return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir


def findtests(*, testdir: StrPath | None = None, exclude=(),
def findtests(*, testdir: StrPath | None = None, exclude: Container[str] = (),
split_test_dirs: set[TestName] = SPLITTESTDIRS,
base_mod: str = "") -> TestList:
"""Return a list of all applicable test modules."""
Expand All @@ -60,8 +61,9 @@ def findtests(*, testdir: StrPath | None = None, exclude=(),
return sorted(tests)


def split_test_packages(tests, *, testdir: StrPath | None = None, exclude=(),
split_test_dirs=SPLITTESTDIRS):
def split_test_packages(tests, *, testdir: StrPath | None = None,
exclude: Container[str] = (),
split_test_dirs=SPLITTESTDIRS) -> list[TestName]:
testdir = findtestdir(testdir)
splitted = []
for name in tests:
Expand All @@ -75,9 +77,9 @@ def split_test_packages(tests, *, testdir: StrPath | None = None, exclude=(),
return splitted


def _list_cases(suite):
def _list_cases(suite: unittest.TestSuite) -> None:
for test in suite:
if isinstance(test, unittest.loader._FailedTest):
if isinstance(test, unittest.loader._FailedTest): # type: ignore[attr-defined]
continue
if isinstance(test, unittest.TestSuite):
_list_cases(test)
Expand All @@ -87,7 +89,7 @@ def _list_cases(suite):

def list_cases(tests: TestTuple, *,
match_tests: TestFilter | None = None,
test_dir: StrPath | None = None):
test_dir: StrPath | None = None) -> None:
support.verbose = False
set_match_tests(match_tests)

Expand Down
29 changes: 15 additions & 14 deletions Lib/test/libregrtest/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import sys
import sysconfig
import time
from typing import NoReturn

from test.support import os_helper, MS_WINDOWS, flush_std_streams

Expand Down Expand Up @@ -152,7 +153,7 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False):
self.next_single_test: TestName | None = None
self.next_single_filename: StrPath | None = None

def log(self, line=''):
def log(self, line: str = '') -> None:
self.logger.log(line)

def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList | None]:
Expand Down Expand Up @@ -228,11 +229,11 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList
return (tuple(selected), tests)

@staticmethod
def list_tests(tests: TestTuple):
def list_tests(tests: TestTuple) -> None:
for name in tests:
print(name)

def _rerun_failed_tests(self, runtests: RunTests):
def _rerun_failed_tests(self, runtests: RunTests) -> RunTests:
# Configure the runner to re-run tests
if self.num_workers == 0:
# Always run tests in fresh processes to have more deterministic
Expand All @@ -257,7 +258,7 @@ def _rerun_failed_tests(self, runtests: RunTests):
self._run_tests_mp(runtests, self.num_workers)
return runtests

def rerun_failed_tests(self, runtests: RunTests):
def rerun_failed_tests(self, runtests: RunTests) -> None:
if self.python_cmd:
# Temp patch for https://github.com/python/cpython/issues/94052
self.log(
Expand Down Expand Up @@ -326,7 +327,7 @@ def run_bisect(self, runtests: RunTests) -> None:
if not self._run_bisect(runtests, name, progress):
return

def display_result(self, runtests):
def display_result(self, runtests: RunTests) -> None:
# If running the test suite for PGO then no one cares about results.
if runtests.pgo:
return
Expand All @@ -353,7 +354,7 @@ def run_test(self, test_name: TestName, runtests: RunTests, tracer):

return result

def run_tests_sequentially(self, runtests):
def run_tests_sequentially(self, runtests: RunTests):
if self.coverage:
import trace
tracer = trace.Trace(trace=False, count=True)
Expand Down Expand Up @@ -413,7 +414,7 @@ def run_tests_sequentially(self, runtests):

return tracer

def get_state(self):
def get_state(self) -> str:
state = self.results.get_state(self.fail_env_changed)
if self.first_state:
state = f'{self.first_state} then {state}'
Expand Down Expand Up @@ -442,7 +443,7 @@ def finalize_tests(self, tracer):
if self.junit_filename:
self.results.write_junit(self.junit_filename)

def display_summary(self):
def display_summary(self) -> None:
duration = time.perf_counter() - self.logger.start_time
filtered = bool(self.match_tests)

Expand All @@ -456,7 +457,7 @@ def display_summary(self):
state = self.get_state()
print(f"Result: {state}")

def create_run_tests(self, tests: TestTuple):
def create_run_tests(self, tests: TestTuple) -> RunTests:
return RunTests(
tests,
fail_fast=self.fail_fast,
Expand Down Expand Up @@ -659,9 +660,9 @@ def _execute_python(self, cmd, environ):
f"Command: {cmd_text}")
# continue executing main()

def _add_python_opts(self):
python_opts = []
regrtest_opts = []
def _add_python_opts(self) -> None:
python_opts: list[str] = []
regrtest_opts: list[str] = []

environ, keep_environ = self._add_cross_compile_opts(regrtest_opts)
if self.ci_mode:
Expand Down Expand Up @@ -694,7 +695,7 @@ def _init(self):

self.tmp_dir = get_temp_dir(self.tmp_dir)

def main(self, tests: TestList | None = None):
def main(self, tests: TestList | None = None) -> NoReturn:
if self.want_add_python_opts:
self._add_python_opts()

Expand Down Expand Up @@ -723,7 +724,7 @@ def main(self, tests: TestList | None = None):
sys.exit(exitcode)


def main(tests=None, _add_python_opts=False, **kwargs):
def main(tests=None, _add_python_opts=False, **kwargs) -> NoReturn:
"""Run the Python suite."""
ns = _parse_args(sys.argv[1:], **kwargs)
Regrtest(ns, _add_python_opts=_add_python_opts).main(tests=tests)
2 changes: 1 addition & 1 deletion Lib/test/libregrtest/pgo.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@
'test_xml_etree_c',
]

def setup_pgo_tests(cmdline_args, pgo_extended: bool):
def setup_pgo_tests(cmdline_args, pgo_extended: bool) -> None:
if not cmdline_args and not pgo_extended:
# run default set of tests for PGO training
cmdline_args[:] = PGO_TESTS[:]
2 changes: 1 addition & 1 deletion Lib/test/libregrtest/refleak.py
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ def dash_R_cleanup(fs, ps, pic, zdc, abcs):
sys._clear_type_cache()


def warm_caches():
def warm_caches() -> None:
# char cache
s = bytes(range(256))
for i in range(256):
Expand Down
1 change: 1 addition & 0 deletions Lib/test/libregrtest/result.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ def __str__(self) -> str:
case State.DID_NOT_RUN:
return f"{self.test_name} ran no tests"
case State.TIMEOUT:
assert self.duration is not None, "self.duration is None"
return f"{self.test_name} timed out ({format_duration(self.duration)})"
case _:
raise ValueError("unknown result state: {state!r}")
Expand Down
14 changes: 7 additions & 7 deletions Lib/test/libregrtest/results.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def get_state(self, fail_env_changed: bool) -> str:

return ', '.join(state)

def get_exitcode(self, fail_env_changed, fail_rerun):
def get_exitcode(self, fail_env_changed: bool, fail_rerun: bool) -> int:
exitcode = 0
if self.bad:
exitcode = EXITCODE_BAD_TEST
Expand All @@ -84,7 +84,7 @@ def get_exitcode(self, fail_env_changed, fail_rerun):
exitcode = EXITCODE_BAD_TEST
return exitcode

def accumulate_result(self, result: TestResult, runtests: RunTests):
def accumulate_result(self, result: TestResult, runtests: RunTests) -> None:
test_name = result.test_name
rerun = runtests.rerun
fail_env_changed = runtests.fail_env_changed
Expand Down Expand Up @@ -126,7 +126,7 @@ def accumulate_result(self, result: TestResult, runtests: RunTests):
if xml_data:
self.add_junit(xml_data)

def need_rerun(self):
def need_rerun(self) -> bool:
return bool(self.rerun_results)

def prepare_rerun(self, *, clear: bool = True) -> tuple[TestTuple, FilterDict]:
Expand All @@ -149,7 +149,7 @@ def prepare_rerun(self, *, clear: bool = True) -> tuple[TestTuple, FilterDict]:

return (tuple(tests), match_tests_dict)

def add_junit(self, xml_data: list[str]):
def add_junit(self, xml_data: list[str]) -> None:
import xml.etree.ElementTree as ET
for e in xml_data:
try:
Expand All @@ -158,7 +158,7 @@ def add_junit(self, xml_data: list[str]):
print(xml_data, file=sys.__stderr__)
raise

def write_junit(self, filename: StrPath):
def write_junit(self, filename: StrPath) -> None:
if not self.testsuite_xml:
# Don't create empty XML file
return
Expand All @@ -183,7 +183,7 @@ def write_junit(self, filename: StrPath):
for s in ET.tostringlist(root):
f.write(s)

def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool):
def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
if print_slowest:
self.test_times.sort(reverse=True)
print()
Expand Down Expand Up @@ -225,7 +225,7 @@ def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool):
print()
print("Test suite interrupted by signal SIGINT.")

def display_summary(self, first_runtests: RunTests, filtered: bool):
def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
# Total tests
stats = self.stats
text = f'run={stats.tests_run:,}'
Expand Down
16 changes: 8 additions & 8 deletions Lib/test/libregrtest/runtests.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,12 @@
import shlex
import subprocess
import sys
from typing import Any
from typing import Any, Iterator

from test import support

from .utils import (
StrPath, StrJSON, TestTuple, TestFilter, FilterTuple, FilterDict)
StrPath, StrJSON, TestTuple, TestName, TestFilter, FilterTuple, FilterDict)


class JsonFileType:
Expand Down Expand Up @@ -41,8 +41,8 @@ def configure_subprocess(self, popen_kwargs: dict) -> None:
popen_kwargs['startupinfo'] = startupinfo

@contextlib.contextmanager
def inherit_subprocess(self):
if self.file_type == JsonFileType.WINDOWS_HANDLE:
def inherit_subprocess(self) -> Iterator[None]:
if sys.platform == 'win32' and self.file_type == JsonFileType.WINDOWS_HANDLE:
os.set_handle_inheritable(self.file, True)
try:
yield
Expand Down Expand Up @@ -105,25 +105,25 @@ def copy(self, **override) -> 'RunTests':
state.update(override)
return RunTests(**state)

def create_worker_runtests(self, **override):
def create_worker_runtests(self, **override) -> WorkerRunTests:
state = dataclasses.asdict(self)
state.update(override)
return WorkerRunTests(**state)

def get_match_tests(self, test_name) -> FilterTuple | None:
def get_match_tests(self, test_name: TestName) -> FilterTuple | None:
if self.match_tests_dict is not None:
return self.match_tests_dict.get(test_name, None)
else:
return None

def get_jobs(self):
def get_jobs(self) -> int | None:
# Number of run_single_test() calls needed to run all tests.
# None means that there is not bound limit (--forever option).
if self.forever:
return None
return len(self.tests)

def iter_tests(self):
def iter_tests(self) -> Iterator[TestName]:
if self.forever:
while True:
yield from self.tests
Expand Down
9 changes: 5 additions & 4 deletions Lib/test/libregrtest/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,17 +25,18 @@ def setup_test_dir(testdir: str | None) -> None:
sys.path.insert(0, os.path.abspath(testdir))


def setup_process():
def setup_process() -> None:
fix_umask()

assert sys.__stderr__ is not None, "sys.__stderr__ is None"
try:
stderr_fd = sys.__stderr__.fileno()
except (ValueError, AttributeError):
# Catch ValueError to catch io.UnsupportedOperation on TextIOBase
# and ValueError on a closed stream.
#
# Catch AttributeError for stderr being None.
stderr_fd = None
pass
else:
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True, file=stderr_fd)
Expand Down Expand Up @@ -68,7 +69,7 @@ def setup_process():
for index, path in enumerate(module.__path__):
module.__path__[index] = os.path.abspath(path)
if getattr(module, '__file__', None):
module.__file__ = os.path.abspath(module.__file__)
module.__file__ = os.path.abspath(module.__file__) # type: ignore[type-var]

if hasattr(sys, 'addaudithook'):
# Add an auditing hook for all tests to ensure PySys_Audit is tested
Expand All @@ -87,7 +88,7 @@ def _test_audit_hook(name, args):
os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)


def setup_tests(runtests: RunTests):
def setup_tests(runtests: RunTests) -> None:
support.verbose = runtests.verbose
support.failfast = runtests.fail_fast
support.PGO = runtests.pgo
Expand Down
2 changes: 1 addition & 1 deletion Lib/test/libregrtest/tsan.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,6 @@
]


def setup_tsan_tests(cmdline_args):
def setup_tsan_tests(cmdline_args) -> None:
if not cmdline_args:
cmdline_args[:] = TSAN_TESTS[:]
Loading

0 comments on commit a450024

Please sign in to comment.