diff options
Diffstat (limited to 'testing/framework')
-rw-r--r-- | testing/framework/TestCmd.py | 108 | ||||
-rw-r--r-- | testing/framework/TestCmdTests.py | 222 | ||||
-rw-r--r-- | testing/framework/TestCommon.py | 52 | ||||
-rw-r--r-- | testing/framework/TestCommonTests.py | 190 | ||||
-rw-r--r-- | testing/framework/TestRuntest.py | 10 | ||||
-rw-r--r-- | testing/framework/TestSCons.py | 89 | ||||
-rw-r--r-- | testing/framework/TestSConsMSVS.py | 12 | ||||
-rw-r--r-- | testing/framework/TestSCons_time.py | 8 | ||||
-rw-r--r-- | testing/framework/TestSConsign.py | 4 | ||||
-rw-r--r-- | testing/framework/TestUnit/cli.py | 2 | ||||
-rw-r--r-- | testing/framework/TestUnit/taprunner.py | 28 | ||||
-rw-r--r-- | testing/framework/test-framework.rst | 153 |
12 files changed, 478 insertions, 400 deletions
diff --git a/testing/framework/TestCmd.py b/testing/framework/TestCmd.py index b109843e0..5cdeea0de 100644 --- a/testing/framework/TestCmd.py +++ b/testing/framework/TestCmd.py @@ -427,7 +427,7 @@ def clean_up_ninja_daemon(self, result_type) -> None: shutil.rmtree(daemon_dir) -def fail_test(self=None, condition=True, function=None, skip=0, message=None): +def fail_test(self=None, condition: bool=True, function=None, skip: int=0, message=None) -> None: """Causes a test to exit with a fail. Reports that the test FAILED and exits with a status of 1, unless @@ -468,7 +468,7 @@ def fail_test(self=None, condition=True, function=None, skip=0, message=None): sys.exit(1) -def no_result(self=None, condition=True, function=None, skip=0): +def no_result(self=None, condition: bool=True, function=None, skip: int=0) -> None: """Causes a test to exit with a no result. In testing parlance NO RESULT means the test could not be completed @@ -510,7 +510,7 @@ def no_result(self=None, condition=True, function=None, skip=0): sys.exit(2) -def pass_test(self=None, condition=True, function=None): +def pass_test(self=None, condition: bool=True, function=None) -> None: """Causes a test to exit with a pass. Reports that the test PASSED and exits with a status of 0, unless @@ -644,8 +644,8 @@ def match_re_dotall(lines=None, res=None): return expr.match(lines) -def simple_diff(a, b, fromfile='', tofile='', - fromfiledate='', tofiledate='', n=0, lineterm=''): +def simple_diff(a, b, fromfile: str='', tofile: str='', + fromfiledate: str='', tofiledate: str='', n: int=0, lineterm: str=''): r"""Compare two sequences of lines; generate the delta as a simple diff. 
Similar to difflib.context_diff and difflib.unified_diff but @@ -694,8 +694,8 @@ def simple_diff(a, b, fromfile='', tofile='', yield f"> {l}" -def diff_re(a, b, fromfile='', tofile='', - fromfiledate='', tofiledate='', n=3, lineterm='\n'): +def diff_re(a, b, fromfile: str='', tofile: str='', + fromfiledate: str='', tofiledate: str='', n: int=3, lineterm: str='\n'): """Compare a and b (lists of strings) where a are regular expressions. A simple "diff" of two sets of lines when the expected lines @@ -864,7 +864,7 @@ class Popen(subprocess.Popen): def recv_err(self, maxsize=None): return self._recv('stderr', maxsize) - def send_recv(self, input='', maxsize=None): + def send_recv(self, input: str='', maxsize=None): return self.send(input), self.recv(maxsize), self.recv_err(maxsize) def get_conn_maxsize(self, which, maxsize): @@ -874,7 +874,7 @@ class Popen(subprocess.Popen): maxsize = 1 return getattr(self, which), maxsize - def _close(self, which): + def _close(self, which) -> None: getattr(self, which).close() setattr(self, which, None) @@ -969,7 +969,7 @@ class Popen(subprocess.Popen): disconnect_message = "Other end disconnected!" 
-def recv_some(p, t=.1, e=1, tr=5, stderr=0): +def recv_some(p, t: float=.1, e: int=1, tr: int=5, stderr: int=0): if tr < 1: tr = 1 x = time.time() + t @@ -1004,7 +1004,7 @@ _Cleanup = [] @atexit.register -def _clean(): +def _clean() -> None: global _Cleanup cleanlist = [c for c in _Cleanup if c] del _Cleanup[:] @@ -1030,10 +1030,10 @@ class TestCmd: diff=None, diff_stdout=None, diff_stderr=None, - combine=0, - universal_newlines=True, + combine: int=0, + universal_newlines: bool=True, timeout=None, - ): + ) -> None: self.external = os.environ.get('SCONS_EXTERNAL_TEST', 0) self._cwd = os.getcwd() self.description_set(description) @@ -1087,16 +1087,16 @@ class TestCmd: self.fixture_dirs = [] - def __del__(self): + def __del__(self) -> None: self.cleanup() - def __repr__(self): + def __repr__(self) -> str: return f"{id(self):x}" banner_char = '=' banner_width = 80 - def banner(self, s, width=None): + def banner(self, s, width=None) -> str: if width is None: width = self.banner_width return f"{s:{self.banner_char}<{width}}" @@ -1110,12 +1110,12 @@ class TestCmd: path = os.path.join(self.workdir, path) return path - def chmod(self, path, mode): + def chmod(self, path, mode) -> None: """Changes permissions on the specified file or directory.""" path = self.canonicalize(path) os.chmod(path, mode) - def cleanup(self, condition=None): + def cleanup(self, condition=None) -> None: """Removes any temporary working directories. Cleans the TestCmd instance. If the environment variable PRESERVE was @@ -1182,11 +1182,11 @@ class TestCmd: cmd.extend(arguments) return cmd - def description_set(self, description): + def description_set(self, description) -> None: """Set the description of the functionality being tested. 
""" self.description = description - def set_diff_function(self, diff=_Null, stdout=_Null, stderr=_Null): + def set_diff_function(self, diff=_Null, stdout=_Null, stderr=_Null) -> None: """Sets the specified diff functions.""" if diff is not _Null: self._diff_function = diff @@ -1195,7 +1195,7 @@ class TestCmd: if stderr is not _Null: self._diff_stderr_function = stderr - def diff(self, a, b, name=None, diff_function=None, *args, **kw): + def diff(self, a, b, name=None, diff_function=None, *args, **kw) -> None: if diff_function is None: try: diff_function = getattr(self, self._diff_function) @@ -1239,7 +1239,7 @@ class TestCmd: unified_diff = staticmethod(difflib.unified_diff) - def fail_test(self, condition=True, function=None, skip=0, message=None): + def fail_test(self, condition: bool=True, function=None, skip: int=0, message=None) -> None: """Cause the test to fail.""" if not condition: return @@ -1250,13 +1250,13 @@ class TestCmd: skip=skip, message=message) - def interpreter_set(self, interpreter): + def interpreter_set(self, interpreter) -> None: """Set the program to be used to interpret the program under test as a script. """ self.interpreter = interpreter - def set_match_function(self, match=_Null, stdout=_Null, stderr=_Null): + def set_match_function(self, match=_Null, stdout=_Null, stderr=_Null) -> None: """Sets the specified match functions. 
""" if match is not _Null: self._match_function = match @@ -1306,7 +1306,7 @@ class TestCmd: match_re_dotall = staticmethod(match_re_dotall) - def no_result(self, condition=True, function=None, skip=0): + def no_result(self, condition: bool=True, function=None, skip: int=0) -> None: """Report that the test could not be run.""" if not condition: return @@ -1316,14 +1316,14 @@ class TestCmd: function=function, skip=skip) - def pass_test(self, condition=True, function=None): + def pass_test(self, condition: bool=True, function=None) -> None: """Cause the test to pass.""" if not condition: return self.condition = 'pass_test' pass_test(self=self, condition=condition, function=function) - def preserve(self, *conditions): + def preserve(self, *conditions) -> None: """Preserves temporary working directories. Arrange for the temporary working directories for the @@ -1337,14 +1337,14 @@ class TestCmd: for cond in conditions: self._preserve[cond] = 1 - def program_set(self, program): + def program_set(self, program) -> None: """Sets the executable program or script to be tested.""" if not self.external: if program and not os.path.isabs(program): program = os.path.join(self._cwd, program) self.program = program - def read(self, file, mode='rb', newline=None): + def read(self, file, mode: str='rb', newline=None): """Reads and returns the contents of the specified file name. The file name may be a list, in which case the elements are @@ -1364,7 +1364,7 @@ class TestCmd: with open(file, mode) as f: return f.read() - def rmdir(self, dir): + def rmdir(self, dir) -> None: """Removes the specified dir name. 
The dir name may be a list, in which case the elements are @@ -1377,7 +1377,7 @@ class TestCmd: os.rmdir(dir) - def parse_path(self, path, suppress_current=False): + def parse_path(self, path, suppress_current: bool=False): """Return a list with the single path components of path.""" head, tail = os.path.split(path) result = [] @@ -1395,7 +1395,7 @@ class TestCmd: return result - def dir_fixture(self, srcdir, dstdir=None): + def dir_fixture(self, srcdir, dstdir=None) -> None: """ Copies the contents of the fixture directory to the test directory. If srcdir is an absolute path, it is tried directly, else @@ -1444,7 +1444,7 @@ class TestCmd: else: shutil.copy(epath, dpath) - def file_fixture(self, srcfile, dstfile=None): + def file_fixture(self, srcfile, dstfile=None) -> None: """ Copies a fixture file to the test directory, optionally renaming. If srcfile is an absolute path, it is tried directly, else @@ -1575,7 +1575,7 @@ class TestCmd: stream = stream.decode('utf-8', errors='replace') return stream.replace('\r\n', '\n') - def finish(self, popen=None, **kw): + def finish(self, popen=None, **kw) -> None: """ Finishes and waits for the process. Process being run under control of the specified popen argument @@ -1620,7 +1620,7 @@ class TestCmd: chdir=None, stdin=None, universal_newlines=None, - timeout=None): + timeout=None) -> None: """Runs a test of the program or script for the test environment. Output and error output are saved for future retrieval via @@ -1709,7 +1709,7 @@ class TestCmd: write(err) write('============ END STDERR\n') - def sleep(self, seconds=default_sleep_seconds): + def sleep(self, seconds=default_sleep_seconds) -> None: """Sleeps at least the specified number of seconds. If no number is specified, sleeps at least the minimum number of @@ -1789,7 +1789,7 @@ class TestCmd: return count - def symlink(self, target, link): + def symlink(self, target, link) -> None: """Creates a symlink to the specified target. 
The link name may be a list, in which case the elements are @@ -1849,7 +1849,7 @@ class TestCmd: return path - def touch(self, path, mtime=None): + def touch(self, path, mtime=None) -> None: """Updates the modification time on the specified file or directory. The default is to update to the @@ -1861,7 +1861,7 @@ class TestCmd: mtime = time.time() os.utime(path, (atime, mtime)) - def unlink(self, file): + def unlink(self, file) -> None: """Unlinks the specified file name. The file name may be a list, in which case the elements are @@ -1872,7 +1872,7 @@ class TestCmd: file = self.canonicalize(file) os.unlink(file) - def verbose_set(self, verbose): + def verbose_set(self, verbose) -> None: """Sets the verbose level.""" self.verbose = verbose @@ -1884,7 +1884,7 @@ class TestCmd: file = where_is(file, path, pathext) return file - def workdir_set(self, path): + def workdir_set(self, path) -> None: """Creates a temporary working directory with the specified path name. If the path is a null string (''), a unique directory name is created. @@ -1903,7 +1903,7 @@ class TestCmd: """ return os.path.join(self.workdir, *args) - def readable(self, top, read=True): + def readable(self, top, read: bool=True) -> None: """Makes the specified directory tree readable or unreadable. Tree is made readable if `read` evaluates True (the default), @@ -1917,7 +1917,7 @@ class TestCmd: return if read: - def do_chmod(fname): + def do_chmod(fname) -> None: try: st = os.stat(fname) except OSError: @@ -1926,7 +1926,7 @@ class TestCmd: os.chmod(fname, stat.S_IMODE( st[stat.ST_MODE] | stat.S_IREAD)) else: - def do_chmod(fname): + def do_chmod(fname) -> None: try: st = os.stat(fname) except OSError: @@ -1958,7 +1958,7 @@ class TestCmd: do_chmod(os.path.join(dirpath, name)) do_chmod(top) - def writable(self, top, write=True): + def writable(self, top, write: bool=True) -> None: """Make the specified directory tree writable or unwritable. 
Tree is made writable if `write` evaluates True (the default), @@ -1968,13 +1968,13 @@ class TestCmd: if sys.platform == 'win32': if write: - def do_chmod(fname): + def do_chmod(fname) -> None: try: os.chmod(fname, stat.S_IWRITE) except OSError: pass else: - def do_chmod(fname): + def do_chmod(fname) -> None: try: os.chmod(fname, stat.S_IREAD) except OSError: @@ -1983,7 +1983,7 @@ class TestCmd: else: if write: - def do_chmod(fname): + def do_chmod(fname) -> None: try: st = os.stat(fname) except OSError: @@ -1991,7 +1991,7 @@ class TestCmd: else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE] | 0o200)) else: - def do_chmod(fname): + def do_chmod(fname) -> None: try: st = os.stat(fname) except OSError: @@ -2008,7 +2008,7 @@ class TestCmd: for name in dirnames + filenames: do_chmod(os.path.join(dirpath, name)) - def executable(self, top, execute=True): + def executable(self, top, execute: bool=True) -> None: """Make the specified directory tree executable or not executable. Tree is made executable if `execute` evaluates True (the default), @@ -2022,7 +2022,7 @@ class TestCmd: return if execute: - def do_chmod(fname): + def do_chmod(fname) -> None: try: st = os.stat(fname) except OSError: @@ -2031,7 +2031,7 @@ class TestCmd: os.chmod(fname, stat.S_IMODE( st[stat.ST_MODE] | stat.S_IEXEC)) else: - def do_chmod(fname): + def do_chmod(fname) -> None: try: st = os.stat(fname) except OSError: @@ -2063,7 +2063,7 @@ class TestCmd: do_chmod(os.path.join(dirpath, name)) do_chmod(top) - def write(self, file, content, mode='wb'): + def write(self, file, content, mode: str='wb'): """Writes data to file. The file is created under the temporary working directory. 
diff --git a/testing/framework/TestCmdTests.py b/testing/framework/TestCmdTests.py index 0a1fa26f9..dc752ba4e 100644 --- a/testing/framework/TestCmdTests.py +++ b/testing/framework/TestCmdTests.py @@ -54,7 +54,7 @@ def _is_executable(path): # XXX this doesn't take into account UID, it assumes it's our file return os.stat(path)[stat.ST_MODE] & stat.S_IEXEC -def _clear_dict(dict, *keys): +def _clear_dict(dict, *keys) -> None: for key in keys: try: del dict[key] @@ -68,10 +68,10 @@ class ExitError(Exception): class TestCmdTestCase(unittest.TestCase): """Base class for TestCmd test cases, with fixture and utility methods.""" - def setUp(self): + def setUp(self) -> None: self.orig_cwd = os.getcwd() - def tearDown(self): + def tearDown(self) -> None: os.chdir(self.orig_cwd) def setup_run_scripts(self): @@ -145,7 +145,7 @@ class TestCmdTestCase(unittest.TestCase): stderr = self.translate_newlines(to_str(cp.stderr)) return stdout, stderr, cp.returncode - def popen_python(self, indata, status=0, stdout="", stderr="", python=None): + def popen_python(self, indata, status: int=0, stdout: str="", stderr: str="", python=None) -> None: if python is None: python = sys.executable _stdout, _stderr, _status = self.call_python(indata, python) @@ -164,7 +164,7 @@ class TestCmdTestCase(unittest.TestCase): f"Actual STDERR ============\n{_stderr}" ) - def run_match(self, content, *args): + def run_match(self, content, *args) -> None: expect = "%s: %s: %s: %s\n" % args content = self.translate_newlines(to_str(content)) assert content == expect, ( @@ -175,7 +175,7 @@ class TestCmdTestCase(unittest.TestCase): class __init__TestCase(TestCmdTestCase): - def test_init(self): + def test_init(self) -> None: """Test init()""" test = TestCmd.TestCmd() test = TestCmd.TestCmd(description = 'test') @@ -187,14 +187,14 @@ class __init__TestCase(TestCmdTestCase): class basename_TestCase(TestCmdTestCase): - def test_basename(self): + def test_basename(self) -> None: """Test basename() [XXX TO BE 
WRITTEN]""" assert 1 == 1 class cleanup_TestCase(TestCmdTestCase): - def test_cleanup(self): + def test_cleanup(self) -> None: """Test cleanup()""" test = TestCmd.TestCmd(workdir = '') wdir = test.workdir @@ -202,7 +202,7 @@ class cleanup_TestCase(TestCmdTestCase): test.cleanup() assert not os.path.exists(wdir) - def test_writable(self): + def test_writable(self) -> None: """Test cleanup() when the directory isn't writable""" test = TestCmd.TestCmd(workdir = '') wdir = test.workdir @@ -212,7 +212,7 @@ class cleanup_TestCase(TestCmdTestCase): test.cleanup() assert not os.path.exists(wdir) - def test_shutil(self): + def test_shutil(self) -> None: """Test cleanup() when used with shutil""" test = TestCmd.TestCmd(workdir = '') wdir = test.workdir @@ -220,7 +220,7 @@ class cleanup_TestCase(TestCmdTestCase): import shutil save_rmtree = shutil.rmtree - def my_rmtree(dir, ignore_errors=0, wdir=wdir, _rmtree=save_rmtree): + def my_rmtree(dir, ignore_errors: int=0, wdir=wdir, _rmtree=save_rmtree): assert os.getcwd() != wdir return _rmtree(dir, ignore_errors=ignore_errors) try: @@ -229,7 +229,7 @@ class cleanup_TestCase(TestCmdTestCase): finally: shutil.rmtree = save_rmtree - def test_atexit(self): + def test_atexit(self) -> None: """Test cleanup when atexit is used""" self.popen_python(f"""\ import atexit @@ -248,7 +248,7 @@ sys.exit(0) class chmod_TestCase(TestCmdTestCase): - def test_chmod(self): + def test_chmod(self) -> None: """Test chmod()""" test = TestCmd.TestCmd(workdir = '', subdir = 'sub') @@ -381,7 +381,7 @@ run2 STDERR third line class description_TestCase(TestCmdTestCase): - def test_description(self): + def test_description(self) -> None: """Test description()""" test = TestCmd.TestCmd() assert test.description is None, 'initialized description?' 
@@ -393,7 +393,7 @@ class description_TestCase(TestCmdTestCase): class diff_TestCase(TestCmdTestCase): - def test_diff_re(self): + def test_diff_re(self) -> None: """Test diff_re()""" result = TestCmd.diff_re(["abcde"], ["abcde"]) result = list(result) @@ -405,7 +405,7 @@ class diff_TestCase(TestCmdTestCase): result = list(result) assert result == ['1c1', "< 'a.*e'", '---', "> 'xxx'"], result - def test_diff_custom_function(self): + def test_diff_custom_function(self) -> None: """Test diff() using a custom function""" self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -431,7 +431,7 @@ STDOUT========================================================================== ***** """) - def test_diff_string(self): + def test_diff_string(self) -> None: self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path import TestCmd @@ -447,7 +447,7 @@ STDOUT========================================================================== > 'b2' """) - def test_error(self): + def test_error(self) -> None: """Test handling a compilation error in TestCmd.diff_re()""" script_input = f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -462,7 +462,7 @@ sys.exit(0) assert (stderr.find(expect1) != -1 or stderr.find(expect2) != -1), repr(stderr) - def test_simple_diff_static_method(self): + def test_simple_diff_static_method(self) -> None: """Test calling the TestCmd.TestCmd.simple_diff() static method""" self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -475,7 +475,7 @@ assert result == expect, result sys.exit(0) """) - def test_context_diff_static_method(self): + def test_context_diff_static_method(self) -> None: """Test calling the TestCmd.TestCmd.context_diff() static method""" self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -504,7 +504,7 @@ assert result == expect, result sys.exit(0) """) - def test_unified_diff_static_method(self): + def test_unified_diff_static_method(self) -> 
None: """Test calling the TestCmd.TestCmd.unified_diff() static method""" self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -528,7 +528,7 @@ assert result == expect, result sys.exit(0) """) - def test_diff_re_static_method(self): + def test_diff_re_static_method(self) -> None: """Test calling the TestCmd.TestCmd.diff_re() static method""" self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -557,7 +557,7 @@ sys.exit(0) class diff_stderr_TestCase(TestCmdTestCase): - def test_diff_stderr_default(self): + def test_diff_stderr_default(self) -> None: """Test diff_stderr() default behavior""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -573,7 +573,7 @@ sys.exit(0) > b2 """) - def test_diff_stderr_not_affecting_diff_stdout(self): + def test_diff_stderr_not_affecting_diff_stdout(self) -> None: """Test diff_stderr() not affecting diff_stdout() behavior""" self.popen_python(fr""" import sys @@ -595,7 +595,7 @@ diff_stdout: > bb """) - def test_diff_stderr_custom_function(self): + def test_diff_stderr_custom_function(self) -> None: """Test diff_stderr() using a custom function""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -613,7 +613,7 @@ b: def """) - def test_diff_stderr_TestCmd_function(self): + def test_diff_stderr_TestCmd_function(self) -> None: """Test diff_stderr() using a TestCmd function""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -629,7 +629,7 @@ sys.exit(0) > 'b' """) - def test_diff_stderr_static_method(self): + def test_diff_stderr_static_method(self) -> None: """Test diff_stderr() using a static method""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -645,7 +645,7 @@ sys.exit(0) > 'b' """) - def test_diff_stderr_string(self): + def test_diff_stderr_string(self) -> None: """Test diff_stderr() using a string to fetch the diff method""" self.popen_python(fr"""import 
sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -664,7 +664,7 @@ sys.exit(0) class diff_stdout_TestCase(TestCmdTestCase): - def test_diff_stdout_default(self): + def test_diff_stdout_default(self) -> None: """Test diff_stdout() default behavior""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -680,7 +680,7 @@ sys.exit(0) > b2 """) - def test_diff_stdout_not_affecting_diff_stderr(self): + def test_diff_stdout_not_affecting_diff_stderr(self) -> None: """Test diff_stdout() not affecting diff_stderr() behavior""" self.popen_python(fr""" import sys @@ -702,7 +702,7 @@ diff_stderr: > bb """) - def test_diff_stdout_custom_function(self): + def test_diff_stdout_custom_function(self) -> None: """Test diff_stdout() using a custom function""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -720,7 +720,7 @@ b: def """) - def test_diff_stdout_TestCmd_function(self): + def test_diff_stdout_TestCmd_function(self) -> None: """Test diff_stdout() using a TestCmd function""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -736,7 +736,7 @@ sys.exit(0) > 'b' """) - def test_diff_stdout_static_method(self): + def test_diff_stdout_static_method(self) -> None: """Test diff_stdout() using a static method""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -752,7 +752,7 @@ sys.exit(0) > 'b' """) - def test_diff_stdout_string(self): + def test_diff_stdout_string(self) -> None: """Test diff_stdout() using a string to fetch the diff method""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -771,7 +771,7 @@ sys.exit(0) class exit_TestCase(TestCmdTestCase): - def test_exit(self): + def test_exit(self) -> None: """Test exit()""" def _test_it(cwd, tempdir, condition, preserved): close_true = {'pass_test': 1, 'fail_test': 0, 'no_result': 0} @@ -845,7 +845,7 @@ test.{condition}() class fail_test_TestCase(TestCmdTestCase): - def 
test_fail_test(self): + def test_fail_test(self) -> None: """Test fail_test()""" run_env = TestCmd.TestCmd(workdir = '') run_env.write('run', """import sys @@ -912,7 +912,7 @@ test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')) class interpreter_TestCase(TestCmdTestCase): - def test_interpreter(self): + def test_interpreter(self) -> None: """Test interpreter()""" run_env = TestCmd.TestCmd(workdir = '') run_env.write('run', """import sys @@ -932,7 +932,7 @@ sys.stderr.write("run: STDERR\\n") class match_TestCase(TestCmdTestCase): - def test_match_default(self): + def test_match_default(self) -> None: """Test match() default behavior""" test = TestCmd.TestCmd() assert test.match("abcde\n", "a.*e\n") @@ -941,7 +941,7 @@ class match_TestCase(TestCmdTestCase): regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] assert test.match(lines, regexes) - def test_match_custom_function(self): + def test_match_custom_function(self) -> None: """Test match() using a custom function""" def match_length(lines, matches): return len(lines) == len(matches) @@ -954,7 +954,7 @@ class match_TestCase(TestCmdTestCase): regexes = ["1\n", "1\n"] assert test.match(lines, regexes) # due to equal numbers of lines - def test_match_TestCmd_function(self): + def test_match_TestCmd_function(self) -> None: """Test match() using a TestCmd function""" test = TestCmd.TestCmd(match = TestCmd.match_exact) assert not test.match("abcde\n", "a.*e\n") @@ -966,7 +966,7 @@ class match_TestCase(TestCmdTestCase): assert not test.match(lines, regexes) assert test.match(lines, lines) - def test_match_static_method(self): + def test_match_static_method(self) -> None: """Test match() using a static method""" test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_exact) assert not test.match("abcde\n", "a.*e\n") @@ -978,7 +978,7 @@ class match_TestCase(TestCmdTestCase): assert not test.match(lines, regexes) assert test.match(lines, lines) - def test_match_string(self): + def test_match_string(self) -> None: 
"""Test match() using a string to fetch the match method""" test = TestCmd.TestCmd(match='match_exact') assert not test.match("abcde\n", "a.*e\n") @@ -993,23 +993,23 @@ class match_TestCase(TestCmdTestCase): class match_exact_TestCase(TestCmdTestCase): - def test_match_exact_function(self): + def test_match_exact_function(self) -> None: """Test calling the TestCmd.match_exact() function""" assert not TestCmd.match_exact("abcde\\n", "a.*e\\n") assert TestCmd.match_exact("abcde\\n", "abcde\\n") - def test_match_exact_instance_method(self): + def test_match_exact_instance_method(self) -> None: """Test calling the TestCmd.TestCmd().match_exact() instance method""" test = TestCmd.TestCmd() assert not test.match_exact("abcde\\n", "a.*e\\n") assert test.match_exact("abcde\\n", "abcde\\n") - def test_match_exact_static_method(self): + def test_match_exact_static_method(self) -> None: """Test calling the TestCmd.TestCmd.match_exact() static method""" assert not TestCmd.TestCmd.match_exact("abcde\\n", "a.*e\\n") assert TestCmd.TestCmd.match_exact("abcde\\n", "abcde\\n") - def test_evaluation(self): + def test_evaluation(self) -> None: """Test match_exact() evaluation""" test = TestCmd.TestCmd() assert not test.match_exact("abcde\n", "a.*e\n") @@ -1034,20 +1034,20 @@ class match_exact_TestCase(TestCmdTestCase): class match_re_dotall_TestCase(TestCmdTestCase): - def test_match_re_dotall_function(self): + def test_match_re_dotall_function(self) -> None: """Test calling the TestCmd.match_re_dotall() function""" assert TestCmd.match_re_dotall("abcde\nfghij\n", r"a.*j\n") - def test_match_re_dotall_instance_method(self): + def test_match_re_dotall_instance_method(self) -> None: """Test calling the TestCmd.TestCmd().match_re_dotall() instance method""" test = TestCmd.TestCmd() test.match_re_dotall("abcde\\nfghij\\n", r"a.*j\\n") - def test_match_re_dotall_static_method(self): + def test_match_re_dotall_static_method(self) -> None: """Test calling the 
TestCmd.TestCmd.match_re_dotall() static method""" assert TestCmd.TestCmd.match_re_dotall("abcde\nfghij\n", r"a.*j\n") - def test_error(self): + def test_error(self) -> None: """Test handling a compilation error in TestCmd.match_re_dotall()""" run_env = TestCmd.TestCmd(workdir = '') cwd = os.getcwd() @@ -1070,7 +1070,7 @@ sys.exit(0) finally: os.chdir(cwd) - def test_evaluation(self): + def test_evaluation(self) -> None: """Test match_re_dotall() evaluation""" test = TestCmd.TestCmd() assert test.match_re_dotall("abcde\nfghij\n", r"a.*e\nf.*j\n") @@ -1107,20 +1107,20 @@ sys.exit(0) class match_re_TestCase(TestCmdTestCase): - def test_match_re_function(self): + def test_match_re_function(self) -> None: """Test calling the TestCmd.match_re() function""" assert TestCmd.match_re("abcde\n", "a.*e\n") - def test_match_re_instance_method(self): + def test_match_re_instance_method(self) -> None: """Test calling the TestCmd.TestCmd().match_re() instance method""" test = TestCmd.TestCmd() assert test.match_re("abcde\n", "a.*e\n") - def test_match_re_static_method(self): + def test_match_re_static_method(self) -> None: """Test calling the TestCmd.TestCmd.match_re() static method""" assert TestCmd.TestCmd.match_re("abcde\n", "a.*e\n") - def test_error(self): + def test_error(self) -> None: """Test handling a compilation error in TestCmd.match_re()""" run_env = TestCmd.TestCmd(workdir = '') cwd = os.getcwd() @@ -1145,7 +1145,7 @@ sys.exit(0) finally: os.chdir(cwd) - def test_evaluation(self): + def test_evaluation(self) -> None: """Test match_re() evaluation""" test = TestCmd.TestCmd() assert test.match_re("abcde\n", "a.*e\n") @@ -1170,7 +1170,7 @@ sys.exit(0) class match_stderr_TestCase(TestCmdTestCase): - def test_match_stderr_default(self): + def test_match_stderr_default(self) -> None: """Test match_stderr() default behavior""" test = TestCmd.TestCmd() assert test.match_stderr("abcde\n", "a.*e\n") @@ -1179,7 +1179,7 @@ class match_stderr_TestCase(TestCmdTestCase): regexes = 
[r"v[^a-u]*z\n", r"6[^ ]+0\n"] assert test.match_stderr(lines, regexes) - def test_match_stderr_not_affecting_match_stdout(self): + def test_match_stderr_not_affecting_match_stdout(self) -> None: """Test match_stderr() not affecting match_stdout() behavior""" test = TestCmd.TestCmd(match_stderr=TestCmd.TestCmd.match_exact) @@ -1198,7 +1198,7 @@ class match_stderr_TestCase(TestCmdTestCase): regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"] assert test.match_stdout(lines, regexes) - def test_match_stderr_custom_function(self): + def test_match_stderr_custom_function(self) -> None: """Test match_stderr() using a custom function""" def match_length(lines, matches): return len(lines) == len(matches) @@ -1211,7 +1211,7 @@ class match_stderr_TestCase(TestCmdTestCase): regexes = [r"1\n", r"1\n"] assert test.match_stderr(lines, regexes) # equal numbers of lines - def test_match_stderr_TestCmd_function(self): + def test_match_stderr_TestCmd_function(self) -> None: """Test match_stderr() using a TestCmd function""" test = TestCmd.TestCmd(match_stderr = TestCmd.match_exact) assert not test.match_stderr("abcde\n", "a.*e\n") @@ -1223,7 +1223,7 @@ class match_stderr_TestCase(TestCmdTestCase): assert not test.match_stderr(lines, regexes) assert test.match_stderr(lines, lines) - def test_match_stderr_static_method(self): + def test_match_stderr_static_method(self) -> None: """Test match_stderr() using a static method""" test = TestCmd.TestCmd(match_stderr=TestCmd.TestCmd.match_exact) assert not test.match_stderr("abcde\n", "a.*e\n") @@ -1235,7 +1235,7 @@ class match_stderr_TestCase(TestCmdTestCase): assert not test.match_stderr(lines, regexes) assert test.match_stderr(lines, lines) - def test_match_stderr_string(self): + def test_match_stderr_string(self) -> None: """Test match_stderr() using a string to fetch the match method""" test = TestCmd.TestCmd(match_stderr='match_exact') assert not test.match_stderr("abcde\n", "a.*e\n") @@ -1250,7 +1250,7 @@ class 
match_stderr_TestCase(TestCmdTestCase): class match_stdout_TestCase(TestCmdTestCase): - def test_match_stdout_default(self): + def test_match_stdout_default(self) -> None: """Test match_stdout() default behavior""" test = TestCmd.TestCmd() assert test.match_stdout("abcde\n", "a.*e\n") @@ -1259,7 +1259,7 @@ class match_stdout_TestCase(TestCmdTestCase): regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"] assert test.match_stdout(lines, regexes) - def test_match_stdout_not_affecting_match_stderr(self): + def test_match_stdout_not_affecting_match_stderr(self) -> None: """Test match_stdout() not affecting match_stderr() behavior""" test = TestCmd.TestCmd(match_stdout=TestCmd.TestCmd.match_exact) @@ -1278,7 +1278,7 @@ class match_stdout_TestCase(TestCmdTestCase): regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"] assert test.match_stderr(lines, regexes) - def test_match_stdout_custom_function(self): + def test_match_stdout_custom_function(self) -> None: """Test match_stdout() using a custom function""" def match_length(lines, matches): return len(lines) == len(matches) @@ -1291,7 +1291,7 @@ class match_stdout_TestCase(TestCmdTestCase): regexes = [r"1\n", r"1\n"] assert test.match_stdout(lines, regexes) # equal numbers of lines - def test_match_stdout_TestCmd_function(self): + def test_match_stdout_TestCmd_function(self) -> None: """Test match_stdout() using a TestCmd function""" test = TestCmd.TestCmd(match_stdout = TestCmd.match_exact) assert not test.match_stdout("abcde\n", "a.*e\n") @@ -1303,7 +1303,7 @@ class match_stdout_TestCase(TestCmdTestCase): assert not test.match_stdout(lines, regexes) assert test.match_stdout(lines, lines) - def test_match_stdout_static_method(self): + def test_match_stdout_static_method(self) -> None: """Test match_stdout() using a static method""" test = TestCmd.TestCmd(match_stdout=TestCmd.TestCmd.match_exact) assert not test.match_stdout("abcde\n", "a.*e\n") @@ -1315,7 +1315,7 @@ class match_stdout_TestCase(TestCmdTestCase): assert not 
test.match_stdout(lines, regexes) assert test.match_stdout(lines, lines) - def test_match_stdout_string(self): + def test_match_stdout_string(self) -> None: """Test match_stdout() using a string to fetch the match method""" test = TestCmd.TestCmd(match_stdout='match_exact') assert not test.match_stdout("abcde\n", "a.*e\n") @@ -1330,7 +1330,7 @@ class match_stdout_TestCase(TestCmdTestCase): class no_result_TestCase(TestCmdTestCase): - def test_no_result(self): + def test_no_result(self) -> None: """Test no_result()""" run_env = TestCmd.TestCmd(workdir = '') run_env.write('run', """import sys @@ -1397,7 +1397,7 @@ test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')) class pass_test_TestCase(TestCmdTestCase): - def test_pass_test(self): + def test_pass_test(self) -> None: """Test pass_test()""" run_env = TestCmd.TestCmd(workdir = '') run_env.write('run', """import sys @@ -1436,9 +1436,9 @@ test.pass_test(condition = (test.status == 0), function = brag) class preserve_TestCase(TestCmdTestCase): - def test_preserve(self): + def test_preserve(self) -> None: """Test preserve()""" - def cleanup_test(test, cond=None, stdout=""): + def cleanup_test(test, cond=None, stdout: str="") -> None: save = sys.stdout with closing(StringIO()) as io: sys.stdout = io @@ -1526,7 +1526,7 @@ class preserve_TestCase(TestCmdTestCase): class program_TestCase(TestCmdTestCase): - def test_program(self): + def test_program(self) -> None: """Test program()""" test = TestCmd.TestCmd() assert test.program is None, 'initialized program?' 
@@ -1572,7 +1572,7 @@ class read_TestCase(TestCmdTestCase): except: raise - def _file_matches(file, contents, expected): + def _file_matches(file, contents, expected) -> None: contents = to_str(contents) assert contents == expected, \ "Expected contents of " + str(file) + "==========\n" + \ @@ -1649,7 +1649,7 @@ class rmdir_TestCase(TestCmdTestCase): class run_TestCase(TestCmdTestCase): - def test_run(self): + def test_run(self) -> None: """Test run()""" t = self.setup_run_scripts() @@ -1799,7 +1799,7 @@ class run_TestCase(TestCmdTestCase): finally: os.chdir(t.orig_cwd) - def test_run_subclass(self): + def test_run_subclass(self) -> None: """Test run() through a subclass with different signatures""" t = self.setup_run_scripts() @@ -1827,7 +1827,7 @@ class run_TestCase(TestCmdTestCase): class run_verbose_TestCase(TestCmdTestCase): - def test_run_verbose(self): + def test_run_verbose(self) -> None: """Test the run() method's verbose attribute""" # Prepare our "source directory." @@ -2043,7 +2043,7 @@ class run_verbose_TestCase(TestCmdTestCase): class set_diff_function_TestCase(TestCmdTestCase): - def test_set_diff_function(self): + def test_set_diff_function(self) -> None: """Test set_diff_function()""" self.popen_python(fr"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -2055,7 +2055,7 @@ test.diff(".\n", "a\n") sys.exit(0) """) - def test_set_diff_function_stdout(self): + def test_set_diff_function_stdout(self) -> None: """Test set_diff_function(): stdout""" self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -2083,7 +2083,7 @@ diff: diff_stdout: """) - def test_set_diff_function_stderr(self): + def test_set_diff_function_stderr(self) -> None: """Test set_diff_function(): stderr """ self.popen_python(f"""import sys sys.path = [r'{self.orig_cwd}'] + sys.path @@ -2114,7 +2114,7 @@ diff_stderr: class set_match_function_TestCase(TestCmdTestCase): - def test_set_match_function(self): + def test_set_match_function(self) -> None: 
"""Test set_match_function()""" test = TestCmd.TestCmd() assert test.match("abcde\n", "a.*e\n") @@ -2125,7 +2125,7 @@ class set_match_function_TestCase(TestCmdTestCase): assert not test.match("abcde\n", "a.*e\n") assert test.match("abcde\n", "abcde\n") - def test_set_match_function_stdout(self): + def test_set_match_function_stdout(self) -> None: """Test set_match_function(): stdout """ test = TestCmd.TestCmd() assert test.match("abcde\n", "a.*e\n") @@ -2140,7 +2140,7 @@ class set_match_function_TestCase(TestCmdTestCase): assert not test.match_stdout("abcde\n", "a.*e\n") assert test.match_stdout("abcde\n", "abcde\n") - def test_set_match_function_stderr(self): + def test_set_match_function_stderr(self) -> None: """Test set_match_function(): stderr """ test = TestCmd.TestCmd() assert test.match("abcde\n", "a.*e\n") @@ -2158,7 +2158,7 @@ class set_match_function_TestCase(TestCmdTestCase): class sleep_TestCase(TestCmdTestCase): - def test_sleep(self): + def test_sleep(self) -> None: """Test sleep()""" test = TestCmd.TestCmd() @@ -2216,7 +2216,7 @@ sys.stderr.write("run2 STDERR second line\\n") class command_args_TestCase(TestCmdTestCase): - def test_command_args(self): + def test_command_args(self) -> None: """Test command_args()""" run_env = TestCmd.TestCmd(workdir = '') os.chdir(run_env.workdir) @@ -2317,14 +2317,14 @@ with open(r'{t.recv_out_path}', 'w') as logfp: os.chmod(t.recv_script_path, 0o644) # XXX UNIX-specific return t - def _cleanup(self, popen): + def _cleanup(self, popen) -> None: """Quiet Python ResourceWarning after wait()""" if popen.stdout: popen.stdout.close() if popen.stderr: popen.stderr.close() - def test_start(self): + def test_start(self) -> None: """Test start()""" t = self.setup_run_scripts() @@ -2467,7 +2467,7 @@ with open(r'{t.recv_out_path}', 'w') as logfp: finally: os.chdir(t.orig_cwd) - def test_finish(self): + def test_finish(self) -> None: """Test finish()""" t = self.setup_run_scripts() @@ -2528,7 +2528,7 @@ script_recv: STDERR: 
input finally: os.chdir(t.orig_cwd) - def test_recv(self): + def test_recv(self) -> None: """Test the recv() method of objects returned by start()""" t = self.setup_run_scripts() @@ -2554,7 +2554,7 @@ script_recv: STDERR: input finally: os.chdir(t.orig_cwd) - def test_recv_err(self): + def test_recv_err(self) -> None: """Test the recv_err() method of objects returned by start()""" t = self.setup_run_scripts() @@ -2582,7 +2582,7 @@ script_recv: STDERR: input finally: os.chdir(t.orig_cwd) - def test_send(self): + def test_send(self) -> None: """Test the send() method of objects returned by start()""" t = self.setup_run_scripts() @@ -2620,7 +2620,7 @@ script_recv: STDERR: input os.chdir(t.orig_cwd) # TODO(sgk): figure out how to eliminate the race conditions here. - def __FLAKY__test_send_recv(self): + def __FLAKY__test_send_recv(self) -> None: """Test the send_recv() method of objects returned by start()""" t = self.setup_run_scripts() @@ -2682,7 +2682,7 @@ script_recv: STDERR: input to the receive script class stdin_TestCase(TestCmdTestCase): - def test_stdin(self): + def test_stdin(self) -> None: """Test stdin()""" run_env = TestCmd.TestCmd(workdir = '') run_env.write('run', """\ @@ -2745,7 +2745,7 @@ sys.stderr.write("run2 STDERR second line\\n") class subdir_TestCase(TestCmdTestCase): - def test_subdir(self): + def test_subdir(self) -> None: """Test subdir()""" # intermediate directories are created test = TestCmd.TestCmd(workdir='', subdir=['no', 'such', 'subdir']) @@ -2788,7 +2788,7 @@ class subdir_TestCase(TestCmdTestCase): class symlink_TestCase(TestCmdTestCase): @unittest.skipIf(sys.platform == 'win32', "Skip symlink test on win32") - def test_symlink(self): + def test_symlink(self) -> None: """Test symlink()""" test = TestCmd.TestCmd(workdir = '', subdir = 'foo') wdir_file1 = os.path.join(test.workdir, 'file1') @@ -2817,16 +2817,16 @@ class symlink_TestCase(TestCmdTestCase): class tempdir_TestCase(TestCmdTestCase): - def setUp(self): + def setUp(self) -> 
None: TestCmdTestCase.setUp(self) self._tempdir = tempfile.mkdtemp() os.chdir(self._tempdir) - def tearDown(self): + def tearDown(self) -> None: TestCmdTestCase.tearDown(self) os.rmdir(self._tempdir) - def test_tempdir(self): + def test_tempdir(self) -> None: """Test tempdir()""" test = TestCmd.TestCmd() tdir1 = test.tempdir() @@ -2859,7 +2859,7 @@ sys.exit(0) """ class timeout_TestCase(TestCmdTestCase): - def test_initialization(self): + def test_initialization(self) -> None: """Test initializating a TestCmd with a timeout""" test = TestCmd.TestCmd(workdir='', timeout=2) test.write('sleep.py', timeout_script) @@ -2872,7 +2872,7 @@ class timeout_TestCase(TestCmdTestCase): assert test.stderr() == '', test.stderr() assert test.stdout() == 'sleeping 4\n', test.stdout() - def test_cancellation(self): + def test_cancellation(self) -> None: """Test timer cancellation after firing""" test = TestCmd.TestCmd(workdir='', timeout=4) test.write('sleep.py', timeout_script) @@ -2889,7 +2889,7 @@ class timeout_TestCase(TestCmdTestCase): assert test.stderr() == '', test.stderr() assert test.stdout() == 'sleeping 6\n', test.stdout() - def test_run(self): + def test_run(self) -> None: """Test run() timeout""" test = TestCmd.TestCmd(workdir='', timeout=8) test.write('sleep.py', timeout_script) @@ -2995,7 +2995,7 @@ class unlink_TestCase(TestCmdTestCase): class touch_TestCase(TestCmdTestCase): - def test_touch(self): + def test_touch(self) -> None: """Test touch()""" test = TestCmd.TestCmd(workdir = '', subdir = 'sub') @@ -3032,7 +3032,7 @@ class touch_TestCase(TestCmdTestCase): class verbose_TestCase(TestCmdTestCase): - def test_verbose(self): + def test_verbose(self) -> None: """Test verbose()""" test = TestCmd.TestCmd() assert test.verbose == 0, 'verbose already initialized?' 
@@ -3094,7 +3094,7 @@ class workdir_TestCase(TestCmdTestCase): class workdirs_TestCase(TestCmdTestCase): - def test_workdirs(self): + def test_workdirs(self) -> None: """Test workdirs()""" test = TestCmd.TestCmd() assert test.workdir is None @@ -3111,7 +3111,7 @@ class workdirs_TestCase(TestCmdTestCase): class workpath_TestCase(TestCmdTestCase): - def test_workpath(self): + def test_workpath(self) -> None: """Test workpath()""" test = TestCmd.TestCmd() assert test.workdir is None @@ -3123,7 +3123,7 @@ class workpath_TestCase(TestCmdTestCase): class readable_TestCase(TestCmdTestCase): @unittest.skipIf(sys.platform == 'win32', "Skip permission fiddling on win32") - def test_readable(self): + def test_readable(self) -> None: """Test readable()""" test = TestCmd.TestCmd(workdir = '', subdir = 'foo') test.write('file1', "Test file #1\n") @@ -3162,7 +3162,7 @@ class readable_TestCase(TestCmdTestCase): class writable_TestCase(TestCmdTestCase): @unittest.skipIf(sys.platform == 'win32', "Skip permission fiddling on win32") - def test_writable(self): + def test_writable(self) -> None: """Test writable()""" test = TestCmd.TestCmd(workdir = '', subdir = 'foo') test.write('file1', "Test file #1\n") @@ -3198,18 +3198,18 @@ class writable_TestCase(TestCmdTestCase): class executable_TestCase(TestCmdTestCase): @unittest.skipIf(sys.platform == 'win32', "Skip permission fiddling on win32") - def test_executable(self): + def test_executable(self) -> None: """Test executable()""" test = TestCmd.TestCmd(workdir = '', subdir = 'foo') test.write('file1', "Test file #1\n") test.write(['foo', 'file2'], "Test file #2\n") os.symlink('no_such_file', test.workpath('dangling_symlink')) - def make_executable(fname): + def make_executable(fname) -> None: st = os.stat(fname) os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0o100)) - def make_non_executable(fname): + def make_non_executable(fname) -> None: st = os.stat(fname) os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0o100)) @@ -3312,7 +3312,7 @@ 
class write_TestCase(TestCmdTestCase): class variables_TestCase(TestCmdTestCase): - def test_variables(self): + def test_variables(self) -> None: """Test global variables""" run_env = TestCmd.TestCmd(workdir = '') diff --git a/testing/framework/TestCommon.py b/testing/framework/TestCommon.py index 91d5332cd..b0879a6f9 100644 --- a/testing/framework/TestCommon.py +++ b/testing/framework/TestCommon.py @@ -211,7 +211,7 @@ def separate_files(flist): missing.append(f) return existing, missing -def contains(seq, subseq, find): +def contains(seq, subseq, find) -> bool: # Returns True or False. if find is None: return subseq in seq @@ -234,14 +234,14 @@ def find_index(seq, subseq, find): if os.name == 'posix': - def _failed(self, status = 0): + def _failed(self, status: int = 0): if self.status is None or status is None: return None return _status(self) != status def _status(self): return self.status elif os.name == 'nt': - def _failed(self, status = 0): + def _failed(self, status: int = 0): return not (self.status is None or status is None) and \ self.status != status def _status(self): @@ -256,7 +256,7 @@ class TestCommon(TestCmd): # # $test->copy('src_file', 'dst_file'); - def __init__(self, **kw): + def __init__(self, **kw) -> None: """Initialize a new TestCommon instance. This involves just calling the base class initialization, and then changing directory to the workdir. @@ -281,7 +281,7 @@ class TestCommon(TestCmd): return arguments - def must_be_writable(self, *files): + def must_be_writable(self, *files) -> None: """Ensures that the specified file(s) exist and are writable. 
An individual file can be specified as a list of directory names, in which case the pathname will be constructed by concatenating @@ -297,7 +297,7 @@ class TestCommon(TestCmd): print("Unwritable files: `%s'" % "', `".join(unwritable)) self.fail_test(missing + unwritable) - def must_contain(self, file, required, mode='rb', find=None): + def must_contain(self, file, required, mode: str='rb', find=None) -> None: """Ensures specified file contains the required text. Args: @@ -326,7 +326,7 @@ class TestCommon(TestCmd): print(file_contents) self.fail_test() - def must_contain_all(self, output, input, title=None, find=None): + def must_contain_all(self, output, input, title=None, find=None) -> None: """Ensures that the specified output string (first argument) contains all of the specified input as a block (second argument). @@ -349,7 +349,7 @@ class TestCommon(TestCmd): print(output) self.fail_test() - def must_contain_all_lines(self, output, lines, title=None, find=None): + def must_contain_all_lines(self, output, lines, title=None, find=None) -> None: """Ensures that the specified output string (first argument) contains all of the specified lines (second argument). @@ -374,7 +374,7 @@ class TestCommon(TestCmd): sys.stdout.write(output) self.fail_test() - def must_contain_single_instance_of(self, output, lines, title=None): + def must_contain_single_instance_of(self, output, lines, title=None) -> None: """Ensures that the specified output string (first argument) contains one instance of the specified lines (second argument). @@ -401,7 +401,7 @@ class TestCommon(TestCmd): sys.stdout.write(output) self.fail_test() - def must_contain_any_line(self, output, lines, title=None, find=None): + def must_contain_any_line(self, output, lines, title=None, find=None) -> None: """Ensures that the specified output string (first argument) contains at least one of the specified lines (second argument). 
@@ -425,7 +425,7 @@ class TestCommon(TestCmd): sys.stdout.write(output) self.fail_test() - def must_contain_exactly_lines(self, output, expect, title=None, find=None): + def must_contain_exactly_lines(self, output, expect, title=None, find=None) -> None: """Ensures that the specified output string (first argument) contains all of the lines in the expected string (second argument) with none left over. @@ -477,7 +477,7 @@ class TestCommon(TestCmd): # Deprecated; retain for backwards compatibility. return self.must_contain_all_lines(output, lines, title, find) - def must_exist(self, *files): + def must_exist(self, *files) -> None: """Ensures that the specified file(s) must exist. An individual file be specified as a list of directory names, in which case the pathname will be constructed by concatenating them. Exits FAILED @@ -489,7 +489,7 @@ class TestCommon(TestCmd): print("Missing files: `%s'" % "', `".join(missing)) self.fail_test(missing) - def must_exist_one_of(self, files): + def must_exist_one_of(self, files) -> None: """Ensures that at least one of the specified file(s) exists. The filenames can be given as a list, where each entry may be a single path string, or a tuple of folder names and the final @@ -509,7 +509,7 @@ class TestCommon(TestCmd): print("Missing one of: `%s'" % "', `".join(missing)) self.fail_test(missing) - def must_match(self, file, expect, mode = 'rb', match=None, message=None, newline=None): + def must_match(self, file, expect, mode: str = 'rb', match=None, message=None, newline=None): """Matches the contents of the specified file (first argument) against the expected contents (second argument). 
The expected contents are a list of lines or a string which will be split @@ -527,7 +527,7 @@ class TestCommon(TestCmd): self.diff(expect, file_contents, 'contents ') raise - def must_match_file(self, file, golden_file, mode='rb', match=None, message=None, newline=None): + def must_match_file(self, file, golden_file, mode: str='rb', match=None, message=None, newline=None): """Matches the contents of the specified file (first argument) against the expected contents (second argument). The expected contents are a list of lines or a string which will be split @@ -548,7 +548,7 @@ class TestCommon(TestCmd): self.diff(golden_file_contents, file_contents, 'contents ') raise - def must_not_contain(self, file, banned, mode = 'rb', find = None): + def must_not_contain(self, file, banned, mode: str = 'rb', find = None) -> None: """Ensures that the specified file doesn't contain the banned text. """ file_contents = self.read(file, mode) @@ -561,7 +561,7 @@ class TestCommon(TestCmd): print(file_contents) self.fail_test() - def must_not_contain_any_line(self, output, lines, title=None, find=None): + def must_not_contain_any_line(self, output, lines, title=None, find=None) -> None: """Ensures that the specified output string (first argument) does not contain any of the specified lines (second argument). @@ -590,7 +590,7 @@ class TestCommon(TestCmd): def must_not_contain_lines(self, lines, output, title=None, find=None): return self.must_not_contain_any_line(output, lines, title, find) - def must_not_exist(self, *files): + def must_not_exist(self, *files) -> None: """Ensures that the specified file(s) must not exist. An individual file be specified as a list of directory names, in which case the pathname will be constructed by concatenating them. 
@@ -602,7 +602,7 @@ class TestCommon(TestCmd): print("Unexpected files exist: `%s'" % "', `".join(existing)) self.fail_test(existing) - def must_not_exist_any_of(self, files): + def must_not_exist_any_of(self, files) -> None: """Ensures that none of the specified file(s) exists. The filenames can be given as a list, where each entry may be a single path string, or a tuple of folder names and the final @@ -622,7 +622,7 @@ class TestCommon(TestCmd): print("Unexpected files exist: `%s'" % "', `".join(existing)) self.fail_test(existing) - def must_not_be_empty(self, file): + def must_not_be_empty(self, file) -> None: """Ensures that the specified file exists, and that it is not empty. Exits FAILED if the file doesn't exist or is empty. """ @@ -639,7 +639,7 @@ class TestCommon(TestCmd): print(f"File is empty: `{file}'") self.fail_test(file) - def must_not_be_writable(self, *files): + def must_not_be_writable(self, *files) -> None: """Ensures that the specified file(s) exist and are not writable. An individual file can be specified as a list of directory names, in which case the pathname will be constructed by concatenating @@ -656,7 +656,7 @@ class TestCommon(TestCmd): self.fail_test(missing + writable) def _complete(self, actual_stdout, expected_stdout, - actual_stderr, expected_stderr, status, match): + actual_stderr, expected_stderr, status, match) -> None: """ Post-processes running a subcommand, checking for failure status and displaying output appropriately. @@ -716,7 +716,7 @@ class TestCommon(TestCmd): sys.stderr.write(f'Exception trying to execute: {cmd_args}\n') raise e - def finish(self, popen, stdout = None, stderr = '', status = 0, **kw): + def finish(self, popen, stdout = None, stderr: str = '', status: int = 0, **kw) -> None: """ Finishes and waits for the process being run under control of the specified popen argument. 
Additional arguments are similar @@ -740,7 +740,7 @@ class TestCommon(TestCmd): self.stderr(), stderr, status, match) def run(self, options = None, arguments = None, - stdout = None, stderr = '', status = 0, **kw): + stdout = None, stderr: str = '', status: int = 0, **kw) -> None: """Runs the program under test, checking that the test succeeded. The parameters are the same as the base TestCmd.run() method, @@ -775,7 +775,7 @@ class TestCommon(TestCmd): self._complete(self.stdout(), stdout, self.stderr(), stderr, status, match) - def skip_test(self, message="Skipping test.\n", from_fw=False): + def skip_test(self, message: str="Skipping test.\n", from_fw: bool=False) -> None: """Skips a test. Proper test-skipping behavior is dependent on the external @@ -818,7 +818,7 @@ class TestCommon(TestCmd): self.pass_test() @staticmethod - def detailed_diff(value, expect): + def detailed_diff(value, expect) -> str: v_split = value.split('\n') e_split = expect.split('\n') if len(v_split) != len(e_split): diff --git a/testing/framework/TestCommonTests.py b/testing/framework/TestCommonTests.py index 6d8c27fe3..c55177188 100644 --- a/testing/framework/TestCommonTests.py +++ b/testing/framework/TestCommonTests.py @@ -60,15 +60,15 @@ class TestCommonTestCase(unittest.TestCase): """Base class for TestCommon test cases, fixture and utility methods.""" create_run_env = True - def setUp(self): + def setUp(self) -> None: self.orig_cwd = os.getcwd() if self.create_run_env: self.run_env = TestCmd.TestCmd(workdir = '') - def tearDown(self): + def tearDown(self) -> None: os.chdir(self.orig_cwd) - def set_up_execution_scripts(self): + def set_up_execution_scripts(self) -> None: run_env = self.run_env run_env.subdir('sub dir') @@ -118,7 +118,7 @@ class TestCommonTestCase(unittest.TestCase): run_env.write(self.stdin_script, wrapper % stdin_body) - def run_execution_test(self, script, expect_stdout, expect_stderr): + def run_execution_test(self, script, expect_stdout, expect_stderr) -> None: 
self.set_up_execution_scripts() run_env = self.run_env @@ -150,7 +150,7 @@ class TestCommonTestCase(unittest.TestCase): class __init__TestCase(TestCommonTestCase): - def test___init__(self): + def test___init__(self) -> None: """Test initialization""" run_env = self.run_env @@ -170,7 +170,7 @@ class __init__TestCase(TestCommonTestCase): class banner_TestCase(TestCommonTestCase): create_run_env = False - def test_banner(self): + def test_banner(self) -> None: """Test banner()""" tc = TestCommon.TestCommon(workdir='') @@ -191,7 +191,7 @@ class banner_TestCase(TestCommonTestCase): assert b == "xyzzy ----", b class must_be_writable_TestCase(TestCommonTestCase): - def test_file_does_not_exists(self): + def test_file_does_not_exists(self) -> None: """Test must_be_writable(): file does not exist""" run_env = self.run_env @@ -207,7 +207,7 @@ class must_be_writable_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_writable_file_exists(self): + def test_writable_file_exists(self) -> None: """Test must_be_writable(): writable file exists""" run_env = self.run_env @@ -229,7 +229,7 @@ class must_be_writable_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_non_writable_file_exists(self): + def test_non_writable_file_exists(self) -> None: """Test must_be_writable(): non-writable file exists""" run_env = self.run_env @@ -251,7 +251,7 @@ class must_be_writable_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_file_specified_as_list(self): + def test_file_specified_as_list(self) -> None: """Test must_be_writable(): file specified as list""" run_env = self.run_env @@ -276,7 +276,7 @@ class must_be_writable_TestCase(TestCommonTestCase): class must_contain_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_contain(): success""" run_env = self.run_env @@ -293,7 
+293,7 @@ class must_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_success_index_0(self): + def test_success_index_0(self) -> None: """Test must_contain(): success at index 0""" run_env = self.run_env @@ -310,7 +310,7 @@ class must_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_missing(self): + def test_file_missing(self) -> None: """Test must_contain(): file missing""" run_env = self.run_env @@ -326,7 +326,7 @@ class must_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("No such file or directory:") != -1, stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_contain(): failure""" run_env = self.run_env @@ -350,7 +350,7 @@ class must_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_mode(self): + def test_mode(self) -> None: """Test must_contain(): mode""" run_env = self.run_env @@ -372,7 +372,7 @@ class must_contain_TestCase(TestCommonTestCase): class must_contain_all_lines_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_contain_all_lines(): success""" run_env = self.run_env @@ -405,7 +405,7 @@ class must_contain_all_lines_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_contain_all_lines(): failure""" run_env = self.run_env @@ -443,7 +443,7 @@ class must_contain_all_lines_TestCase(TestCommonTestCase): assert stdout == expect, assert_display(expect, stdout, stderr) assert stderr.find("FAILED") != -1, stderr - def test_find(self): + def test_find(self) -> None: """Test must_contain_all_lines(): find""" run_env = self.run_env @@ -477,7 +477,7 @@ class must_contain_all_lines_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert 
stderr == "PASSED\n", stderr - def test_title(self): + def test_title(self) -> None: """Test must_contain_all_lines(): title""" run_env = self.run_env @@ -518,7 +518,7 @@ class must_contain_all_lines_TestCase(TestCommonTestCase): class must_contain_any_line_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_contain_any_line(): success""" run_env = self.run_env @@ -551,7 +551,7 @@ class must_contain_any_line_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_contain_any_line(): failure""" run_env = self.run_env @@ -589,7 +589,7 @@ class must_contain_any_line_TestCase(TestCommonTestCase): assert stdout == expect, assert_display(expect, stdout, stderr) assert stderr.find("FAILED") != -1, stderr - def test_find(self): + def test_find(self) -> None: """Test must_contain_any_line(): find""" run_env = self.run_env @@ -623,7 +623,7 @@ class must_contain_any_line_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_title(self): + def test_title(self) -> None: """Test must_contain_any_line(): title""" run_env = self.run_env @@ -664,7 +664,7 @@ class must_contain_any_line_TestCase(TestCommonTestCase): class must_contain_exactly_lines_TestCase(TestCommonTestCase): - def test_success_list(self): + def test_success_list(self) -> None: """Test must_contain_exactly_lines(): success (input list)""" run_env = self.run_env @@ -697,7 +697,7 @@ class must_contain_exactly_lines_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_success_string(self): + def test_success_string(self) -> None: """Test must_contain_exactly_lines(): success (input string)""" run_env = self.run_env @@ -730,7 +730,7 @@ class must_contain_exactly_lines_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def 
test_failure(self): + def test_failure(self) -> None: """Test must_contain_exactly_lines(): failure""" run_env = self.run_env @@ -770,7 +770,7 @@ class must_contain_exactly_lines_TestCase(TestCommonTestCase): assert stdout == expect, assert_display(expect, stdout, stderr) assert stderr.find("FAILED") != -1, stderr - def test_find(self): + def test_find(self) -> None: """Test must_contain_exactly_lines(): find""" run_env = self.run_env @@ -812,7 +812,7 @@ class must_contain_exactly_lines_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_title(self): + def test_title(self) -> None: """Test must_contain_exactly_lines(): title""" run_env = self.run_env @@ -855,7 +855,7 @@ class must_contain_exactly_lines_TestCase(TestCommonTestCase): class must_contain_lines_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_contain_lines(): success""" run_env = self.run_env @@ -886,7 +886,7 @@ class must_contain_lines_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_contain_lines(): failure""" run_env = self.run_env @@ -927,7 +927,7 @@ class must_contain_lines_TestCase(TestCommonTestCase): class must_exist_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_exist(): success""" run_env = self.run_env @@ -944,7 +944,7 @@ class must_exist_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_exist(): failure""" run_env = self.run_env @@ -960,7 +960,7 @@ class must_exist_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_file_specified_as_list(self): + def test_file_specified_as_list(self) -> None: """Test must_exist(): file specified as list""" run_env = 
self.run_env @@ -979,7 +979,7 @@ class must_exist_TestCase(TestCommonTestCase): assert stderr == "PASSED\n", stderr @unittest.skipIf(sys.platform == 'win32', "Skip symlink test on win32") - def test_broken_link(self) : + def test_broken_link(self) -> None : """Test must_exist(): exists but it is a broken link""" run_env = self.run_env @@ -997,7 +997,7 @@ class must_exist_TestCase(TestCommonTestCase): assert stderr == "PASSED\n", stderr class must_exist_one_of_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_exist_one_of(): success""" run_env = self.run_env @@ -1014,7 +1014,7 @@ class must_exist_one_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_exist_one_of(): failure""" run_env = self.run_env @@ -1030,7 +1030,7 @@ class must_exist_one_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_files_specified_as_list(self): + def test_files_specified_as_list(self) -> None: """Test must_exist_one_of(): files specified as list""" run_env = self.run_env @@ -1047,7 +1047,7 @@ class must_exist_one_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_files_specified_with_wildcards(self): + def test_files_specified_with_wildcards(self) -> None: """Test must_exist_one_of(): files specified with wildcards""" run_env = self.run_env @@ -1064,7 +1064,7 @@ class must_exist_one_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_given_as_list(self): + def test_file_given_as_list(self) -> None: """Test must_exist_one_of(): file given as list""" run_env = self.run_env @@ -1083,7 +1083,7 @@ class must_exist_one_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_given_as_sequence(self): + 
def test_file_given_as_sequence(self) -> None: """Test must_exist_one_of(): file given as sequence""" run_env = self.run_env @@ -1103,7 +1103,7 @@ class must_exist_one_of_TestCase(TestCommonTestCase): assert stderr == "PASSED\n", stderr class must_match_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_match(): success""" run_env = self.run_env @@ -1120,7 +1120,7 @@ class must_match_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_does_not_exists(self): + def test_file_does_not_exists(self) -> None: """Test must_match(): file does not exist""" run_env = self.run_env @@ -1136,7 +1136,7 @@ class must_match_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("No such file or directory:") != -1, stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_match(): failure""" run_env = self.run_env @@ -1165,7 +1165,7 @@ class must_match_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_mode(self): + def test_mode(self) -> None: """Test must_match(): mode""" run_env = self.run_env @@ -1187,7 +1187,7 @@ class must_match_TestCase(TestCommonTestCase): class must_not_be_writable_TestCase(TestCommonTestCase): - def test_file_does_not_exists(self): + def test_file_does_not_exists(self) -> None: """Test must_not_be_writable(): file does not exist""" run_env = self.run_env @@ -1203,7 +1203,7 @@ class must_not_be_writable_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_writable_file_exists(self): + def test_writable_file_exists(self) -> None: """Test must_not_be_writable(): writable file exists""" run_env = self.run_env @@ -1225,7 +1225,7 @@ class must_not_be_writable_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_non_writable_file_exists(self): + 
def test_non_writable_file_exists(self) -> None: """Test must_not_be_writable(): non-writable file exists""" run_env = self.run_env @@ -1247,7 +1247,7 @@ class must_not_be_writable_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_specified_as_list(self): + def test_file_specified_as_list(self) -> None: """Test must_not_be_writable(): file specified as list""" run_env = self.run_env @@ -1273,7 +1273,7 @@ class must_not_be_writable_TestCase(TestCommonTestCase): class must_not_contain_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_not_contain(): success""" run_env = self.run_env @@ -1290,7 +1290,7 @@ class must_not_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_does_not_exist(self): + def test_file_does_not_exist(self) -> None: """Test must_not_contain(): file does not exist""" run_env = self.run_env @@ -1306,7 +1306,7 @@ class must_not_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("No such file or directory:") != -1, stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_not_contain(): failure""" run_env = self.run_env @@ -1331,7 +1331,7 @@ class must_not_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_failure_index_0(self): + def test_failure_index_0(self) -> None: """Test must_not_contain(): failure at index 0""" run_env = self.run_env @@ -1356,7 +1356,7 @@ class must_not_contain_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_mode(self): + def test_mode(self) -> None: """Test must_not_contain(): mode""" run_env = self.run_env @@ -1378,7 +1378,7 @@ class must_not_contain_TestCase(TestCommonTestCase): class must_not_contain_any_line_TestCase(TestCommonTestCase): - def test_failure(self): + def 
test_failure(self) -> None: """Test must_not_contain_any_line(): failure""" run_env = self.run_env @@ -1422,7 +1422,7 @@ class must_not_contain_any_line_TestCase(TestCommonTestCase): assert stdout == expect, assert_display(expect, stdout, stderr) assert stderr.find("FAILED") != -1, stderr - def test_find(self): + def test_find(self) -> None: """Test must_not_contain_any_line(): find""" run_env = self.run_env @@ -1454,7 +1454,7 @@ class must_not_contain_any_line_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_success(self): + def test_success(self) -> None: """Test must_not_contain_any_line(): success""" run_env = self.run_env @@ -1483,7 +1483,7 @@ class must_not_contain_any_line_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_title(self): + def test_title(self) -> None: """Test must_not_contain_any_line(): title""" run_env = self.run_env @@ -1528,7 +1528,7 @@ class must_not_contain_any_line_TestCase(TestCommonTestCase): class must_not_contain_lines_TestCase(TestCommonTestCase): - def test_failure(self): + def test_failure(self) -> None: """Test must_not_contain_lines(): failure""" run_env = self.run_env @@ -1570,7 +1570,7 @@ class must_not_contain_lines_TestCase(TestCommonTestCase): assert stdout == expect, assert_display(expect, stdout, stderr) assert stderr.find("FAILED") != -1, stderr - def test_success(self): + def test_success(self) -> None: """Test must_not_contain_lines(): success""" run_env = self.run_env @@ -1602,7 +1602,7 @@ class must_not_contain_lines_TestCase(TestCommonTestCase): class must_not_exist_TestCase(TestCommonTestCase): - def test_failure(self): + def test_failure(self) -> None: """Test must_not_exist(): failure""" run_env = self.run_env @@ -1619,7 +1619,7 @@ class must_not_exist_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_success(self): + def test_success(self) -> 
None: """Test must_not_exist(): success""" run_env = self.run_env @@ -1635,7 +1635,7 @@ class must_not_exist_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_specified_as_list(self): + def test_file_specified_as_list(self) -> None: """Test must_not_exist(): file specified as list""" run_env = self.run_env @@ -1653,7 +1653,7 @@ class must_not_exist_TestCase(TestCommonTestCase): assert stderr == "PASSED\n", stderr @unittest.skipIf(sys.platform == 'win32', "Skip symlink test on win32") - def test_existing_broken_link(self): + def test_existing_broken_link(self) -> None: """Test must_not_exist(): exists but it is a broken link""" run_env = self.run_env @@ -1671,7 +1671,7 @@ class must_not_exist_TestCase(TestCommonTestCase): assert stderr.find("FAILED") != -1, stderr class must_not_exist_any_of_TestCase(TestCommonTestCase): - def test_success(self): + def test_success(self) -> None: """Test must_not_exist_any_of(): success""" run_env = self.run_env @@ -1687,7 +1687,7 @@ class must_not_exist_any_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_failure(self): + def test_failure(self) -> None: """Test must_not_exist_any_of(): failure""" run_env = self.run_env @@ -1704,7 +1704,7 @@ class must_not_exist_any_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_files_specified_as_list(self): + def test_files_specified_as_list(self) -> None: """Test must_not_exist_any_of(): files specified as list""" run_env = self.run_env @@ -1720,7 +1720,7 @@ class must_not_exist_any_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_files_specified_with_wildcards(self): + def test_files_specified_with_wildcards(self) -> None: """Test must_not_exist_any_of(): files specified with wildcards""" run_env = self.run_env @@ -1737,7 +1737,7 @@ class 
must_not_exist_any_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_given_as_list(self): + def test_file_given_as_list(self) -> None: """Test must_not_exist_any_of(): file given as list""" run_env = self.run_env @@ -1756,7 +1756,7 @@ class must_not_exist_any_of_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_given_as_sequence(self): + def test_file_given_as_sequence(self) -> None: """Test must_not_exist_any_of(): file given as sequence""" run_env = self.run_env @@ -1776,7 +1776,7 @@ class must_not_exist_any_of_TestCase(TestCommonTestCase): assert stderr == "PASSED\n", stderr class must_not_be_empty_TestCase(TestCommonTestCase): - def test_failure(self): + def test_failure(self) -> None: """Test must_not_be_empty(): failure""" run_env = self.run_env @@ -1793,7 +1793,7 @@ class must_not_be_empty_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr.find("FAILED") != -1, stderr - def test_success(self): + def test_success(self) -> None: """Test must_not_be_empty(): success""" run_env = self.run_env @@ -1810,7 +1810,7 @@ class must_not_be_empty_TestCase(TestCommonTestCase): stderr = run_env.stderr() assert stderr == "PASSED\n", stderr - def test_file_doesnt_exist(self): + def test_file_doesnt_exist(self) -> None: """Test must_not_be_empty(): failure""" run_env = self.run_env @@ -1827,7 +1827,7 @@ class must_not_be_empty_TestCase(TestCommonTestCase): assert stderr.find("FAILED") != -1, stderr class run_TestCase(TestCommonTestCase): - def test_argument_handling(self): + def test_argument_handling(self) -> None: """Test run(): argument handling""" script = lstrip("""\ @@ -1842,7 +1842,7 @@ class run_TestCase(TestCommonTestCase): self.run_execution_test(script, "", "") - def test_default_pass(self): + def test_default_pass(self) -> None: """Test run(): default arguments, script passes""" script = lstrip("""\ @@ -1855,7 +1855,7 @@ class 
run_TestCase(TestCommonTestCase): self.run_execution_test(script, "", "") - def test_default_fail(self): + def test_default_fail(self) -> None: """Test run(): default arguments, script fails""" script = lstrip("""\ @@ -1885,7 +1885,7 @@ class run_TestCase(TestCommonTestCase): self.run_execution_test(script, expect_stdout, expect_stderr) - def test_default_stderr(self): + def test_default_stderr(self) -> None: """Test run(): default arguments, error output""" script = lstrip("""\ from TestCommon import TestCommon @@ -1914,7 +1914,7 @@ class run_TestCase(TestCommonTestCase): self.run_execution_test(script, expect_stdout, expect_stderr) - def test_exception_handling(self): + def test_exception_handling(self) -> None: """Test run(): exception handling""" script = lstrip("""\ import TestCmd @@ -1954,7 +1954,7 @@ TypeError: forced TypeError self.run_execution_test(script, expect_stdout, expect_stderr) - def test_ignore_stderr(self): + def test_ignore_stderr(self) -> None: """Test run(): ignore stderr""" script = lstrip("""\ @@ -1967,7 +1967,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_match_function_stdout(self): + def test_match_function_stdout(self) -> None: """Test run(): explicit match function, stdout""" script = lstrip("""\ @@ -1984,7 +1984,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_match_function_stderr(self): + def test_match_function_stderr(self) -> None: """Test run(): explicit match function, stderr""" script = lstrip("""\ @@ -2001,7 +2001,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_matched_status_fails(self): + def test_matched_status_fails(self) -> None: """Test run(): matched status, script fails""" script = lstrip("""\ @@ -2014,7 +2014,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_matched_stdout(self): + def test_matched_stdout(self) -> None: """Test run(): matched stdout""" script = lstrip("""\ @@ 
-2028,7 +2028,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_matched_stderr(self): + def test_matched_stderr(self) -> None: """Test run(): matched stderr""" script = lstrip("""\ @@ -2042,7 +2042,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_mismatched_status_pass(self): + def test_mismatched_status_pass(self) -> None: """Test run(): mismatched status, script passes""" script = lstrip("""\ @@ -2072,7 +2072,7 @@ TypeError: forced TypeError self.run_execution_test(script, expect_stdout, expect_stderr) - def test_mismatched_status_fail(self): + def test_mismatched_status_fail(self) -> None: """Test run(): mismatched status, script fails""" script = lstrip("""\ @@ -2102,7 +2102,7 @@ TypeError: forced TypeError self.run_execution_test(script, expect_stdout, expect_stderr) - def test_mismatched_stdout(self): + def test_mismatched_stdout(self) -> None: """Test run(): mismatched stdout""" script = lstrip("""\ @@ -2134,7 +2134,7 @@ TypeError: forced TypeError self.run_execution_test(script, expect_stdout, expect_stderr) - def test_mismatched_stderr(self): + def test_mismatched_stderr(self) -> None: """Test run(): mismatched stderr""" script = lstrip("""\ @@ -2168,7 +2168,7 @@ TypeError: forced TypeError self.run_execution_test(script, expect_stdout, expect_stderr) - def test_option_handling(self): + def test_option_handling(self) -> None: """Test run(): option handling""" script = lstrip("""\ @@ -2183,7 +2183,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_options_plus_arguments(self): + def test_options_plus_arguments(self) -> None: """Test run(): option handling with arguments""" script = lstrip("""\ @@ -2199,7 +2199,7 @@ TypeError: forced TypeError self.run_execution_test(script, "", "") - def test_signal_handling(self): + def test_signal_handling(self) -> None: """Test run(): signal handling""" try: @@ -2237,7 +2237,7 @@ TypeError: forced TypeError 
self.run_execution_test(script, expect_stdout, expect_stderr) - def test_stdin(self): + def test_stdin(self) -> None: """Test run(): stdin handling""" script = lstrip("""\ @@ -2265,7 +2265,7 @@ TypeError: forced TypeError class start_TestCase(TestCommonTestCase): - def test_option_handling(self): + def test_option_handling(self) -> None: """Test start(): option handling""" script = lstrip("""\ @@ -2281,7 +2281,7 @@ class start_TestCase(TestCommonTestCase): self.run_execution_test(script, "", "") - def test_options_plus_arguments(self): + def test_options_plus_arguments(self) -> None: """Test start(): option handling with arguments""" script = lstrip("""\ @@ -2301,7 +2301,7 @@ class start_TestCase(TestCommonTestCase): class skip_test_TestCase(TestCommonTestCase): - def test_skip_test(self): + def test_skip_test(self) -> None: """Test skip_test()""" run_env = self.run_env @@ -2356,7 +2356,7 @@ class skip_test_TestCase(TestCommonTestCase): class variables_TestCase(TestCommonTestCase): - def test_variables(self): + def test_variables(self) -> None: """Test global variables""" run_env = self.run_env diff --git a/testing/framework/TestRuntest.py b/testing/framework/TestRuntest.py index 378f44109..d2b3b706a 100644 --- a/testing/framework/TestRuntest.py +++ b/testing/framework/TestRuntest.py @@ -99,7 +99,7 @@ class TestRuntest(TestCommon): initializations. """ - def __init__(self, **kw): + def __init__(self, **kw) -> None: """Initialize a Runtest testing object. 
If they're not overridden by keyword arguments, this @@ -161,7 +161,7 @@ class TestRuntest(TestCommon): os.environ['PYTHONPATH'] = '' - def write_fake_scons_source_tree(self): + def write_fake_scons_source_tree(self) -> None: os.mkdir('scripts') self.write('scripts/scons.py', fake_scons_py) @@ -170,13 +170,13 @@ class TestRuntest(TestCommon): os.mkdir('SCons/Script') self.write('SCons/Script/__init__.py', fake___init___py) - def write_failing_test(self, name): + def write_failing_test(self, name) -> None: self.write(name, failing_test_template) - def write_no_result_test(self, name): + def write_no_result_test(self, name) -> None: self.write(name, no_result_test_template) - def write_passing_test(self, name): + def write_passing_test(self, name) -> None: self.write(name, passing_test_template) # Local Variables: diff --git a/testing/framework/TestSCons.py b/testing/framework/TestSCons.py index 469f6c1c5..021128359 100644 --- a/testing/framework/TestSCons.py +++ b/testing/framework/TestSCons.py @@ -55,7 +55,7 @@ from TestCmd import PIPE # here provides some independent verification that what we packaged # conforms to what we expect. -default_version = '4.4.1ayyyymmdd' +default_version = '4.5.3ayyyymmdd' # TODO: these need to be hand-edited when there are changes python_version_unsupported = (3, 6, 0) @@ -107,7 +107,7 @@ dll_ = dll_prefix if sys.platform == 'cygwin': # On Cygwin, os.path.normcase() lies, so just report back the # fact that the underlying Win32 OS is case-insensitive. 
- def case_sensitive_suffixes(s1, s2): + def case_sensitive_suffixes(s1, s2) -> int: return 0 else: def case_sensitive_suffixes(s1, s2): @@ -212,7 +212,7 @@ def initialize_sconsflags(ignore_python_version): return save_sconsflags -def restore_sconsflags(sconsflags): +def restore_sconsflags(sconsflags) -> None: if sconsflags is None: del os.environ['SCONSFLAGS'] else: @@ -232,7 +232,7 @@ class NoMatch(Exception): """ Exception for matchPart to indicate there was no match found in the passed logfile """ - def __init__(self, p): + def __init__(self, p) -> None: self.pos = p @@ -258,7 +258,7 @@ class TestSCons(TestCommon): scons_version = SConsVersion javac_is_gcj = False - def __init__(self, **kw): + def __init__(self, **kw) -> None: """Initialize an SCons testing object. If they're not overridden by keyword arguments, this @@ -416,7 +416,7 @@ class TestSCons(TestCommon): return None - def wrap_stdout(self, build_str="", read_str="", error=0, cleaning=0) -> str: + def wrap_stdout(self, build_str: str="", read_str: str="", error: int=0, cleaning: int=0) -> str: """Wraps "expect" strings in SCons boilerplate. Given strings of expected output specific to a test, @@ -448,7 +448,7 @@ class TestSCons(TestCommon): build_str + \ term - def run(self, *args, **kw): + def run(self, *args, **kw) -> None: """ Set up SCONSFLAGS for every command so test scripts don't need to worry about unexpected warnings in their output. @@ -482,7 +482,7 @@ class TestSCons(TestCommon): # kw['options'] = ' '.join(options) # TestCommon.run(self, *args, **kw) - def up_to_date(self, arguments='.', read_str="", **kw): + def up_to_date(self, arguments: str='.', read_str: str="", **kw) -> None: """Asserts that all of the targets listed in arguments is up to date, but does not make any assumptions on other targets. This function is most useful in conjunction with the -n option. 
@@ -498,7 +498,7 @@ class TestSCons(TestCommon): kw['match'] = self.match_re_dotall self.run(**kw) - def not_up_to_date(self, arguments='.', read_str="", **kw): + def not_up_to_date(self, arguments: str='.', read_str: str="", **kw) -> None: """Asserts that none of the targets listed in arguments is up to date, but does not make any assumptions on other targets. This function is most useful in conjunction with the -n option. @@ -529,7 +529,7 @@ class TestSCons(TestCommon): kw['arguments'] = f"{option} {arguments}" return self.run(**kw) - def deprecated_wrap(self, msg): + def deprecated_wrap(self, msg) -> str: """ Calculate the pattern that matches a deprecation warning. """ @@ -592,7 +592,7 @@ class TestSCons(TestCommon): """ warning = self.deprecated_fatal(warn, msg) - def RunPair(option, expected): + def RunPair(option, expected) -> None: # run the same test with the option on the command line and # then with the option passed via SetOption(). self.run(options=f"--warn={option}", @@ -616,7 +616,7 @@ class TestSCons(TestCommon): return warning - def diff_substr(self, expect, actual, prelen=20, postlen=40): + def diff_substr(self, expect, actual, prelen: int=20, postlen: int=40) -> str: i = 0 for x, y in zip(expect, actual): if x != y: @@ -668,7 +668,7 @@ class TestSCons(TestCommon): return s @staticmethod - def to_bytes_re_sub(pattern, repl, str, count=0, flags=0): + def to_bytes_re_sub(pattern, repl, str, count: int=0, flags: int=0): """ Wrapper around re.sub to change pattern and repl to bytes to work with both python 2 & 3 @@ -749,7 +749,7 @@ class TestSCons(TestCommon): return database_prefix - def unlink_sconsignfile(self, name='.sconsign.dblite'): + def unlink_sconsignfile(self, name: str='.sconsign.dblite') -> None: """Delete the sconsign file. 
Note on python it seems to append .p3 to the file name so we take @@ -1066,7 +1066,7 @@ class TestSCons(TestCommon): result.append(os.path.join(dirpath, fname)) return sorted(result) - def Qt_dummy_installation(self, dir='qt'): + def Qt_dummy_installation(self, dir: str='qt') -> None: # create a dummy qt installation self.subdir(dir, [dir, 'bin'], [dir, 'include'], [dir, 'lib']) @@ -1172,17 +1172,20 @@ else: self.QT_UIC = f"{_python_} {self.workpath(dir, 'bin', 'myuic.py')}" self.QT_LIB_DIR = self.workpath(dir, 'lib') - def Qt_create_SConstruct(self, place): + def Qt_create_SConstruct(self, place, qt_tool: str='qt3') -> None: if isinstance(place, list): place = test.workpath(*place) - self.write(place, """\ + + var_prefix=qt_tool.upper() + self.write(place, f"""\ if ARGUMENTS.get('noqtdir', 0): - QTDIR = None + {var_prefix}DIR = None else: - QTDIR = r'%s' + {var_prefix}DIR = r'{self.QT}' DefaultEnvironment(tools=[]) # test speedup env = Environment( - QTDIR=QTDIR, QT_LIB=r'%s', QT_MOC=r'%s', QT_UIC=r'%s', tools=['default', 'qt'] + {var_prefix}DIR={var_prefix}DIR, {var_prefix}_LIB=r'{self.QT_LIB}', {var_prefix}_MOC=r'{self.QT_MOC}', + {var_prefix}_UIC=r'{self.QT_UIC}', tools=['default', '{qt_tool}'] ) dup = 1 if ARGUMENTS.get('variant_dir', 0): @@ -1203,7 +1206,7 @@ else: sconscript = File('SConscript') Export("env dup") SConscript(sconscript) -""" % (self.QT, self.QT_LIB, self.QT_MOC, self.QT_UIC)) +""") NCR = 0 # non-cached rebuild CR = 1 # cached rebuild (up to date) @@ -1218,12 +1221,12 @@ SConscript(sconscript) # to use cygwin compilers on cmd.exe -> uncomment following line # Configure_lib = 'm' - def coverage_run(self): + def coverage_run(self) -> bool: """ Check if the the tests are being run under coverage. """ return 'COVERAGE_PROCESS_START' in os.environ or 'COVERAGE_FILE' in os.environ - def skip_if_not_msvc(self, check_platform=True): + def skip_if_not_msvc(self, check_platform: bool=True) -> None: """ Skip test if MSVC is not available. 
Check whether we are on a Windows platform and skip the test if @@ -1248,10 +1251,10 @@ SConscript(sconscript) pass def checkConfigureLogAndStdout(self, checks, - logfile='config.log', - sconf_dir='.sconf_temp', - sconstruct="SConstruct", - doCheckLog=True, doCheckStdout=True): + logfile: str='config.log', + sconf_dir: str='.sconf_temp', + sconstruct: str="SConstruct", + doCheckLog: bool=True, doCheckStdout: bool=True): """ Verify expected output from Configure. Used to verify the expected output from using Configure() @@ -1383,7 +1386,7 @@ SConscript(sconscript) def checkLogAndStdout(self, checks, results, cached, logfile, sconf_dir, sconstruct, - doCheckLog=True, doCheckStdout=True): + doCheckLog: bool=True, doCheckStdout: bool=True): """ Verify expected output from Configure. Used to verify the expected output from using Configure() @@ -1544,7 +1547,7 @@ SConscript(sconscript) # see also sys.prefix documentation return python_minor_version_string() - def get_platform_python_info(self, python_h_required=False): + def get_platform_python_info(self, python_h_required: bool=False): """Return information about Python. Returns a path to a Python executable suitable for testing on @@ -1637,7 +1640,7 @@ else: restore_sconsflags(sconsflags) return p - def wait_for(self, fname, timeout=20.0, popen=None): + def wait_for(self, fname, timeout: float=20.0, popen=None) -> None: """ Waits for the specified file name to exist. 
""" @@ -1699,7 +1702,7 @@ else: class Stat: - def __init__(self, name, units, expression, convert=None): + def __init__(self, name, units, expression, convert=None) -> None: if convert is None: convert = lambda x: x self.name = name @@ -1733,7 +1736,7 @@ StatList = [ class TimeSCons(TestSCons): """Class for timing SCons.""" - def __init__(self, *args, **kw): + def __init__(self, *args, **kw) -> None: """ In addition to normal TestSCons.TestSCons intialization, this enables verbose mode (which causes the command lines to @@ -1781,7 +1784,7 @@ class TimeSCons(TestSCons): self.test_dir = os.path.join(self.orig_cwd, self.test_dir) self.copy_timing_configuration(self.test_dir, self.workpath()) - def main(self, *args, **kw): + def main(self, *args, **kw) -> None: """ The main entry point for standard execution of timings. @@ -1811,7 +1814,7 @@ class TimeSCons(TestSCons): self.full(*args, **kw) self.null(*args, **kw) - def trace(self, graph, name, value, units, sort=None): + def trace(self, graph, name, value, units, sort=None) -> None: fmt = "TRACE: graph=%s name=%s value=%s units=%s" line = fmt % (graph, name, value, units) if sort is not None: @@ -1820,7 +1823,7 @@ class TimeSCons(TestSCons): sys.stdout.write(line) sys.stdout.flush() - def report_traces(self, trace, stats): + def report_traces(self, trace, stats) -> None: self.trace('TimeSCons-elapsed', trace, self.elapsed_time(), @@ -1829,7 +1832,7 @@ class TimeSCons(TestSCons): for name, args in stats.items(): self.trace(name, trace, **args) - def uptime(self): + def uptime(self) -> None: try: fp = open('/proc/loadavg') except EnvironmentError: @@ -1853,7 +1856,7 @@ class TimeSCons(TestSCons): result[stat.name] = {'value': value, 'units': stat.units} return result - def add_timing_options(self, kw, additional=None): + def add_timing_options(self, kw, additional=None) -> None: """ Add the necessary timings options to the kw['options'] value. 
""" @@ -1862,7 +1865,7 @@ class TimeSCons(TestSCons): options += additional kw['options'] = f"{options} --debug=memory,time" - def startup(self, *args, **kw): + def startup(self, *args, **kw) -> None: """ Runs scons with the --help option. @@ -1883,7 +1886,7 @@ class TimeSCons(TestSCons): del stats['time-commands'] self.report_traces('startup', stats) - def full(self, *args, **kw): + def full(self, *args, **kw) -> None: """ Runs a full build of SCons. """ @@ -1896,7 +1899,7 @@ class TimeSCons(TestSCons): self.trace('full-memory', 'prebuild', **stats['memory-prebuild']) self.trace('full-memory', 'final', **stats['memory-final']) - def calibration(self, *args, **kw): + def calibration(self, *args, **kw) -> None: """ Runs a full build of SCons, but only reports calibration information (the variable(s) that were set for this configuration, @@ -1909,7 +1912,7 @@ class TimeSCons(TestSCons): sys.stdout.write(f'VARIABLE: {variable}={value}\n') sys.stdout.write(f'ELAPSED: {self.elapsed_time()}\n') - def null(self, *args, **kw): + def null(self, *args, **kw) -> None: """ Runs an up-to-date null build of SCons. """ @@ -1957,7 +1960,7 @@ class TimeSCons(TestSCons): self.endTime = time.perf_counter() return result - def copy_timing_configuration(self, source_dir, dest_dir): + def copy_timing_configuration(self, source_dir, dest_dir) -> None: """ Copies the timing configuration from the specified source_dir (the directory in which the controlling script lives) to the specified @@ -1982,7 +1985,7 @@ class TimeSCons(TestSCons): destination = source.replace(source_dir, dest_dir) shutil.copy2(source, destination) - def up_to_date(self, arguments='.', read_str="", **kw): + def up_to_date(self, arguments: str='.', read_str: str="", **kw) -> None: """Asserts that all of the targets listed in arguments is up to date, but does not make any assumptions on other targets. This function is most useful in conjunction with the -n option. 
diff --git a/testing/framework/TestSConsMSVS.py b/testing/framework/TestSConsMSVS.py index b001d79c8..92e436d93 100644 --- a/testing/framework/TestSConsMSVS.py +++ b/testing/framework/TestSConsMSVS.py @@ -684,7 +684,7 @@ print("self._msvs_versions =%%s"%%str(SCons.Tool.MSCommon.query_versions())) return self._msvs_versions - def vcproj_sys_path(self, fname): + def vcproj_sys_path(self, fname) -> None: """ """ orig = 'sys.path = [ join(sys' @@ -700,7 +700,7 @@ print("self._msvs_versions =%%s"%%str(SCons.Tool.MSCommon.query_versions())) subdir=None, sconscript=None, python=None, project_guid=None, - vcproj_sccinfo='', sln_sccinfo=''): + vcproj_sccinfo: str='', sln_sccinfo: str=''): if not hasattr(self, '_msvs_versions'): self.msvs_versions() @@ -786,7 +786,7 @@ print("self._msvs_versions =%%s"%%str(SCons.Tool.MSCommon.query_versions())) return host - def validate_msvs_file(self, file): + def validate_msvs_file(self, file) -> None: try: x = ElementTree.parse(file) except: @@ -809,7 +809,7 @@ print("self._msvs_versions =%%s"%%str(SCons.Tool.MSCommon.query_versions())) minor = 0 if len(components) < 2 else int(components[1]) return major, minor - def _get_solution_file_format_version(self, vc_version): + def _get_solution_file_format_version(self, vc_version) -> str: """ Returns the Visual Studio format version expected in the .sln file. """ @@ -825,7 +825,7 @@ print("self._msvs_versions =%%s"%%str(SCons.Tool.MSCommon.query_versions())) else: raise SCons.Errors.UserError(f'Received unexpected VC version {vc_version}') - def _get_solution_file_vs_number(self, vc_version): + def _get_solution_file_vs_number(self, vc_version) -> str: """ Returns the Visual Studio number expected in the .sln file. 
""" @@ -848,7 +848,7 @@ print("self._msvs_versions =%%s"%%str(SCons.Tool.MSCommon.query_versions())) else: raise SCons.Errors.UserError(f'Received unexpected VC version {vc_version}') - def _get_vcxproj_file_tools_version(self, vc_version): + def _get_vcxproj_file_tools_version(self, vc_version) -> str: """ Returns the version entry expected in the project file. For .vcxproj files, this goes is ToolsVersion. diff --git a/testing/framework/TestSCons_time.py b/testing/framework/TestSCons_time.py index 282a9a69e..b84bff222 100644 --- a/testing/framework/TestSCons_time.py +++ b/testing/framework/TestSCons_time.py @@ -155,7 +155,7 @@ class TestSCons_time(TestCommon): initializations. """ - def __init__(self, **kw): + def __init__(self, **kw) -> None: """Initialize an SCons_time testing object. If they're not overridden by keyword arguments, this @@ -205,10 +205,10 @@ class TestSCons_time(TestCommon): else: return os.path.splitext(path) - def fake_logfile(self, logfile_name, index=0): + def fake_logfile(self, logfile_name, index: int=0) -> None: self.write(self.workpath(logfile_name), logfile_contents % locals()) - def profile_data(self, profile_name, python_name, call, body): + def profile_data(self, profile_name, python_name, call, body) -> None: profile_name = self.workpath(profile_name) python_name = self.workpath(python_name) d = { @@ -244,7 +244,7 @@ class TestSCons_time(TestCommon): x = x.replace('time\\-', f'time\\-[^{sep}]*') return x - def write_fake_scons_py(self): + def write_fake_scons_py(self) -> None: self.subdir('scripts') self.write('scripts/scons.py', scons_py) diff --git a/testing/framework/TestSConsign.py b/testing/framework/TestSConsign.py index b0562bfba..9f02b4937 100644 --- a/testing/framework/TestSConsign.py +++ b/testing/framework/TestSConsign.py @@ -55,7 +55,7 @@ class TestSConsign(TestSCons): "scons" itself, since we need to run scons to generate the .sconsign files that we want the sconsign script to read. 
""" - def __init__(self, *args, **kw): + def __init__(self, *args, **kw) -> None: try: script_dir = os.environ['SCONS_SCRIPT_DIR'] except KeyError: @@ -92,7 +92,7 @@ class TestSConsign(TestSCons): def script_path(self, script): return os.path.join(self.script_dir, script) - def set_sconsign(self, sconsign): + def set_sconsign(self, sconsign) -> None: self.my_kw['program'] = sconsign def run_sconsign(self, *args, **kw): diff --git a/testing/framework/TestUnit/cli.py b/testing/framework/TestUnit/cli.py index 6aec73548..defe5a1ef 100644 --- a/testing/framework/TestUnit/cli.py +++ b/testing/framework/TestUnit/cli.py @@ -22,7 +22,7 @@ def get_runner(): return getattr(runnermod, fromsplit[1]) -def run(suite=None): +def run(suite=None) -> None: runner = get_runner() if suite: if not runner().run(suite).wasSuccessful(): diff --git a/testing/framework/TestUnit/taprunner.py b/testing/framework/TestUnit/taprunner.py index 001db5c14..6f8cb0051 100644 --- a/testing/framework/TestUnit/taprunner.py +++ b/testing/framework/TestUnit/taprunner.py @@ -24,7 +24,7 @@ except ImportError: class TAPTestResult(TextTestResult): - def _process(self, test, msg, failtype = None, directive = None): + def _process(self, test, msg, failtype = None, directive = None) -> None: """ increase the counter, format and output TAP info """ # counterhack: increase test counter test.suite.tap_counter += 1 @@ -42,29 +42,29 @@ class TAPTestResult(TextTestResult): # [ ] write test __doc__ (if exists) in comment self.stream.flush() - def addSuccess(self, test): + def addSuccess(self, test) -> None: super().addSuccess(test) self._process(test, "ok") - def addFailure(self, test, err): + def addFailure(self, test, err) -> None: super().addFailure(test, err) self._process(test, "not ok", "FAIL") # [ ] add structured data about assertion - def addError(self, test, err): + def addError(self, test, err) -> None: super().addError(test, err) self._process(test, "not ok", "ERROR") # [ ] add structured data about 
exception - def addSkip(self, test, reason): + def addSkip(self, test, reason) -> None: super().addSkip(test, reason) self._process(test, "ok", directive=(" # SKIP %s" % reason)) - def addExpectedFailure(self, test, err): + def addExpectedFailure(self, test, err) -> None: super().addExpectedFailure(test, err) self._process(test, "not ok", directive=" # TODO") - def addUnexpectedSuccess(self, test): + def addUnexpectedSuccess(self, test) -> None: super().addUnexpectedSuccess(test) self._process(test, "not ok", "FAIL (unexpected success)") @@ -98,22 +98,22 @@ if __name__ == "__main__": import unittest class Test(unittest.TestCase): - def test_ok(self): + def test_ok(self) -> None: pass - def test_fail(self): + def test_fail(self) -> None: self.assertTrue(False) - def test_error(self): + def test_error(self) -> None: bad_symbol @unittest.skip("skipin'") - def test_skip(self): + def test_skip(self) -> None: pass @unittest.expectedFailure - def test_not_ready(self): + def test_not_ready(self) -> None: self.fail() @unittest.expectedFailure - def test_invalid_fail_mark(self): + def test_invalid_fail_mark(self) -> None: pass - def test_another_ok(self): + def test_another_ok(self) -> None: pass diff --git a/testing/framework/test-framework.rst b/testing/framework/test-framework.rst index 265b82e74..01893ee61 100644 --- a/testing/framework/test-framework.rst +++ b/testing/framework/test-framework.rst @@ -61,7 +61,7 @@ scripts as we find them.) End-to-end tests are by their nature harder to debug. You can drop straight into the Python debugger on the unit test scripts by using the ``runtest.py --pdb`` option, but the end-to-end -tests treat an SCons invocation as a "black box" and just look for +tests treat an SCons invocation as a *black box* and just look for external effects; simple methods like inserting ``print`` statements in the SCons code itself can disrupt those external effects. See `Debugging end-to-end tests`_ for some more thoughts. 
@@ -72,7 +72,7 @@ Naming conventions The end-to-end tests, more or less, stick to the following naming conventions: -#. All tests end with a .py suffix. +#. All tests end with a ``.py`` suffix. #. In the *General* form we use ``Feature.py`` @@ -161,7 +161,7 @@ a function which takes a path-component argument and returns the path to that path-component in the testing directory. The use of an ephemeral test directory means that you can't simply change -into a directory to "debug things" after a test has gone wrong. +into a directory to debug after a test has gone wrong. For a way around this, check out the ``PRESERVE`` environment variable. It can be seen in action in `How to convert old tests to use fixures`_ below. @@ -170,7 +170,7 @@ Not running tests If you simply want to check which tests would get executed, you can call the ``runtest.py`` script with the ``-l`` option combined with whichever -test finding options (see below) you intend to use. Example:: +test selection options (see below) you intend to use. Example:: $ python runtest.py -l test/scons-time @@ -179,8 +179,8 @@ each test which would have been run, but doesn't actually run them:: $ python runtest.py -n -a -Finding Tests -============= +Selecting tests +=============== When started in *standard* mode:: @@ -209,11 +209,11 @@ The same rules apply when testing external Tools when using the ``-e`` option. -Example End-to-End Test Script +Example End-to-End test script ============================== To illustrate how the end-to-end test scripts work, let's walk through -a simple "Hello, world!" example:: +a simple *Hello, world!* example:: #!python import TestSCons @@ -241,6 +241,8 @@ a simple "Hello, world!" example:: test.pass_test() +Explanation +----------- ``import TestSCons`` Imports the main infrastructure for writing SCons tests. This is @@ -249,7 +251,7 @@ a simple "Hello, world!" example:: imported before this line. ``test = TestSCons.TestSCons()`` - This initializes an object for testing. 
A fair amount happens under + Initializes an object for testing. A fair amount happens under the covers when the object is created, including: * A temporary directory is created for all the in-line files that will @@ -302,8 +304,8 @@ Working with fixtures In the simple example above, the files to set up the test are created on the fly by the test program. We give a filename to the ``TestSCons.write()`` -method, and a string holding its contents, and it gets written to the test -directory right before starting.. +method, plus a string holding its contents, and it gets written to the test +directory right before starting. This simple technique can be seen throughout most of the end-to-end tests as it was the original technique provided to test developers, @@ -321,12 +323,12 @@ for code, so the effect is lost on them. In testing parlance, a fixture is a repeatable test setup. The SCons test harness allows the use of saved files or directories to be used -in that sense: "the fixture for this test is foo", instead of writing +in that sense: *the fixture for this test is foo*, instead of writing a whole bunch of strings to create files. Since these setups can be reusable across multiple tests, the *fixture* terminology applies well. Note: fixtures must not be treated by SCons as runnable tests. To exclude -them, see instructions in the above section named "Finding Tests". +them, see instructions in the above section named `Selecting tests`_. Directory fixtures ------------------ @@ -390,9 +392,11 @@ would have been placed in the top level of the test directory. Again, a reference example can be found in the current revision of SCons, see ``test/packaging/sandbox-test/sandbox-test.py``. -For even more examples you should check out -one of the external Tools, e.g. the *Qt4* Tool at -https://bitbucket.org/dirkbaechle/scons_qt4. Also visit the SCons Tools +For even more examples you should check out one of the external Tools, +e.g. 
the *Qt5* Tool at +https://github.com/SCons/scons-contrib/tree/master/sconscontrib/SCons/Tool/qt5. +There are many other tools in the contrib repository, +and you can also visit the SCons Tools Index at https://github.com/SCons/scons/wiki/ToolsIndex for a complete list of available Tools, though not all may have tests yet. @@ -453,23 +457,24 @@ kind of usage that does not lend itself to a fixture:: import TestSCons _python_ = TestSCons._python_ - test.write('SConstruct', """ + test.write('SConstruct', f""" cc = Environment().Dictionary('CC') env = Environment( - LINK=r'%(_python_)s mylink.py', + LINK=r'{_python_} mylink.py', LINKFLAGS=[], - CC=r'%(_python_)s mycc.py', + CC=r'{_python_} mycc.py', CXX=cc, CXXFLAGS=[], ) env.Program(target='test1', source='test1.c') - """ % locals()) + """) -Here the value of ``_python_`` is picked out of the script's -``locals`` dictionary - which works because we've set it above - -and interpolated using a mapping key into the string that will -be written to ``SConstruct``. A fixture would be hard to use -here because we don't know the value of ``_python_`` until runtime. +Here the value of ``_python_`` from the test program is +pasted in via f-string formatting. A fixture would be hard to use +here because we don't know the value of ``_python_`` until runtime +(also note that as it will be a full pathname, it's entered as a +Python rawstring to avoid interpretation problems on Windows, +where the path separator is a backslash). The other files created in this test may still be candidates for use as fixture files, however. @@ -518,7 +523,7 @@ for debugging purposes. If you have a failing test, try:: You can now go to the save directory reported from this run and invoke the test manually to see what it is doing, without the presence of the test infrastructure which would otherwise -"swallow" output you may be interested in. In this case, +consume output you may be interested in. In this case, adding debug prints may be more useful. 
@@ -528,17 +533,17 @@ Test infrastructure The main test API is defined in the ``TestSCons`` class. ``TestSCons`` is a subclass of ``TestCommon``, which is a subclass of ``TestCmd``. All those classes are defined in Python files of the same name -in ``testing/framework``. +in ``testing/framework``. Start in ``testing/framework/TestCmd.py`` for the base API definitions, like how to create files (``test.write()``) and run commands (``test.run()``). Use ``TestSCons`` for the end-to-end tests in ``test``, but use -``TestCmd`` for the unit tests in the ``src`` directory. +``TestCmd`` for the unit tests in the ``SCons`` directory. The match functions work like this: ``TestSCons.match_re`` - match each line with a RE + match each line with an RE * Splits the lines into a list (unless they already are) * splits the REs at newlines (unless already a list) @@ -614,14 +619,84 @@ plumbed into the environment. These things can be tested by mocking the behavior of the executable. Many examples of this can be found in the ``test`` directory. See for example ``test/subdivide.py``. -This leads to a suggestion for E2E test organization because the framework -doesn't have a way to indicate a partial skip - if you executed -200 lines of test, then found a condition which caused you to skip the -last 20 lines, the whole test is marked as a skip; -it also doesn't have a way to indicate a partial pass. -To improve on this, keep tool tests which don't need the -underlying program in separate files from ones which do - -that way one can see in the test results that the "plumbing" -tests worked even if the the ones using the underlying program -maybe were skipped. 
+Testing DOs and DONTs +===================== + +There's no question that having to write tests in order to get a change +approved - even an apparently trivial change - does make it a little harder +to contribute to the SCons code base - but the requirement to have features +and bugfixes testable is a necessary part of ensuring SCons quality. +Thinking of SCons development in terms of the red/green model from +Test Driven Development should make things a little easier. + +If you are working on an SCons bug, try to come up with a simple +reproducer first. Bug reports (even your own!) are often like *I tried +to do this but it surprisingly failed*, and a reproducer is normally an +``SConstruct`` along with, probably, some supporting files such as source +files, data files, subsidiary SConscripts, etc. Try to make this example +as simple and clean as possible. No, this isn't necessarily easy to do, +but winnowing down what triggers a problem and removing the stuff that +doesn't actually contribute to triggering the problem is a step that +lets you (and later readers) more clearly understand what is going on. +You don't have to turn this into a formal testcase yet, but keep this +reproducer around, and document with it what you expect to happen, +and what actually happens. This material will help produce an E2E +test later, and this is something you *may* be able to get help with, +if the way the tests are usually written and the test harness proves +too confusing. With a clean test in hand (make sure it's failing!) +you can go ahead and code up a fix and make sure it passes with the fix +in place. Jumping straight to a fix without working on a testcase like +this will often lead to a disappointing *how do I come up with a test +so the maintainer will be willing to merge* phase. Asking questions on +a public forum can be productive here. + +E2E-specific Suggestions: + +* Do not require the use of an external tool unless necessary. 
+ Usually the SCons behavior is the thing we want to test, + not the behavior of the external tool. *Necessary* is not a precise term - + sometimes it would be too time-consuming to write a script to mock + a compiler with an extensive set of options, and sometimes it's + not a good idea to assume you know what all those will do vs what + the real tool does; there may be other good reasons for just going + ahead and calling the external tool. +* If using an external tool, be prepared to skip the test if it is unavailable. +* Do not combine tests that need an external tool with ones that + do not - divide these into separate test files. There is no concept + of partial skip for e2e tests, so if you successfully complete seven + of eight tests, and then come to a conditional "skip if tool missing" + or "skip if on Windows", and that branch is taken, then the + whole test file ends up skipped, and the seven that ran will + never be recorded. Some tests follow the convention of creating a + second test file with the ending ``-live`` for the part that requires + actually running the external tool. +* In testing, *fail fast* is not always the best policy - if you can think + of many scenarios that could go wrong and they are all run linearly in + a single test file, then you only hear about the first one that fails. + In some cases it may make sense to split them out a bit more, so you + can see several fails at once, which may show a helpful failure pattern + you wouldn't spot from a single fail. +* Use test fixtures where it makes sense, and in particular, try to + make use of shareable mocked tools, which, by getting lots of use, + will be better debugged (that is, don't have each test produce its + own ``myfortran.py`` or ``mylex.py`` etc. unless they need drastically + different behaviors). + +Unittest-specific hints: + +- Let the ``unittest`` module help! 
Lots of the existing tests just + use a bare ``assert`` call for checks, which works fine, but then + you are responsible for preparing the message if it fails. The base + ``TestCase`` class has methods which know how to display many things, + for example ``self.assertEqual()`` displays in what way the two arguments + differ if they are *not* equal. Checking for an expected exception can + be done with ``self.assertRaises()`` rather than crafting a stub of + code using a try block for this situation. +- The *fail fast* consideration applies here, too: try not to fail a whole + testcase on the first problem, if there are more checks to go. + Again, existing tests may use elaborate tricks for this, but modern + ``unittest`` has a ``subTest`` context manager that can be used to wrap + each distinct piece and not abort the testcase for a failing subtest + (to be fair, this functionality is a recent addition, after most SCons + unit tests were written - but it should be used going forward). |