Diffstat (limited to 'waflib/Tools/waf_unit_test.py')
-rw-r--r-- | waflib/Tools/waf_unit_test.py | 211 |
1 file changed, 58 insertions, 153 deletions
diff --git a/waflib/Tools/waf_unit_test.py b/waflib/Tools/waf_unit_test.py
index a71ed1c0..27cd9a40 100644
--- a/waflib/Tools/waf_unit_test.py
+++ b/waflib/Tools/waf_unit_test.py
@@ -1,10 +1,10 @@
 #!/usr/bin/env python
 # encoding: utf-8
 # Carlos Rafael Giani, 2006
-# Thomas Nagy, 2010-2018 (ita)
+# Thomas Nagy, 2010
 
 """
-Unit testing system for C/C++/D and interpreted languages providing test execution:
+Unit testing system for C/C++/D providing test execution:
 
 * in parallel, by using ``waf -j``
 * partial (only the tests that have changed) or full (by using ``waf --alltests``)
@@ -31,128 +31,31 @@ the predefined callback::
 	bld(features='cxx cxxprogram test', source='main.c', target='app')
 	from waflib.Tools import waf_unit_test
 	bld.add_post_fun(waf_unit_test.summary)
-
-By passing --dump-test-scripts the build outputs corresponding python files
-(with extension _run.py) that are useful for debugging purposes.
 """
 
-import os, shlex, sys
+import os
 from waflib.TaskGen import feature, after_method, taskgen_method
 from waflib import Utils, Task, Logs, Options
-from waflib.Tools import ccroot
 testlock = Utils.threading.Lock()
 
-SCRIPT_TEMPLATE = """#! %(python)s
-import subprocess, sys
-cmd = %(cmd)r
-# if you want to debug with gdb:
-#cmd = ['gdb', '-args'] + cmd
-env = %(env)r
-status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
-sys.exit(status)
-"""
-
-@taskgen_method
-def handle_ut_cwd(self, key):
-	"""
-	Task generator method, used internally to limit code duplication.
-	This method may disappear anytime.
-	"""
-	cwd = getattr(self, key, None)
-	if cwd:
-		if isinstance(cwd, str):
-			# we want a Node instance
-			if os.path.isabs(cwd):
-				self.ut_cwd = self.bld.root.make_node(cwd)
-			else:
-				self.ut_cwd = self.path.make_node(cwd)
-
-@feature('test_scripts')
-def make_interpreted_test(self):
-	"""Create interpreted unit tests."""
-	for x in ['test_scripts_source', 'test_scripts_template']:
-		if not hasattr(self, x):
-			Logs.warn('a test_scripts taskgen is missing %s' % x)
-			return
-
-	self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False))
-
-	script_nodes = self.to_nodes(self.test_scripts_source)
-	for script_node in script_nodes:
-		tsk = self.create_task('utest', [script_node])
-		tsk.vars = lst + tsk.vars
-		tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd())
-
-	self.handle_ut_cwd('test_scripts_cwd')
-
-	env = getattr(self, 'test_scripts_env', None)
-	if env:
-		self.ut_env = env
-	else:
-		self.ut_env = dict(os.environ)
-
-	paths = getattr(self, 'test_scripts_paths', {})
-	for (k,v) in paths.items():
-		p = self.ut_env.get(k, '').split(os.pathsep)
-		if isinstance(v, str):
-			v = v.split(os.pathsep)
-		self.ut_env[k] = os.pathsep.join(p + v)
-
 @feature('test')
-@after_method('apply_link', 'process_use')
+@after_method('apply_link')
 def make_test(self):
 	"""Create the unit test task. There can be only one unit test task by task generator."""
-	if not getattr(self, 'link_task', None):
-		return
-
-	tsk = self.create_task('utest', self.link_task.outputs)
-	if getattr(self, 'ut_str', None):
-		self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
-		tsk.vars = lst + tsk.vars
-
-	self.handle_ut_cwd('ut_cwd')
-
-	if not hasattr(self, 'ut_paths'):
-		paths = []
-		for x in self.tmp_use_sorted:
-			try:
-				y = self.bld.get_tgen_by_name(x).link_task
-			except AttributeError:
-				pass
-			else:
-				if not isinstance(y, ccroot.stlink_task):
-					paths.append(y.outputs[0].parent.abspath())
-		self.ut_paths = os.pathsep.join(paths) + os.pathsep
-
-	if not hasattr(self, 'ut_env'):
-		self.ut_env = dct = dict(os.environ)
-		def add_path(var):
-			dct[var] = self.ut_paths + dct.get(var,'')
-		if Utils.is_win32:
-			add_path('PATH')
-		elif Utils.unversioned_sys_platform() == 'darwin':
-			add_path('DYLD_LIBRARY_PATH')
-			add_path('LD_LIBRARY_PATH')
-		else:
-			add_path('LD_LIBRARY_PATH')
-
-	if not hasattr(self, 'ut_cmd'):
-		self.ut_cmd = getattr(Options.options, 'testcmd', False)
+	if getattr(self, 'link_task', None):
+		self.create_task('utest', self.link_task.outputs)
 
 @taskgen_method
 def add_test_results(self, tup):
 	"""Override and return tup[1] to interrupt the build immediately if a test does not run"""
 	Logs.debug("ut: %r", tup)
-	try:
-		self.utest_results.append(tup)
-	except AttributeError:
-		self.utest_results = [tup]
+	self.utest_result = tup
 	try:
 		self.bld.utest_results.append(tup)
 	except AttributeError:
 		self.bld.utest_results = [tup]
 
-@Task.deep_inputs
 class utest(Task.Task):
 	"""
 	Execute a unit test
@@ -160,7 +63,6 @@ class utest(Task.Task):
 	color = 'PINK'
 	after = ['vnum', 'inst']
 	vars = []
-
 	def runnable_status(self):
 		"""
 		Always execute the task if `waf --alltests` was used or no
@@ -175,17 +77,37 @@ class utest(Task.Task):
 				return Task.RUN_ME
 		return ret
 
+	def add_path(self, dct, path, var):
+		dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
+
 	def get_test_env(self):
 		"""
		In general, tests may require any library built anywhere in the project.
 		Override this method if fewer paths are needed
 		"""
-		return self.generator.ut_env
-
-	def post_run(self):
-		super(utest, self).post_run()
-		if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]:
-			self.generator.bld.task_sigs[self.uid()] = None
+		try:
+			fu = getattr(self.generator.bld, 'all_test_paths')
+		except AttributeError:
+			# this operation may be performed by at most #maxjobs
+			fu = os.environ.copy()
+
+			lst = []
+			for g in self.generator.bld.groups:
+				for tg in g:
+					if getattr(tg, 'link_task', None):
+						s = tg.link_task.outputs[0].parent.abspath()
+						if s not in lst:
+							lst.append(s)
+
+			if Utils.is_win32:
+				self.add_path(fu, lst, 'PATH')
+			elif Utils.unversioned_sys_platform() == 'darwin':
+				self.add_path(fu, lst, 'DYLD_LIBRARY_PATH')
+				self.add_path(fu, lst, 'LD_LIBRARY_PATH')
+			else:
+				self.add_path(fu, lst, 'LD_LIBRARY_PATH')
+			self.generator.bld.all_test_paths = fu
+		return fu
 
 	def run(self):
 		"""
@@ -194,44 +116,29 @@ class utest(Task.Task):
 
 		Override ``add_test_results`` to interrupt the build
 		"""
-		if hasattr(self.generator, 'ut_run'):
-			return self.generator.ut_run(self)
-
-		self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()])
-		ut_cmd = getattr(self.generator, 'ut_cmd', False)
-		if ut_cmd:
-			self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec))
-
-		return self.exec_command(self.ut_exec)
-
-	def exec_command(self, cmd, **kw):
-		Logs.debug('runner: %r', cmd)
-		if getattr(Options.options, 'dump_test_scripts', False):
-			script_code = SCRIPT_TEMPLATE % {
-				'python': sys.executable,
-				'env': self.get_test_env(),
-				'cwd': self.get_cwd().abspath(),
-				'cmd': cmd
-			}
-			script_file = self.inputs[0].abspath() + '_run.py'
-			Utils.writef(script_file, script_code)
-			os.chmod(script_file, Utils.O755)
-			if Logs.verbose > 1:
-				Logs.info('Test debug file written as %r' % script_file)
-
-		proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(),
-			stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd,str))
+
+		filename = self.inputs[0].abspath()
+		self.ut_exec = getattr(self.generator, 'ut_exec', [filename])
+		if getattr(self.generator, 'ut_fun', None):
+			self.generator.ut_fun(self)
+
+
+		cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath()
+
+		testcmd = getattr(self.generator, 'ut_cmd', False) or getattr(Options.options, 'testcmd', False)
+		if testcmd:
+			self.ut_exec = (testcmd % self.ut_exec[0]).split(' ')
+
+		proc = Utils.subprocess.Popen(self.ut_exec, cwd=cwd, env=self.get_test_env(), stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE)
 		(stdout, stderr) = proc.communicate()
-		self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr)
+
+		tup = (filename, proc.returncode, stdout, stderr)
 		testlock.acquire()
 		try:
 			return self.generator.add_test_results(tup)
 		finally:
 			testlock.release()
 
-	def get_cwd(self):
-		return getattr(self.generator, 'ut_cwd', self.inputs[0].parent)
-
 def summary(bld):
 	"""
 	Display an execution summary::
@@ -248,15 +155,15 @@ def summary(bld):
 		total = len(lst)
 		tfail = len([x for x in lst if x[1]])
 
-		Logs.pprint('GREEN', '  tests that pass %d/%d' % (total-tfail, total))
+		Logs.pprint('CYAN', '  tests that pass %d/%d' % (total-tfail, total))
 		for (f, code, out, err) in lst:
 			if not code:
-				Logs.pprint('GREEN', '    %s' % f)
+				Logs.pprint('CYAN', '    %s' % f)
 
-		Logs.pprint('GREEN' if tfail == 0 else 'RED', '  tests that fail %d/%d' % (tfail, total))
+		Logs.pprint('CYAN', '  tests that fail %d/%d' % (tfail, total))
 		for (f, code, out, err) in lst:
 			if code:
-				Logs.pprint('RED', '    %s' % f)
+				Logs.pprint('CYAN', '    %s' % f)
 
 def set_exit_code(bld):
 	"""
@@ -287,10 +194,8 @@ def options(opt):
 	"""
 	opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests')
 	opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
-	opt.add_option('--clear-failed', action='store_true', default=False,
-		help='Force failed unit tests to run again next time', dest='clear_failed_tests')
-	opt.add_option('--testcmd', action='store', default=False, dest='testcmd',
-		help='Run the unit tests using the test-cmd string example "--testcmd="valgrind --error-exitcode=1 %s" to run under valgrind')
-	opt.add_option('--dump-test-scripts', action='store_true', default=False,
-		help='Create python scripts to help debug tests', dest='dump_test_scripts')
+	opt.add_option('--testcmd', action='store', default=False,
+		help = 'Run the unit tests using the test-cmd string'
+		' example "--test-cmd="valgrind --error-exitcode=1'
+		' %s" to run under valgrind', dest='testcmd')
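
Both versions of the module are driven the same way from a project's wscript: a task generator gains the ``test`` feature so its link output is executed after the build, and the ``summary`` callback prints the report. A minimal sketch, assuming a hypothetical project with ``main.c`` and ``test.c`` (only the tool name ``waf_unit_test``, the ``test`` feature and ``add_post_fun`` come from the module itself)::

	def options(opt):
		opt.load('compiler_cxx waf_unit_test')

	def configure(conf):
		conf.load('compiler_cxx waf_unit_test')

	def build(bld):
		# an ordinary program, never executed by the build
		bld(features='cxx cxxprogram', source='main.c', target='app')
		# 'test' attaches a utest task to the link output of this generator
		bld(features='cxx cxxprogram test', source='test.c', target='test_app')
		# print the pass/fail report once all tests have run
		from waflib.Tools import waf_unit_test
		bld.add_post_fun(waf_unit_test.summary)

With the code restored by this change, ``waf --alltests`` forces every test to run again, and ``waf --testcmd="valgrind --error-exitcode=1 %s"`` substitutes the test binary for ``%s``, as the option's help text describes.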
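The restored ``run()`` also re-exposes the older ``ut_fun`` hook: a callable stored on the task generator that may rewrite ``self.ut_exec`` before the process is spawned. A hypothetical sketch (the ``--verbose`` argument is an arbitrary example, not part of the tool)::

	def adjust_cmd(tsk):
		# append an argument to the generated test command line
		tsk.ut_exec += ['--verbose']

	def build(bld):
		bld(features='cxx cxxprogram test', source='test.c', target='test_app', ut_fun=adjust_cmd)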