Diffstat (limited to 'third_party/waf/waflib/Tools/waf_unit_test.py')
-rw-r--r--  third_party/waf/waflib/Tools/waf_unit_test.py | 112
1 file changed, 79 insertions(+), 33 deletions(-)
diff --git a/third_party/waf/waflib/Tools/waf_unit_test.py b/third_party/waf/waflib/Tools/waf_unit_test.py
index 3e4fec0f498..b1db1704c9c 100644
--- a/third_party/waf/waflib/Tools/waf_unit_test.py
+++ b/third_party/waf/waflib/Tools/waf_unit_test.py
@@ -5,10 +5,10 @@
#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006
-# Thomas Nagy, 2010-2016 (ita)
+# Thomas Nagy, 2010-2018 (ita)
"""
-Unit testing system for C/C++/D providing test execution:
+Unit testing system for C/C++/D and interpreted languages providing test execution:
* in parallel, by using ``waf -j``
* partial (only the tests that have changed) or full (by using ``waf --alltests``)
@@ -40,7 +40,7 @@ By passing --dump-test-scripts the build outputs corresponding python files
(with extension _run.py) that are useful for debugging purposes.
"""
-import os, sys
+import os, shlex, sys
from waflib.TaskGen import feature, after_method, taskgen_method
from waflib import Utils, Task, Logs, Options
from waflib.Tools import ccroot
@@ -56,6 +56,52 @@ status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
sys.exit(status)
"""
+@taskgen_method
+def handle_ut_cwd(self, key):
+ """
+ Task generator method, used internally to limit code duplication.
+ This method may disappear anytime.
+ """
+ cwd = getattr(self, key, None)
+ if cwd:
+ if isinstance(cwd, str):
+ # we want a Node instance
+ if os.path.isabs(cwd):
+ self.ut_cwd = self.bld.root.make_node(cwd)
+ else:
+ self.ut_cwd = self.path.make_node(cwd)
+
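
[Editorial sketch: this helper promotes a string-valued cwd attribute to a
Node instance; attribute values below are illustrative.]

    bld(features='cxx cxxprogram test', source='main.cpp', target='app',
        ut_cwd='input')          # relative: resolved against the taskgen path
    bld(features='cxx cxxprogram test', source='alt.cpp', target='alt',
        ut_cwd='/opt/testdata')  # absolute: resolved against the fs root
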
+@feature('test_scripts')
+def make_interpreted_test(self):
+ """Create interpreted unit tests."""
+ for x in ['test_scripts_source', 'test_scripts_template']:
+ if not hasattr(self, x):
+ Logs.warn('a test_scripts taskgen is missing %s' % x)
+ return
+
+ self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False))
+
+ script_nodes = self.to_nodes(self.test_scripts_source)
+ for script_node in script_nodes:
+ tsk = self.create_task('utest', [script_node])
+ tsk.vars = lst + tsk.vars
+ tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd())
+
+ self.handle_ut_cwd('test_scripts_cwd')
+
+ env = getattr(self, 'test_scripts_env', None)
+ if env:
+ self.ut_env = env
+ else:
+ self.ut_env = dict(os.environ)
+
+ paths = getattr(self, 'test_scripts_paths', {})
+ for (k,v) in paths.items():
+ p = self.ut_env.get(k, '').split(os.pathsep)
+ if isinstance(v, str):
+ v = v.split(os.pathsep)
+ self.ut_env[k] = os.pathsep.join(p + v)
+
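
[Editorial sketch: a wscript using the new test_scripts feature might look
like this; ${SCRIPT} is filled in per task above, while ${PYTHON} assumes the
python tool was loaded at configure time, and paths are illustrative.]

    def build(bld):
        bld(features='test_scripts',
            test_scripts_source='run_tests.py',           # script(s) to execute
            test_scripts_template='${PYTHON} ${SCRIPT}',  # ${SCRIPT} is set per task
            test_scripts_paths={'PYTHONPATH': 'lib'})     # joined in with os.pathsep
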
@feature('test')
@after_method('apply_link', 'process_use')
def make_test(self):
@@ -68,15 +114,7 @@ def make_test(self):
self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
tsk.vars = lst + tsk.vars
- if getattr(self, 'ut_cwd', None):
- if isinstance(self.ut_cwd, str):
- # we want a Node instance
- if os.path.isabs(self.ut_cwd):
- self.ut_cwd = self.bld.root.make_node(self.ut_cwd)
- else:
- self.ut_cwd = self.path.make_node(self.ut_cwd)
- else:
- self.ut_cwd = tsk.inputs[0].parent
+ self.handle_ut_cwd('ut_cwd')
if not hasattr(self, 'ut_paths'):
paths = []
@@ -102,11 +140,17 @@ def make_test(self):
else:
add_path('LD_LIBRARY_PATH')
+ if not hasattr(self, 'ut_cmd'):
+ self.ut_cmd = getattr(Options.options, 'testcmd', False)
+
@taskgen_method
def add_test_results(self, tup):
"""Override and return tup[1] to interrupt the build immediately if a test does not run"""
Logs.debug("ut: %r", tup)
- self.utest_result = tup
+ try:
+ self.utest_results.append(tup)
+ except AttributeError:
+ self.utest_results = [tup]
try:
self.bld.utest_results.append(tup)
except AttributeError:
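
[Editorial sketch: per the docstring above, an override that returns tup[1]
interrupts the build as soon as a test fails to run; redefining the taskgen
method from a project wscript is one hedged way to do it.]

    from waflib.TaskGen import taskgen_method

    @taskgen_method
    def add_test_results(self, tup):
        # tup is (filename, exit_code, stdout, stderr); a non-zero return
        # value propagates as the utest task's own exit status
        self.utest_results = getattr(self, 'utest_results', []) + [tup]
        self.bld.utest_results = getattr(self.bld, 'utest_results', []) + [tup]
        return tup[1]
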
@@ -156,24 +200,21 @@ class utest(Task.Task):
if hasattr(self.generator, 'ut_run'):
return self.generator.ut_run(self)
- # TODO ut_exec, ut_fun, ut_cmd should be considered obsolete
self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()])
- if getattr(self.generator, 'ut_fun', None):
- self.generator.ut_fun(self)
- testcmd = getattr(self.generator, 'ut_cmd', False) or getattr(Options.options, 'testcmd', False)
- if testcmd:
- self.ut_exec = (testcmd % ' '.join(self.ut_exec)).split(' ')
+ ut_cmd = getattr(self.generator, 'ut_cmd', False)
+ if ut_cmd:
+ self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec))
return self.exec_command(self.ut_exec)
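
[Editorial sketch: the shlex.split change keeps quoted arguments in the
--testcmd string intact, where the old .split(' ') broke them; values below
are illustrative.]

    import shlex
    ut_cmd = 'valgrind --error-exitcode=1 %s'   # e.g. from --testcmd
    ut_exec = ['/path/to/build/app']
    cmd = shlex.split(ut_cmd % ' '.join(ut_exec))
    # -> ['valgrind', '--error-exitcode=1', '/path/to/build/app']
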
def exec_command(self, cmd, **kw):
Logs.debug('runner: %r', cmd)
if getattr(Options.options, 'dump_test_scripts', False):
- global SCRIPT_TEMPLATE
script_code = SCRIPT_TEMPLATE % {
'python': sys.executable,
'env': self.get_test_env(),
- 'cwd': self.get_cwd().abspath(), 'cmd': cmd
+ 'cwd': self.get_cwd().abspath(),
+ 'cmd': cmd
}
script_file = self.inputs[0].abspath() + '_run.py'
Utils.writef(script_file, script_code)
@@ -182,7 +223,7 @@ class utest(Task.Task):
Logs.info('Test debug file written as %r' % script_file)
proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(),
- stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE)
+ stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd,str))
(stdout, stderr) = proc.communicate()
self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr)
testlock.acquire()
@@ -192,7 +233,12 @@ class utest(Task.Task):
testlock.release()
def get_cwd(self):
- return self.generator.ut_cwd
+ return getattr(self.generator, 'ut_cwd', self.inputs[0].parent)
+
+ def sig_explicit_deps(self):
+ lst = [os.stat(node.abspath()).st_mtime for node in self.inputs]
+ self.m.update(str(lst))
+ return super(utest, self).sig_explicit_deps()
def summary(bld):
"""
@@ -210,15 +256,15 @@ def summary(bld):
total = len(lst)
tfail = len([x for x in lst if x[1]])
- Logs.pprint('CYAN', ' tests that pass %d/%d' % (total-tfail, total))
+ Logs.pprint('GREEN', ' tests that pass %d/%d' % (total-tfail, total))
for (f, code, out, err) in lst:
if not code:
- Logs.pprint('CYAN', ' %s' % f)
+ Logs.pprint('GREEN', ' %s' % f)
- Logs.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total))
+ Logs.pprint('GREEN' if tfail == 0 else 'RED', ' tests that fail %d/%d' % (tfail, total))
for (f, code, out, err) in lst:
if code:
- Logs.pprint('CYAN', ' %s' % f)
+ Logs.pprint('RED', ' %s' % f)
def set_exit_code(bld):
"""
@@ -249,10 +295,10 @@ def options(opt):
"""
opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests')
opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
- opt.add_option('--clear-failed', action='store_true', default=False, help='Force failed unit tests to run again next time', dest='clear_failed_tests')
- opt.add_option('--testcmd', action='store', default=False,
- help = 'Run the unit tests using the test-cmd string'
- ' example "--test-cmd="valgrind --error-exitcode=1'
- ' %s" to run under valgrind', dest='testcmd')
+ opt.add_option('--clear-failed', action='store_true', default=False,
+ help='Force failed unit tests to run again next time', dest='clear_failed_tests')
+ opt.add_option('--testcmd', action='store', default=False, dest='testcmd',
+ help='Run the unit tests using the test-cmd string; example: --testcmd="valgrind --error-exitcode=1 %s" to run under valgrind')
opt.add_option('--dump-test-scripts', action='store_true', default=False,
- help='Create python scripts to help debug tests', dest='dump_test_scripts')
+ help='Create python scripts to help debug tests', dest='dump_test_scripts')
+