summary refs log tree commit diff
diff options
context:
space:
mode:
authorNobuaki Sukegawa <nsuke@apache.org>2016-02-11 13:15:40 +0900
committerNobuaki Sukegawa <nsuke@apache.org>2016-02-11 20:06:46 +0900
commit144bbef3ddd3a66ff038e5ad271500aa06b63aae (patch)
tree9df0b7b9b72cfad6784abae823b23d005c30dc97
parentd10eb082d02ecfc5206bce62961cd45e6c48656b (diff)
downloadthrift-144bbef3ddd3a66ff038e5ad271500aa06b63aae.tar.gz
THRIFT-3611 Add --regex filter to cross test runner
Client: Test
Patch: Nobuaki Sukegawa
This closes #843
-rwxr-xr-xtest/README.md5
-rw-r--r--test/crossrunner/collect.py15
-rw-r--r--test/crossrunner/test.py4
-rwxr-xr-xtest/test.py13
4 files changed, 25 insertions, 12 deletions
diff --git a/test/README.md b/test/README.md
index 066b34f65..0682f5d98 100755
--- a/test/README.md
+++ b/test/README.md
@@ -31,6 +31,11 @@ implementation (currently `cpp` and `java` are recommended) like this:
test/test.py --server cpp,java --client nodejs
test/test.py --server nodejs --client cpp,java
+Another useful flag is --regex. For example, to run all tests that involve
+Java TBinaryProtocol:
+
+ test/test.py --regex "java.*binary"
+
## Test case definition file
The cross test cases are defined in [tests.json](tests.json).
diff --git a/test/crossrunner/collect.py b/test/crossrunner/collect.py
index e91ac0b43..d7594cb62 100644
--- a/test/crossrunner/collect.py
+++ b/test/crossrunner/collect.py
@@ -22,6 +22,7 @@ import re
from itertools import product
from .util import merge_dict
+from .test import TestEntry
# Those keys are passed to execution as is.
# Note that there are keys other than these, namely:
@@ -144,12 +145,18 @@ def _do_collect_tests(servers, clients):
}
-def collect_cross_tests(tests_dict, server_match, client_match):
+def _filter_entries(tests, regex):
+ if regex:
+ return filter(lambda t: re.search(regex, TestEntry.get_name(**t)), tests)
+ return tests
+
+
+def collect_cross_tests(tests_dict, server_match, client_match, regex):
sv, cl = _collect_testlibs(tests_dict, server_match, client_match)
- return list(_do_collect_tests(sv, cl))
+ return list(_filter_entries(_do_collect_tests(sv, cl), regex))
-def collect_feature_tests(tests_dict, features_dict, server_match, feature_match):
+def collect_feature_tests(tests_dict, features_dict, server_match, feature_match, regex):
sv, _ = _collect_testlibs(tests_dict, server_match)
ft = collect_features(features_dict, feature_match)
- return list(_do_collect_tests(sv, ft))
+ return list(_filter_entries(_do_collect_tests(sv, ft), regex))
diff --git a/test/crossrunner/test.py b/test/crossrunner/test.py
index dcc8a9416..74fd916ec 100644
--- a/test/crossrunner/test.py
+++ b/test/crossrunner/test.py
@@ -124,8 +124,8 @@ class TestEntry(object):
return config
@classmethod
- def get_name(cls, server, client, proto, trans, sock, *args):
- return '%s-%s_%s_%s-%s' % (server, client, proto, trans, sock)
+ def get_name(cls, server, client, protocol, transport, socket, *args, **kwargs):
+ return '%s-%s_%s_%s-%s' % (server, client, protocol, transport, socket)
@property
def name(self):
diff --git a/test/test.py b/test/test.py
index 42babebb3..9305967c3 100755
--- a/test/test.py
+++ b/test/test.py
@@ -45,12 +45,12 @@ FEATURE_DIR_RELATIVE = path_join(TEST_DIR_RELATIVE, 'features')
CONFIG_FILE = 'tests.json'
-def run_cross_tests(server_match, client_match, jobs, skip_known_failures, retry_count):
+def run_cross_tests(server_match, client_match, jobs, skip_known_failures, retry_count, regex):
logger = multiprocessing.get_logger()
logger.debug('Collecting tests')
with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
j = json.load(fp)
- tests = crossrunner.collect_cross_tests(j, server_match, client_match)
+ tests = crossrunner.collect_cross_tests(j, server_match, client_match, regex)
if not tests:
print('No test found that matches the criteria', file=sys.stderr)
print(' servers: %s' % server_match, file=sys.stderr)
@@ -74,7 +74,7 @@ def run_cross_tests(server_match, client_match, jobs, skip_known_failures, retry
return False
-def run_feature_tests(server_match, feature_match, jobs, skip_known_failures, retry_count):
+def run_feature_tests(server_match, feature_match, jobs, skip_known_failures, retry_count, regex):
basedir = path_join(ROOT_DIR, FEATURE_DIR_RELATIVE)
logger = multiprocessing.get_logger()
logger.debug('Collecting tests')
@@ -82,7 +82,7 @@ def run_feature_tests(server_match, feature_match, jobs, skip_known_failures, re
j = json.load(fp)
with open(path_join(basedir, CONFIG_FILE), 'r') as fp:
j2 = json.load(fp)
- tests = crossrunner.collect_feature_tests(j, j2, server_match, feature_match)
+ tests = crossrunner.collect_feature_tests(j, j2, server_match, feature_match, regex)
if not tests:
print('No test found that matches the criteria', file=sys.stderr)
print(' servers: %s' % server_match, file=sys.stderr)
@@ -122,6 +122,7 @@ def main(argv):
help='list of clients to test')
parser.add_argument('-F', '--features', nargs='*', default=None,
help='run server feature tests instead of cross language tests')
+ parser.add_argument('-R', '--regex', help='test name pattern to run')
parser.add_argument('-s', '--skip-known-failures', action='store_true', dest='skip_known_failures',
help='do not execute tests that are known to fail')
parser.add_argument('-r', '--retry-count', type=int,
@@ -160,9 +161,9 @@ def main(argv):
options.update_failures, options.print_failures)
elif options.features is not None:
features = options.features or ['.*']
- res = run_feature_tests(server_match, features, options.jobs, options.skip_known_failures, options.retry_count)
+ res = run_feature_tests(server_match, features, options.jobs, options.skip_known_failures, options.retry_count, options.regex)
else:
- res = run_cross_tests(server_match, client_match, options.jobs, options.skip_known_failures, options.retry_count)
+ res = run_cross_tests(server_match, client_match, options.jobs, options.skip_known_failures, options.retry_count, options.regex)
return 0 if res else 1
if __name__ == '__main__':