author     Bernhard Reutner-Fischer <aldot@gcc.gnu.org>   2021-10-31 23:58:44 +0100
committer  Bernhard Reutner-Fischer <aldot@gcc.gnu.org>   2021-11-05 16:38:03 +0100
commit     b58c12f3cf926f877ea2f590c411fe7e853974e3 (patch)
tree       b1583ab9437d48b2d88dd7a65a20273c5714fb04 /contrib
parent     b33b2678342c655ff8ddb9fab379b26f656adc03 (diff)
download   gcc-b58c12f3cf926f877ea2f590c411fe7e853974e3.tar.gz
contrib: testsuite-management: Update to be python3 compatible
contrib/ChangeLog:

	* testsuite-management/validate_failures.py: 2to3
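The bulk of the change is the mechanical Python 2 to Python 3 conversion of print statements into print() calls; in addition, the open() calls gain an explicit encoding so reading .sum and manifest files no longer depends on the locale default. A minimal standalone sketch of the resulting Python 3 idioms (illustration only, not part of the patch; the function and file names here are made up):

import sys

def error(msg):
  # Python 3: print is a function; file= routes the message to stderr.
  print('error: %s' % msg, file=sys.stderr)
  sys.exit(1)

def read_sum_lines(path):
  # Explicit encoding: latin-1 accepts every byte value, so a .sum file
  # containing non-UTF-8 bytes is read without UnicodeDecodeError.
  with open(path, encoding='latin-1', mode='r') as sum_file:
    return [line.rstrip('\n') for line in sum_file]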
Diffstat (limited to 'contrib')
-rwxr-xr-x    contrib/testsuite-management/validate_failures.py    44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/contrib/testsuite-management/validate_failures.py b/contrib/testsuite-management/validate_failures.py
index 2779050468d..925ba22ea0f 100755
--- a/contrib/testsuite-management/validate_failures.py
+++ b/contrib/testsuite-management/validate_failures.py
@@ -1,11 +1,7 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 # Script to compare testsuite failures against a list of known-to-fail
 # tests.
-#
-# NOTE: This script is used in installations that are running Python 2.4.
-# Please stick to syntax features available in 2.4 and earlier
-# versions.
 # Contributed by Diego Novillo <dnovillo@google.com>
 #
@@ -82,7 +78,7 @@ _MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'
 _OPTIONS = None

 def Error(msg):
-  print >>sys.stderr, 'error: %s' % msg
+  print('error: %s' % msg, file=sys.stderr)
   sys.exit(1)
@@ -126,7 +122,7 @@ class TestResult(object):
          self.description) = re.match(r'([A-Z]+):\s*(\S+)\s*(.*)',
                                        summary_line).groups()
       except:
-        print 'Failed to parse summary line: "%s"' % summary_line
+        print('Failed to parse summary line: "%s"' % summary_line)
         raise
       self.ordinal = ordinal
     except ValueError:
@@ -180,7 +176,7 @@ class TestResult(object):
 def GetMakefileValue(makefile_name, value_name):
   if os.path.exists(makefile_name):
-    makefile = open(makefile_name)
+    makefile = open(makefile_name, encoding='latin-1', mode='r')
     for line in makefile:
       if line.startswith(value_name):
         (_, value) = line.split('=', 1)
@@ -246,8 +242,8 @@ def GetNegativeResult(line):
 def ParseManifestWorker(result_set, manifest_path):
   """Read manifest_path, adding the contents to result_set."""
   if _OPTIONS.verbosity >= 1:
-    print 'Parsing manifest file %s.' % manifest_path
-  manifest_file = open(manifest_path)
+    print('Parsing manifest file %s.' % manifest_path)
+  manifest_file = open(manifest_path, encoding='latin-1', mode='r')
   for line in manifest_file:
     line = line.strip()
     if line == "":
@@ -278,7 +274,7 @@ def ParseSummary(sum_fname):
   # ordinal is used when sorting the results so that tests within each
   # .exp file are kept sorted.
   ordinal=0
-  sum_file = open(sum_fname)
+  sum_file = open(sum_fname, encoding='latin-1', mode='r')
   for line in sum_file:
     if IsInterestingResult(line):
       result = TestResult(line, ordinal)
@@ -287,7 +283,7 @@ def ParseSummary(sum_fname):
         # Tests that have expired are not added to the set of expected
         # results. If they are still present in the set of actual results,
         # they will cause an error to be reported.
-        print 'WARNING: Expected failure "%s" has expired.' % line.strip()
+        print('WARNING: Expected failure "%s" has expired.' % line.strip())
         continue
       result_set.add(result)
   sum_file.close()
@@ -324,7 +320,7 @@ def GetResults(sum_files):
"""Collect all the test results from the given .sum files."""
build_results = set()
for sum_fname in sum_files:
- print '\t%s' % sum_fname
+ print('\t%s' % sum_fname)
build_results |= ParseSummary(sum_fname)
return build_results
@@ -387,23 +383,23 @@ def GetBuildData():
     return None, None
   srcdir = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'srcdir =')
   target = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'target_alias=')
-  print 'Source directory: %s' % srcdir
-  print 'Build target: %s' % target
+  print('Source directory: %s' % srcdir)
+  print('Build target: %s' % target)
   return srcdir, target

 def PrintSummary(msg, summary):
-  print '\n\n%s' % msg
+  print('\n\n%s' % msg)
   for result in sorted(summary):
-    print result
+    print(result)

 def GetSumFiles(results, build_dir):
   if not results:
-    print 'Getting actual results from build directory %s' % build_dir
+    print('Getting actual results from build directory %s' % build_dir)
     sum_files = CollectSumFiles(build_dir)
   else:
-    print 'Getting actual results from user-provided results'
+    print('Getting actual results from user-provided results')
     sum_files = results.split()
   return sum_files
@@ -425,7 +421,7 @@ def PerformComparison(expected, actual, ignore_missing_failures):
                  expected_vs_actual)
   if tests_ok:
-    print '\nSUCCESS: No unexpected failures.'
+    print('\nSUCCESS: No unexpected failures.')
   return tests_ok
@@ -433,7 +429,7 @@ def PerformComparison(expected, actual, ignore_missing_failures):
 def CheckExpectedResults():
   srcdir, target = GetBuildData()
   manifest_path = GetManifestPath(srcdir, target, True)
-  print 'Manifest: %s' % manifest_path
+  print('Manifest: %s' % manifest_path)
   manifest = GetManifest(manifest_path)
   sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
   actual = GetResults(sum_files)
@@ -448,16 +444,16 @@ def CheckExpectedResults():
 def ProduceManifest():
   (srcdir, target) = GetBuildData()
   manifest_path = GetManifestPath(srcdir, target, False)
-  print 'Manifest: %s' % manifest_path
+  print('Manifest: %s' % manifest_path)
   if os.path.exists(manifest_path) and not _OPTIONS.force:
     Error('Manifest file %s already exists.\nUse --force to overwrite.' %
           manifest_path)
   sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
   actual = GetResults(sum_files)
-  manifest_file = open(manifest_path, 'w')
+  manifest_file = open(manifest_path, encoding='latin-1', mode='w')
   for result in sorted(actual):
-    print result
+    print(result)
     manifest_file.write('%s\n' % result)
   manifest_file.close()
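One point worth noting about the new open() calls: they pass encoding='latin-1' explicitly rather than relying on Python 3's locale-dependent default. Because latin-1 maps every byte value to a code point, .sum files and manifests that contain stray non-UTF-8 bytes are still read and written back unchanged. A small standalone demonstration of that property (not part of the patch; the file content below is made up):

import os
import tempfile

# Hypothetical .sum content containing a byte that is not valid UTF-8.
raw = b'FAIL: gcc.dg/example.c \xff garbled output\n'

with tempfile.NamedTemporaryFile(suffix='.sum', delete=False) as tmp:
  tmp.write(raw)

# The locale default (often UTF-8) could raise UnicodeDecodeError here;
# latin-1 cannot, because every byte value maps to a code point.
with open(tmp.name, encoding='latin-1', mode='r') as sum_file:
  print(sum_file.read().strip())

os.remove(tmp.name)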