author    Dan Fandrich <dan@coneharvesters.com>  2023-04-20 17:41:31 -0700
committer Dan Fandrich <dan@coneharvesters.com>  2023-04-22 13:07:35 -0700
commit    a549e046b18684c710bb5bbf2c3969411560fc8f (patch)
tree      10c32a7ed5cbfeaa97021ae9ef2e1184319e1041
parent    020cf1c1170b49ca33f7439435701e84f99cf957 (diff)
download  curl-a549e046b18684c710bb5bbf2c3969411560fc8f.tar.gz
runtests: refactor the main test loop into two
The test loop now has an initial loop that first runs through all possible tests to build a set of those to attempt on this run, based on features and keywords, and only then goes through that new list to run them. This actually makes it three loops through all test cases, as there is an existing loop that gathers possible test numbers from the test files on disk.

This has two minor effects on the output: all the tests that will be skipped are displayed at the start (instead of being interspersed with other tests), and the -l option no longer shows a count of tests at the end or a (misleading) statement that tests have run successfully. The skipped tests are also omitted from the test results sent to AppVeyor and Azure in CI builds.

Another effect is a reduction in the amount of work considered part of the "Test definition reading and preparation time" reported with -r, making those figures slightly lower than before.

Ref: #10818
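The shape of the change is easiest to see in isolation. Below is a minimal, self-contained Perl sketch of the new two-phase structure; should_run() and run_one() are illustrative stand-ins for singletest_shouldrun() and singletest(), not the real runtests.pl helpers:

#!/usr/bin/env perl
use strict;
use warnings;

my @at = (1, 9, 42, 1234);    # candidate test numbers found on disk

# Phase 1: selection. Decide up front which tests to attempt, so every
# skip is logged here, grouped at the start of the output.
my @runtests;
foreach my $testnum (@at) {
    my $why = should_run($testnum);
    if($why) {
        print "test $testnum skipped: $why\n";
        next;
    }
    push(@runtests, $testnum);
}

# Phase 2: execution. Only the surviving candidates run, and the
# per-test total reflects the filtered list, not all tests on disk.
my $count = 0;
foreach my $testnum (@runtests) {
    $count++;
    run_one($testnum, $count, scalar(@runtests));
}

sub should_run {
    # return a non-empty reason to skip, or "" to run; the real code
    # checks features and keywords here
    my ($testnum) = @_;
    return $testnum > 1000 ? "requires a feature not built in" : "";
}

sub run_one {
    my ($testnum, $count, $total) = @_;
    print "running test $testnum ($count of $total)\n";
}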
-rw-r--r--  tests/getpart.pm   |   5
-rw-r--r--  tests/runner.pm    |  35
-rwxr-xr-x  tests/runtests.pl  | 151
3 files changed, 123 insertions, 68 deletions
diff --git a/tests/getpart.pm b/tests/getpart.pm
index f1ffd2bdd..043261bc2 100644
--- a/tests/getpart.pm
+++ b/tests/getpart.pm
@@ -232,6 +232,11 @@ sub partexists {
sub loadtest {
my ($file)=@_;
+ if(defined $xmlfile && $file eq $xmlfile) {
+ # This test is already loaded
+ return
+ }
+
undef @xml;
$xmlfile = "";
diff --git a/tests/runner.pm b/tests/runner.pm
index 40f1da193..f6586bf26 100644
--- a/tests/runner.pm
+++ b/tests/runner.pm
@@ -50,6 +50,7 @@ BEGIN {
# these are for debugging only
our @EXPORT_OK = qw(
+ readtestkeywords
singletest_preprocess
);
}
@@ -192,6 +193,21 @@ sub prepro {
#######################################################################
+# Load test keywords into %keywords hash
+#
+sub readtestkeywords {
+ my @info_keywords = getpart("info", "keywords");
+
+ # Clear the list of keywords from the last test
+ %keywords = ();
+ for my $k (@info_keywords) {
+ chomp $k;
+ $keywords{$k} = 1;
+ }
+}
+
+
+#######################################################################
# Memory allocation test and failure torture testing.
#
sub torture {
@@ -349,6 +365,7 @@ sub torture {
}
+#######################################################################
# restore environment variables that were modified in test
sub restore_test_env {
my $deleteoldenv = $_[0]; # 1 to delete the saved contents after restore
@@ -880,9 +897,21 @@ sub singletest_postcheck {
# Get ready to run a single test case
sub runner_test_preprocess {
my ($testnum)=@_;
-
my %testtimings;
+ # timestamp test preparation start
+ # TODO: this metric now shows only a portion of the prep time; better would
+ # be to time singletest_preprocess below instead
+ $testtimings{"timeprepini"} = Time::HiRes::time();
+
+ ###################################################################
+ # Load test metadata
+ # ignore any error here--if there were one, it would have been
+ # caught during the selection phase and this test would not be
+ # running now
+ loadtest("${TESTDIR}/test${testnum}");
+ readtestkeywords();
+
###################################################################
# Start the servers needed to run this test case
my $why = singletest_startservers($testnum, \%testtimings);
@@ -891,14 +920,14 @@ sub runner_test_preprocess {
###############################################################
# Generate preprocessed test file
+ # This must be done after the servers are started so server
+ # variables are available for substitution.
singletest_preprocess($testnum);
-
###############################################################
# Set up the test environment to run this test case
singletest_setenv();
-
###############################################################
# Check that the test environment is fine to run this test case
if (!$listonly) {
diff --git a/tests/runtests.pl b/tests/runtests.pl
index 9a1e373c9..ca68c00f3 100755
--- a/tests/runtests.pl
+++ b/tests/runtests.pl
@@ -139,6 +139,7 @@ my %ignored_keywords; # key words of tests to ignore results
my %enabled_keywords; # key words of tests to run
my %disabled; # disabled test cases
my %ignored; # ignored results of test cases
+my %ignoretestcodes; # if test results are to be ignored
my $timestats; # time stamping and stats generation
my $fullstats; # show time stats for every single test
@@ -325,6 +326,27 @@ sub compare {
}
#######################################################################
+# Parse and store the protocols in curl's Protocols: line
+sub parseprotocols {
+ my ($line)=@_;
+
+ @protocols = split(' ', lc($line));
+
+ # Generate a "proto-ipv6" version of each protocol to match the
+ # IPv6 <server> name and a "proto-unix" to match the variant which
+ # uses Unix domain sockets. This works even if support isn't
+ # compiled in because the <features> test will fail.
+ push @protocols, map(("$_-ipv6", "$_-unix"), @protocols);
+
+ # 'http-proxy' is used in test cases to do CONNECT through
+ push @protocols, 'http-proxy';
+
+ # 'none' is used in test cases to mean no server
+ push @protocols, 'none';
+}
+
+
+#######################################################################
# Check & display information about curl and the host the test suite runs on.
# Information to do with servers is displayed in displayserverfeatures, after
# the server initialization is performed.
@@ -458,19 +480,7 @@ sub checksystemfeatures {
}
elsif($_ =~ /^Protocols: (.*)/i) {
# these are the protocols compiled in to this libcurl
- @protocols = split(' ', lc($1));
-
- # Generate a "proto-ipv6" version of each protocol to match the
- # IPv6 <server> name and a "proto-unix" to match the variant which
- # uses Unix domain sockets. This works even if support isn't
- # compiled in because the <features> test will fail.
- push @protocols, map(("$_-ipv6", "$_-unix"), @protocols);
-
- # 'http-proxy' is used in test cases to do CONNECT through
- push @protocols, 'http-proxy';
-
- # 'none' is used in test cases to mean no server
- push @protocols, 'none';
+ parseprotocols($1);
}
elsif($_ =~ /^Features: (.*)/i) {
$feat = $1;
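The list that the extracted parseprotocols() builds can be seen with a short standalone demo; this reimplements the function above for illustration only:

#!/usr/bin/env perl
use strict;
use warnings;

my @protocols;

sub parseprotocols {
    my ($line)=@_;
    @protocols = split(' ', lc($line));
    # one "-ipv6" and one "-unix" variant per protocol; push evaluates
    # the map over the original list before appending
    push @protocols, map(("$_-ipv6", "$_-unix"), @protocols);
    push @protocols, 'http-proxy';   # used by CONNECT test cases
    push @protocols, 'none';         # used by tests needing no server
}

parseprotocols("HTTP FTP");
print join(' ', @protocols), "\n";
# prints: http ftp http-ipv6 http-unix ftp-ipv6 ftp-unix http-proxy none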
@@ -844,6 +854,9 @@ sub citest_finishtestrun {
sub updatetesttimings {
my ($testnum, %testtimings)=@_;
+ if(defined $testtimings{"timeprepini"}) {
+ $timeprepini{$testnum} = $testtimings{"timeprepini"};
+ }
if(defined $testtimings{"timesrvrini"}) {
$timesrvrini{$testnum} = $testtimings{"timesrvrini"};
}
@@ -870,15 +883,6 @@ sub singletest_shouldrun {
my $errorreturncode = 1; # 1 means normal error, 2 means ignored error
my @what; # what features are needed
- # first, remove all lingering log files
- if(!cleardir($LOGDIR) && $clearlocks) {
- clearlocks($LOGDIR);
- cleardir($LOGDIR);
- }
-
- # timestamp test preparation start
- $timeprepini{$testnum} = Time::HiRes::time();
-
if($disttests !~ /test$testnum(\W|\z)/ ) {
logmsg "Warning: test$testnum not present in tests/data/Makefile.inc\n";
}
@@ -895,7 +899,6 @@ sub singletest_shouldrun {
$errorreturncode = 2;
}
- # load the test case file definition
if(loadtest("${TESTDIR}/test${testnum}")) {
if($verbose) {
# this is not a test
@@ -946,9 +949,6 @@ sub singletest_shouldrun {
if(!$why) {
@info_keywords = getpart("info", "keywords");
- # Clear the list of keywords from the last test
- %keywords = ();
-
if(!$info_keywords[0]) {
$why = "missing the <keywords> section!";
}
@@ -966,8 +966,6 @@ sub singletest_shouldrun {
logmsg "Warning: test$testnum result is ignored due to $k\n";
$errorreturncode = 2;
}
-
- $keywords{$k} = 1;
}
if(!$why && !$match && %enabled_keywords) {
@@ -1585,39 +1583,35 @@ sub singletest_success {
sub singletest {
my ($testnum, $count, $total)=@_;
- #######################################################################
- # Verify that the test should be run
- my ($why, $errorreturncode) = singletest_shouldrun($testnum);
-
- if(!$listonly) {
+ # first, remove all lingering log files
+ if(!cleardir($LOGDIR) && $clearlocks) {
+ clearlocks($LOGDIR);
+ cleardir($LOGDIR);
+ }
- ###################################################################
- # Restore environment variables that were modified in a previous run.
- # Test definition may instruct to (un)set environment vars.
- # This is done this early so that leftover variables don't affect
- # starting servers or CI registration.
- restore_test_env(1);
+ ###################################################################
+ # Restore environment variables that were modified in a previous run.
+ # Test definition may instruct to (un)set environment vars.
+ # This is done this early so that leftover variables don't affect
+ # starting servers or CI registration.
+ restore_test_env(1);
- ###################################################################
- # Register the test case with the CI environment
- citest_starttest($testnum);
+ ###################################################################
+ # Load test file so CI registration can get the right data before the
+ # runner is called
+ loadtest("${TESTDIR}/test${testnum}");
- if(!$why) {
- my $testtimings;
- ($why, $testtimings) = runner_test_preprocess($testnum);
- updatetesttimings($testnum, %$testtimings);
- } else {
+ ###################################################################
+ # Register the test case with the CI environment
+ citest_starttest($testnum);
- # set zero servers verification time when they aren't started
- $timesrvrini{$testnum} = $timesrvrend{$testnum} = Time::HiRes::time();
- }
- }
+ my ($why, $testtimings) = runner_test_preprocess($testnum);
+ updatetesttimings($testnum, %$testtimings);
#######################################################################
# Print the test name and count tests
- my $error;
- $error = singletest_count($testnum, $why);
- if($error || $listonly) {
+ my $error = singletest_count($testnum, $why);
+ if($error) {
return $error;
}
@@ -1627,14 +1621,13 @@ sub singletest {
my $CURLOUT;
my $tool;
my $usedvalgrind;
- my $testtimings;
($error, $testtimings, $cmdres, $CURLOUT, $tool, $usedvalgrind) = runner_test_run($testnum);
updatetesttimings($testnum, %$testtimings);
if($error == -1) {
# no further verification will occur
$timevrfyend{$testnum} = Time::HiRes::time();
# return a test failure, either to be reported or to be ignored
- return $errorreturncode;
+ return ignoreresultcode($testnum);
}
elsif($error == -2) {
# fill in the missing timings on error
@@ -1651,8 +1644,8 @@ sub singletest {
# Verify that the test succeeded
$error = singletest_check($testnum, $cmdres, $CURLOUT, $tool, $usedvalgrind);
if($error == -1) {
- # return a test failure, either to be reported or to be ignored
- return $errorreturncode;
+ # return a test failure, either to be reported or to be ignored
+ return ignoreresultcode($testnum);
}
elsif($error == -2) {
# torture test; there is no verification, so the run result holds the
@@ -1663,7 +1656,7 @@ sub singletest {
#######################################################################
# Report a successful test
- singletest_success($testnum, $count, $total, $errorreturncode);
+ singletest_success($testnum, $count, $total, ignoreresultcode($testnum));
return 0;
@@ -1806,6 +1799,19 @@ sub runtimestats {
}
#######################################################################
+# returns code indicating why a test was skipped
+# 0=unknown test, 1=use test result, 2=ignore test result
+#
+sub ignoreresultcode {
+ my ($testnum)=@_;
+ if(defined $ignoretestcodes{$testnum}) {
+ return $ignoretestcodes{$testnum};
+ }
+ return 0;
+}
+
+
+#######################################################################
# Check options to this test program
#
@@ -2422,18 +2428,33 @@ my $count=0;
$start = time();
+# scan all tests to find ones we should try to run
+my @runtests;
foreach my $testnum (@at) {
-
$lasttest = $testnum if($testnum > $lasttest);
+ my ($why, $errorreturncode) = singletest_shouldrun($testnum);
+ if($why || $listonly) {
+ # Display test name now--test will be completely skipped later
+ my $error = singletest_count($testnum, $why);
+ next;
+ }
+ $ignoretestcodes{$testnum} = $errorreturncode;
+ push(@runtests, $testnum);
+}
+
+if($listonly) {
+ exit(0);
+}
+
+# run through each candidate test and execute it
+foreach my $testnum (@runtests) {
$count++;
# execute one test case
- my $error = singletest($testnum, $count, scalar(@at));
+ my $error = singletest($testnum, $count, scalar(@runtests));
- if(!$listonly) {
- # Submit the test case result with the CI environment
- citest_finishtest($testnum, $error);
- }
+ # Submit the test case result with the CI environment
+ citest_finishtest($testnum, $error);
if($error < 0) {
# not a test we can run