author    Martin Schwenke <martin@meltin.net>  2020-07-22 14:43:24 +1000
committer Martin Schwenke <martins@samba.org>  2020-07-22 05:07:46 +0000
commit    4438e44f880249164bb33d783f9a773f44ba1a99 (patch)
tree      f5b5e5d6b8addfc911703efb237194155a64f545 /ctdb/tests/INTEGRATION
parent    271ad95e2359b1c4fc6d73c0c8e941bafb85c97c (diff)
download  samba-4438e44f880249164bb33d783f9a773f44ba1a99.tar.gz
Revert "ctdb-tests: Update preamble for INTEGRATION tests"
Fix missing Reviewed-by: tag.

This reverts commit 65f56505e29c01d5891e5bc1050b6c37b8cbdee7.

Signed-off-by: Martin Schwenke <martin@meltin.net>
Reviewed-by: Amitay Isaacs <amitay@gmail.com>
Diffstat (limited to 'ctdb/tests/INTEGRATION')
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.001.attach.sh | 29
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.002.attach.sh | 39
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.003.detach.sh | 31
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.004.wipe.sh | 35
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh | 39
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/fetch.001.ring.sh | 17
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/readonly.001.basic.sh | 50
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh | 62
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.002.large.sh | 34
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh | 14
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh | 41
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.011.continue.sh | 35
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh | 29
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.002.loop.sh | 17
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh | 23
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh | 49
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh | 25
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/traverse.001.one.sh | 26
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/traverse.002.many.sh | 32
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.002.full.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.001.list.sh | 29
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh | 16
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh | 16
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.012.reloadips.sh | 26
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.013.failover_noop.sh | 16
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh | 13
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh | 21
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh | 15
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh | 13
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh | 30
-rwxr-xr-x  ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh | 19
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh | 25
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh | 28
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh | 13
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.003.ping.sh | 44
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh | 28
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh | 34
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh | 29
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh | 31
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh | 30
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh | 28
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh | 15
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh | 19
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh | 2
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh | 17
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh | 17
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.090.unreachable.sh | 40
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh | 11
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh | 26
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh | 19
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh | 26
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh | 11
-rwxr-xr-x  ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh | 19
62 files changed, 1060 insertions, 313 deletions
diff --git a/ctdb/tests/INTEGRATION/database/basics.001.attach.sh b/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
index e5c6b81ba65..bcec20afc5e 100755
--- a/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
+++ b/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
@@ -1,12 +1,35 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb getdbmap' operates as expected
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb getdbmap' operates as expected.
+
+This test creates some test databases using 'ctdb attach'.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Get the database on using 'ctdb getdbmap'.
+3. Verify that the output is valid.
+
+Expected results:
+
+* 'ctdb getdbmap' shows a valid listing of databases.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
make_temp_db_filename ()
{
diff --git a/ctdb/tests/INTEGRATION/database/basics.002.attach.sh b/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
index 6a5c812f35a..ae09741f980 100755
--- a/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
+++ b/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
@@ -1,19 +1,38 @@
-#!/usr/bin/env bash
+#!/bin/bash
+
+test_info()
+{
+ cat <<EOF
+Verify the operation of 'ctdb attach' command.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Shut down one of the nodes
+3. Attach test databases
+4. Start shutdown node
+5. Verify that the databases are attached.
+6. Restart one of the nodes
+7. Verify that the databases are attached.
-# Verify that databases are attached a node joins the cluster:
-# 1. Shut down CTDB on one node
-# 2. Attach test databases
-# 3. Check that databases are attached on all up nodes
-# 4. Start CTDB on the node where it is shut down
-# 5. Verify that the test databases are attached on this node
-# 6. Restart one of the nodes
-# 7. Verify that the test databases are attached on this node
+
+Expected results:
+
+* Command 'ctdb attach' command successfully attaches databases.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
######################################################################
diff --git a/ctdb/tests/INTEGRATION/database/basics.003.detach.sh b/ctdb/tests/INTEGRATION/database/basics.003.detach.sh
index cb44955d1e8..5d1e12328c6 100755
--- a/ctdb/tests/INTEGRATION/database/basics.003.detach.sh
+++ b/ctdb/tests/INTEGRATION/database/basics.003.detach.sh
@@ -1,15 +1,34 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb detach' works as expected:
-# 1. Attach test databases
-# 2. Detach test databases
-# 3. Confirm test databases are not attached
+test_info()
+{
+ cat <<EOF
+Verify the operation of 'ctdb detach' command.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Attach test databases
+3. Detach test databases
+4. Verify that the databases are not attached.
+
+Expected results:
+
+* Command 'ctdb detach' command successfully removes attached databases.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
######################################################################
diff --git a/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh b/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh
index 115d64cd606..9305339d1e3 100755
--- a/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh
+++ b/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh
@@ -1,17 +1,36 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb wipedb' can clear a persistent database:
-# 1. Verify that the status on all of the ctdb nodes is 'OK'.
-# 2. Create a persistent test database
-# 3. Add some records to node 0 and node 1
-# 4. Run wipedb on node 0
-# 5. verify the database is empty on both node 0 and 1
+test_info()
+{
+ cat <<EOF
+The command 'ctdb wipedb' is used to clear a database across the whole
+cluster.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Create a persistent test database
+3. Add some records to node #0 and node #1
+4. Perform wipedb on node #0 and verify the database is empty on both node 0 and 1
+
+Expected results:
+
+* An empty database will result
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
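
The wipedb flow described in the restored preamble can be exercised by hand. Below is a minimal sketch, assuming the standard ctdb tool subcommands (attach, pstore, wipedb, cattdb) and the test suite's onnode helper; the database name and keys are invented for illustration:

    db="wipedb_test.tdb"
    echo -n "value0" >/tmp/value0                 # pstore reads the record value from a file
    onnode 0 ctdb attach "$db" persistent         # create the persistent test database
    onnode 0 ctdb pstore "$db" key0 /tmp/value0   # add a record via node 0
    onnode 1 ctdb pstore "$db" key1 /tmp/value0   # and another via node 1
    onnode 0 ctdb wipedb "$db"                    # wipe from node 0
    onnode all ctdb cattdb "$db"                  # expect an empty dump on every node
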
diff --git a/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh b/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh
index 8c469d4642b..579233e0e80 100755
--- a/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh
+++ b/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh
@@ -1,18 +1,39 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Confirm that 'ctdb restoredb' works correctly:
-# 1. Create a persistent test database
-# 2. Add some records to test database
-# 3. Backup database
-# 4. Wipe database and verify the database is empty on all nodes
-# 5. Restore database and make sure all the records are restored
-# 6. Make sure no recovery has been triggered
+test_info()
+{
+ cat <<EOF
+The command 'ctdb restoredb' is used to restore a database across the
+whole cluster.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Create a persistent test database
+3. Add some records to test database
+4. Backup database
+5. Wipe database and verify the database is empty on all nodes
+6. Restore database and make sure all the records are restored
+7. Make sure no recovery has been triggered
+
+Expected results:
+
+* Database operations should not cause a recovery
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 $CTDB status
generation=$(sed -n -e 's/^Generation:\([0-9]*\)/\1/p' "$outfile")
diff --git a/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh b/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh
index 4d7d392240d..33df07b7d88 100755
--- a/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh
+++ b/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh
@@ -1,12 +1,23 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Run the fetch_ring test and sanity check the output
+test_info()
+{
+ cat <<EOF
+Run the fetch_ring test and sanity check the output.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
diff --git a/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh b/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh
index 6d442535a21..c4e0023e11c 100755
--- a/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh
+++ b/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Run the fetch_ring test, sanity check the output and check hot keys
# statistics
diff --git a/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh b/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh
index 20faa3aa03b..a83b4d31530 100755
--- a/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh
+++ b/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh
@@ -1,28 +1,46 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Test support for read-only records
+test_info()
+{
+ cat <<EOF
+Read-only records can be activated at runtime using a ctdb command.
+If read-only records are not activated, then any attempt to fetch a read-only
+copy should be automatically upgraded to a read-write fetch_lock().
+
+If read-only delegations are present, then any attempt to acquire a read-write
+fetch_lock will trigger all delegations to be revoked before the fetch lock
+completes.
+
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
-# Read-only records can be activated at runtime using a ctdb command.
-# If read-only records are not activated, then any attempt to fetch a
-# read-only copy should be automatically upgraded to a read-write
-# fetch_locked().
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. create a test database and some records
+3. try to fetch read-only records, this should not result in any delegations
+4. activate read-only support
+5. try to fetch read-only records, this should result in delegations
+6. do a fetchlock and the delegations should be revoked
+7. try to fetch read-only records, this should result in delegations
+8. do a recovery and the delegations should be revoked
-# If read-only delegations are present, then any attempt to acquire a
-# read-write fetch_lock will trigger revocation of all delegations
-# before the fetch_locked().
+Expected results:
-# 1. Create a test database and some records
-# 2. Try to fetch read-only records, this should not result in any delegations
-# 3. Activate read-only support
-# 4. Try to fetch read-only records, this should result in delegations
-# 5. Do a fetchlock and the delegations should be revoked
-# 6. Try to fetch read-only records, this should result in delegations
+Delegations should be created and revoked as above
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
######################################################################
diff --git a/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
index e523e835de7..a8bc2d95ef6 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
@@ -1,33 +1,45 @@
-#!/usr/bin/env bash
-
-# Test that recovery correctly handles RSNs
-
-# Recovery can under certain circumstances lead to old record copies
-# resurrecting: Recovery selects the newest record copy purely by RSN. At
-# the end of the recovery, the recovery master is the dmaster for all
-# records in all (non-persistent) databases. And the other nodes locally
-# hold the complete copy of the databases. The bug is that the recovery
-# process does not increment the RSN on the recovery master at the end of
-# the recovery. Now clients acting directly on the Recovery master will
-# directly change a record's content on the recmaster without migration
-# and hence without RSN bump. So a subsequent recovery can not tell that
-# the recmaster's copy is newer than the copies on the other nodes, since
-# their RSN is the same. Hence, if the recmaster is not node 0 (or more
-# precisely not the active node with the lowest node number), the recovery
-# will choose copies from nodes with lower number and stick to these.
-
-# 1. Create a test database
-# 2. Add a record with value value1 on recovery master
-# 3. Force a recovery
-# 4. Update the record with value value2 on recovery master
-# 5. Force a recovery
-# 6. Confirm that the value is value2
+#!/bin/bash
+
+test_info()
+{
+ cat <<EOF
+Recovery can under certain circumstances lead to old record copies
+resurrecting: Recovery selects the newest record copy purely by RSN. At
+the end of the recovery, the recovery master is the dmaster for all
+records in all (non-persistent) databases. And the other nodes locally
+hold the complete copy of the databases. The bug is that the recovery
+process does not increment the RSN on the recovery master at the end of
+the recovery. Now clients acting directly on the Recovery master will
+directly change a record's content on the recmaster without migration
+and hence without RSN bump. So a subsequent recovery can not tell that
+the recmaster's copy is newer than the copies on the other nodes, since
+their RSN is the same. Hence, if the recmaster is not node 0 (or more
+precisely not the active node with the lowest node number), the recovery
+will choose copies from nodes with lower number and stick to these.
+
+Steps:
+
+1. Create a test database
+2. Add a record with value value1 on recovery master
+3. Force a recovery
+4. Update the record with value value2 on recovery master
+5. Force a recovery
+6. Fetch the record
+
+Expected results:
+
+* The record should have value value2 and not value1
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
#
# Main test
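
The resurrection scenario in the restored preamble can be sketched with plain ctdb commands. This assumes the writekey/readkey subcommands and that the commands are run on the current recovery master, with an invented database name:

    db="rsn_test.tdb"
    ctdb attach "$db"                    # volatile test database
    ctdb writekey "$db" testkey value1   # write on the recovery master
    ctdb recover                         # force a recovery
    ctdb writekey "$db" testkey value2   # update directly on the recmaster: no migration, no RSN bump
    ctdb recover                         # force a second recovery
    ctdb readkey "$db" testkey           # value2 expected; value1 would mean an old copy resurrected
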
diff --git a/ctdb/tests/INTEGRATION/database/recovery.002.large.sh b/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
index 4c0f7024a99..4f544fa8475 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
@@ -1,23 +1,33 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Test recovery of large volatile and persistent databases
+test_info()
+{
+ cat <<EOF
+Older style of recovery using PULL_DB and PUSH_DB controls tries to
+construct a single large marshall buffer for all the records in the
+database. However, this approach is problematic as talloc restricts the
+maximum size of buffer to 256M. Also, trying to construct and send large
+buffers is inefficient and can cause CTDB daemon to be tied up for long
+periods of time.
-# Older style of recovery using PULL_DB and PUSH_DB controls tries to
-# construct a single large marshall buffer for all the records in the
-# database. However, this approach is problematic as talloc restricts the
-# maximum size of buffer to 256M. Also, trying to construct and send large
-# buffers is inefficient and can cause CTDB daemon to be tied up for long
-# periods of time.
+Instead new style recovery is introduced using DB_PULL and
+DB_PUSH_START/DB_PUSH_CONFIRM controls. This sends the records in
+batches of ~RecBufferSizeLimit in size at a time.
-# Instead new style recovery is introduced using DB_PULL and
-# DB_PUSH_START/DB_PUSH_CONFIRM controls. This sends the records in
-# batches of ~RecBufferSizeLimit in size at a time.
+Expected results:
+
+* The recovery should complete successfully
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
#
# Main test
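
A quick way to observe the batched DB_PULL/DB_PUSH behaviour described above is to lower the RecBufferSizeLimit tunable before forcing a recovery; the value below is illustrative only:

    ctdb setvar RecBufferSizeLimit 10000   # shrink the batch size so several batches are needed
    ctdb recover                           # recovery should still complete normally
    ctdb getvar RecBufferSizeLimit         # confirm the tunable took effect
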
diff --git a/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh b/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
index b314d4d86eb..b65452c0e06 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
@@ -1,13 +1,19 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Ensure recovery doesn't resurrect deleted records from recently
-# inactive nodes
+test_info()
+{
+ cat <<EOF
+Ensure recovery doesn't resurrect deleted records from recently inactive nodes
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
testdb="rec_test.tdb"
diff --git a/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh b/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
index d13a9a5ccac..84e0ec15c43 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
@@ -1,20 +1,39 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Ensure that persistent databases are correctly recovered by database
-# sequence number
-#
-# 1. Create and wipe a persistent test database
-# 2. Directly add a single record to the database on each node
-# 3. Trigger a recover
-# 4. Ensure that the database contains only a single record
-#
-# Repeat but with sequence numbers set by hand on each node
+test_info()
+{
+ cat <<EOF
+The persistent databases are recovered using sequence number.
+The recovery is performed by picking the copy of the database from the
+node that has the highest sequence number and ignore the content on all
+other nodes.
+
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. create a persistent test database
+3. test that no seqnum record blends the database during recovery
+4. test that seqnum record does not blend the database during recovery
+
+Expected results:
+
+* that 3,4 will recover the highest seqnum database
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
diff --git a/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh b/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
index 3376e062bed..81aa6e089d3 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
@@ -1,20 +1,33 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Confirm that the deleted records are not resurrected after recovery
-#
-# 1. Create a persistent database
-# 2. Add a record and update it few times.
-# 3. Delete the record
-# 4. Use "ctdb stop" to stop one of the nodes
-# 5. Add a record with same key.
-# 6. Continue on the stopped node
-# 7. Confirm that the record still exists
+test_info()
+{
+ cat <<EOF
+This test confirms that the deleted records are not resurrected after recovery.
+
+Steps:
+
+1. Create a persistent database
+2. Add a record and update it few times.
+3. Delete the record
+4. Turn off one of the nodes
+5. Add a record with same key.
+6. Turn on the stopped node
+
+Expected results:
+
+* Check that the deleted record is present after recovery.
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
do_test()
{
diff --git a/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh b/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
index 556e52330a6..72b18348398 100755
--- a/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
+++ b/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
@@ -1,15 +1,32 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that the 'ctdb ptrans' works as expected
-#
-# Pipe some operation to ctdb ptrans and validate the TDB contents
-# with ctdb catdb
+test_info()
+{
+ cat <<EOF
+Verify that the ctdb ptrans works as expected
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Pipe some operation to ctdb ptrans and validate the TDB contents with ctdb catdb
+
+Expected results:
+
+* ctdb ptrans works as expected.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
TESTDB="ptrans_test.tdb"
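
As an informal illustration of the ptrans check, assuming key/value pairs are accepted one per line on stdin in pstore-style quoting (the exact quoting convention is an assumption here):

    db="ptrans_test.tdb"
    ctdb attach "$db" persistent
    printf '"key1" "value1"\n"key2" "value2"\n' | ctdb ptrans "$db"   # apply in a single transaction
    ctdb catdb "$db"                                                  # validate the stored records
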
diff --git a/ctdb/tests/INTEGRATION/database/transaction.002.loop.sh b/ctdb/tests/INTEGRATION/database/transaction.002.loop.sh
index d633c7c0f05..d47761216ce 100755
--- a/ctdb/tests/INTEGRATION/database/transaction.002.loop.sh
+++ b/ctdb/tests/INTEGRATION/database/transaction.002.loop.sh
@@ -1,12 +1,23 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that the transaction_loop test succeeds
+test_info()
+{
+ cat <<EOF
+Verify that the transaction_loop test succeeds.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
TESTDB="persistent_trans.tdb"
diff --git a/ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh b/ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh
index 05aadba04ce..7beacb9c913 100755
--- a/ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh
+++ b/ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh
@@ -1,12 +1,15 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that the transaction_loop test succeeds with recoveries.
-
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+test_info()
+{
+ cat <<EOF
+Verify that the transaction_loop test succeeds with recoveries.
-set -e
+Prerequisites:
-ctdb_test_init
+* An active CTDB cluster with at least 2 active nodes.
+EOF
+}
recovery_loop()
{
@@ -27,6 +30,14 @@ recovery_loop_start()
ctdb_test_exit_hook_add "kill $RECLOOP_PID >/dev/null 2>&1"
}
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+ctdb_test_init
+
+set -e
+
+cluster_is_healthy
+
TESTDB="persistent_trans.tdb"
try_command_on_node 0 "$CTDB attach $TESTDB persistent"
diff --git a/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh b/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh
index 528303a6435..90b5ee06151 100755
--- a/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh
+++ b/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh
@@ -1,21 +1,38 @@
-#!/usr/bin/env bash
-
-# Verify that "ctdb update_record_persistent" creates new records and
-# updates existing records in a persistent database
-#
-# 1. Create and wipe a persistent test database
-# 2. Do a recovery
-# 3. Confirm that the database is empty
-# 4. Create a new record using "ctdb update_record_persistent"
-# 5. Confirm the record exists in the database using "ctdb cattdb"
-# 6. Update the record's value using "ctdb update_record_persistent"
-# 7. Confirm that the original value no longer exists using "ctdb cattdb"
+#!/bin/bash
+
+test_info()
+{
+ cat <<EOF
+UPDATE_RECORD control should be able to create new records and update
+existing records in a persistent database.
+
+Prerequisites:
+
+* An active CTDB cluster with at least one active node.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. create a persistent test database
+3, wipe the database to make sure it is empty
+4, create a new record
+5, update the record
+
+Expected results:
+
+* 4 created record found in the tdb
+* 5 updated record found in the tdb
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
@@ -27,7 +44,7 @@ echo "Create persistent test database \"$test_db\""
try_command_on_node 0 $CTDB attach "$test_db" persistent
-# 3.
+# 3,
echo "Wipe the persistent test database"
try_command_on_node 0 $CTDB wipedb "$test_db"
echo "Force a recovery"
@@ -42,7 +59,7 @@ else
exit 1
fi
-# 4.
+# 4,
echo "Create a new record in the persistent database using UPDATE_RECORD"
try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \
-D "$test_db" -k "Update_Record_Persistent" -v "FirstValue"
@@ -55,7 +72,7 @@ else
exit 1
fi
-# 5.
+# 5,
echo Modify an existing record in the persistent database using UPDATE_RECORD
try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \
-D "$test_db" -k "Update_Record_Persistent" -v "SecondValue"
diff --git a/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh b/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh
index 9de6c34be40..4d5c1297fa9 100755
--- a/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh
+++ b/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh
@@ -1,13 +1,16 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that the transaction_loop test succeeds with recoveries for
-# replicated databases
-
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+test_info()
+{
+ cat <<EOF
+Verify that the transaction_loop test succeeds with recoveries for replicated
+databases.
-set -e
+Prerequisites:
-ctdb_test_init
+* An active CTDB cluster with at least 2 active nodes.
+EOF
+}
recovery_loop()
{
@@ -28,6 +31,14 @@ recovery_loop_start()
ctdb_test_exit_hook_add "kill $RECLOOP_PID >/dev/null 2>&1"
}
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+ctdb_test_init
+
+set -e
+
+cluster_is_healthy
+
TESTDB="replicated_trans.tdb"
try_command_on_node 0 "$CTDB attach $TESTDB replicated"
diff --git a/ctdb/tests/INTEGRATION/database/traverse.001.one.sh b/ctdb/tests/INTEGRATION/database/traverse.001.one.sh
index 1b3b7c2d25c..7f3007d5105 100755
--- a/ctdb/tests/INTEGRATION/database/traverse.001.one.sh
+++ b/ctdb/tests/INTEGRATION/database/traverse.001.one.sh
@@ -1,17 +1,29 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Confirm that traverses of volatile databases work as expected
+test_info()
+{
+ cat <<EOF
+Confirm that traverses of volatile databases work as expected
+
+This is a very simple example. It writes a single record, updates it
+on another node and then confirms that the correct value is found when
+traversing. It then repeats this after removing the LMASTER role from
+the node where the value is updated.
+
+Expected results:
-# This is a very simple example. It writes a single record, updates it
-# on another node and then confirms that the correct value is found when
-# traversing. It then repeats this after removing the LMASTER role from
-# the node where the value is updated.
+* The expected records should be found
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
#
# Main test
diff --git a/ctdb/tests/INTEGRATION/database/traverse.002.many.sh b/ctdb/tests/INTEGRATION/database/traverse.002.many.sh
index fb0dc98f52b..d28f9c2814e 100755
--- a/ctdb/tests/INTEGRATION/database/traverse.002.many.sh
+++ b/ctdb/tests/INTEGRATION/database/traverse.002.many.sh
@@ -1,16 +1,34 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Test cluster wide traverse code
-#
-# 1. Create a volatile test database
-# 2. Add records on different nodes
-# 3. Use "ctdb catdb" to confirm that all added records are present
+test_info()
+{
+ cat <<EOF
+Test CTDB cluster wide traverse code.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Create a test database
+2. Add records on different nodes
+3. Run traverse
+
+Expected results:
+
+* All records are retrieved.
+
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes"
num_nodes=$(echo "$out" | wc -l)
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
index 27a2225f437..651de4df57c 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Ensure that vacuuming deletes records on all nodes
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
index 0dc8372a6a8..cc1113b02f5 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Ensure a full vacuuming run deletes records
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
index acb7b135e55..00cb2c3df29 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Ensure that vacuuming does not delete a record that is recreated
# before vacuuming completes. This needs at least 3 nodes.
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
index 386252633c8..193a769096d 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Confirm that a record is not vacuumed if it is locked when the 1st
# fast vacuuming run occurs on the node on which it was deleted, but
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
index b6adbf73c87..284808c8775 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Confirm that a record is vacuumed if it is locked on the deleting
# node when the 2nd fast vacuuming run occurs, but vacuuming is
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
index 481d1d44428..2f983aa79d6 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Confirm that a record is not vacuumed if it is locked on the lmaster
# when the 3rd fast vacuuming run occurs, but is dropped from the
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
index 63d7d1f3938..47b6be2c7f2 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Confirm that a record is not vacuumed if it is locked on the
# deleting node when the 3rd fast vacuuming run occurs, but is dropped
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
index 7f37ada3115..405ddbb8034 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Confirm that a record is not vacuumed if it is locked on another
# (non-lmaster, non-deleting) node when the 3rd fast vacuuming run
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh b/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh
index 2fc75b731b9..d8e0097362f 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh
@@ -1,12 +1,35 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb ip' shows the correct output
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb ip' shows the correct output.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run 'ctdb ip' on one of the nodes and verify the list of IP
+ addresses displayed (cross check the result with the output of
+ 'ip addr show' on the node).
+3. Verify that pipe-separated output is generated with the -X option.
+
+Expected results:
+
+* 'ctdb ip' shows the list of public IPs being served by a node.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
echo "Getting list of public IPs..."
try_command_on_node -v 1 "$CTDB ip all | tail -n +2"
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh b/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh
index aba85dd0499..aefed54c45e 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh
@@ -1,12 +1,22 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that an IP address can be added to a node using 'ctdb addip'
+test_info()
+{
+ cat <<EOF
+Verify that an IP address can be added to a node using 'ctdb addip'.
+
+This test does not do any network level checks to make sure IP
+addresses are actually on interfaces. It just consults "ctdb ip".
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node_and_ips
get_test_ip_mask_and_iface
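
For orientation, the addip operation verified here (and the delip operation in the next test) boils down to something like the following; the address, netmask and interface are invented for the example:

    onnode 1 ctdb addip 10.0.0.99/24 eth1   # add a public address on node 1
    onnode 1 ctdb ip                        # the new address should now appear in the list
    onnode 1 ctdb delip 10.0.0.99           # remove it again
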
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh b/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh
index 5235a9d3203..d3f0f3a012b 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh
@@ -1,12 +1,22 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that a node's public IP address can be deleted using 'ctdb deleteip'
+test_info()
+{
+ cat <<EOF
+Verify that a node's public IP address can be deleted using 'ctdb deleteip'.
+
+This test does not do any network level checks to make sure IP
+addresses are actually on interfaces. It just consults "ctdb ip".
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node_and_ips
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.012.reloadips.sh b/ctdb/tests/INTEGRATION/failover/pubips.012.reloadips.sh
index 829b83930da..451fca3a866 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.012.reloadips.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.012.reloadips.sh
@@ -1,15 +1,31 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that IPs can be reconfigured using 'ctdb reloadips'
+test_info()
+{
+ cat <<EOF
+Verify that IPs can be rearranged using 'ctdb reloadips'.
+
+Various sub-tests that remove addresses from the public_addresses file
+on a node or delete the entire contents of the public_addresses file.
-# Various sub-tests that remove addresses from the public_addresses file
-# on a node or delete the entire contents of the public_addresses file.
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Expected results:
+
+* When addresses are deconfigured "ctdb ip" no longer reports them and
+ when added they are seen again.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node_and_ips
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.013.failover_noop.sh b/ctdb/tests/INTEGRATION/failover/pubips.013.failover_noop.sh
index 1c13a3ce3c2..0c6fd833a5e 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.013.failover_noop.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.013.failover_noop.sh
@@ -1,9 +1,17 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Check that CTDB operates correctly if:
+test_info()
+{
+ cat <<EOF
+Check that CTDB operates correctly if:
-# * failover is disabled; or
-# * there are 0 public IPs configured
+* failover is disabled; or
+* there are 0 public IPs configured
+
+This test only does anything with local daemons. On a real cluster it
+has no way of updating configuration.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh b/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh
index 845b4b50fdb..2756f64ceb1 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh
@@ -1,12 +1,19 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that an interface is deleted when all IPs on it are deleted
+test_info()
+{
+ cat <<EOF
+Verify that an interface is deleted when all IPs on it are deleted.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node_and_ips
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh b/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh
index 68b5e079d66..699ccc3d2ee 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh
@@ -1,18 +1,25 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb moveip' allows movement of public IPs between nodes
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb moveip' allows movement of public IPs between cluster nodes.
-# This test does not do any network level checks to make sure IP
-# addresses are actually on interfaces. It just consults "ctdb ip".
+This test does not do any network level checks to make sure IP
+addresses are actually on interfaces. It just consults "ctdb ip".
-# To work, this test ensures that IPAllocAlgorithm is not set to 0
-# (Deterministic IPs) and sets NoIPFailback.
+To work, this test ensures that IPAllocAlgorithm is not set to 0
+(Deterministic IPs) and sets NoIPFailback.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node_and_ips
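
A hand-run equivalent of the moveip check, with an invented address and target node, and using the NoIPFailback tunable mentioned in the preamble:

    ctdb setvar NoIPFailback 1     # stop the address from failing straight back
    ctdb moveip 10.0.0.31 1        # move the public IP to node 1
    ctdb ip all | grep 10.0.0.31   # confirm node 1 now hosts it
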
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh b/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh
index 9a1616ceefc..c0bb62d1991 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh
@@ -1,15 +1,22 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify the operation of "ctdb disable" and "ctdb enable"
+test_info()
+{
+ cat <<EOF
+Verify the operation of "ctdb disable" and "ctdb enable"
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
-set -e
-
ctdb_test_init
########################################
+set -e
+
+cluster_is_healthy
+
select_test_node_and_ips
echo "Disabling node $test_node"
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh b/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh
index f5936b03d88..d9a64508180 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh
@@ -1,12 +1,19 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify the operation of "ctdb stop" and "ctdb continue"
+test_info()
+{
+ cat <<EOF
+Verify the operation of "ctdb stop" and "ctdb continue"
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node_and_ips
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh b/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh
index a39e48d0883..9af55d8e7ef 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh
@@ -1,13 +1,35 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb setvar NoIPTakeover 1' stops IP addresses being taken over
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb setvar NoIPTakeover 1' stops ip addresses from being failed
+over onto the node.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
-set -e
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Use 'ctdb ip' on one of the nodes to list the IP addresses being
+ served.
+3. Use 'ctdb moveip' to move an address from one node to another.
+4. Verify that the IP is no longer being hosted by the first node and is now being hosted by the second node.
+
+Expected results:
+
+* 'ctdb moveip' allows an IP address to be moved between cluster nodes.
+EOF
+}
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
ctdb_test_init
+cluster_is_healthy
+
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
echo "There are $num_nodes nodes..."
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh b/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh
index 543f9a96d89..c455784c886 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh
@@ -1,17 +1,24 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that the recovery daemon handles unhosted IPs properly
+test_info()
+{
+ cat <<EOF
+Verify that the recovery daemon handles unhosted IPs properly.
-# This test does not do any network level checks to make sure the IP
-# address is actually on an interface. It just consults "ctdb ip".
+This test does not do any network level checks to make sure the IP
+address is actually on an interface. It just consults "ctdb ip".
-# This is a variation of the "addip" test.
+This is a variation of the "addip" test.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node_and_ips
diff --git a/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh b/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh
index 4ca6e462ad4..7e74c088847 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh
@@ -1,12 +1,29 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Use 'onnode' to confirm connectivity between all cluster nodes
+test_info()
+{
+ cat <<EOF
+Use 'onnode' to confirm connectivity between all cluster nodes.
-. "${TEST_SCRIPTS_DIR}/integration.bash"
+Steps:
+
+1. Do a recursive "onnode all" to make sure all the nodes can connect
+ to each other. On a cluster this ensures that SSH keys are known
+ between all hosts, which will stop output being corrupted with
+ messages about nodes being added to the list of known hosts.
-set -e
+Expected results:
+
+* 'onnode' works between all nodes.
+EOF
+}
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
ctdb_test_init
+
+#
+
echo "Checking connectivity between nodes..."
onnode all onnode -p all hostname
diff --git a/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh b/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh
index aafe27e6cc1..722c9d2a035 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh
@@ -1,12 +1,34 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb listnodes' shows the list of nodes
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb listnodes' shows the list of nodes in a ctdb cluster.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run 'ctdb listnodes' on all the nodes of the cluster.
+3. Verify that on all the nodes the command displays a list of
+ current cluster nodes.
+
+Expected results:
+
+* 'ctdb listnodes' displays the correct information.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node -v 0 "$CTDB listnodes"
diff --git a/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh b/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh
index 6f362c685b3..2e7a08b6f6f 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh
@@ -1,12 +1,19 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify the operation of "ctdb listvars", "ctdb getvar", "ctdb setvar"
+test_info()
+{
+ cat <<EOF
+Verify the operation of "ctdb listvars", "ctdb getvar", "ctdb setvar"
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node -v 0 "$CTDB listvars"
diff --git a/ctdb/tests/INTEGRATION/simple/basics.003.ping.sh b/ctdb/tests/INTEGRATION/simple/basics.003.ping.sh
index 8071762ca40..1ff37b903f7 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.003.ping.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.003.ping.sh
@@ -1,21 +1,39 @@
-#!/usr/bin/env bash
-
-# Verify the operation of the 'ctdb ping' command
-#
-# 1. Run the 'ctdb ping' command on one of the nodes and verify that it
-# shows valid and expected output.
-# 2. Shutdown one of the cluster nodes, using the 'ctdb shutdown'
-# command.
-# 3. Run the 'ctdb ping -n <node>' command from another node to this
-# node.
-# 4. Verify that the command is not successful since th ctdb daemon is
-# not running on the node.
+#!/bin/bash
+
+test_info()
+{
+ cat <<EOF
+Verify the operation of the 'ctdb ping' command.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run the 'ctdb ping' command on one of the nodes and verify that it
+ shows valid and expected output.
+3. Shutdown one of the cluster nodes, using the 'ctdb shutdown'
+ command.
+4. Run the 'ctdb ping -n <node>' command from another node to this
+ node.
+5. Verify that the command is not successful since the ctdb daemon is
+ not running on the node.
+
+Expected results:
+
+* The 'ctdb ping' command shows valid and expected output.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node -v 0 "$CTDB ping -n 1"
diff --git a/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh b/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh
index 27025df9309..48540d052b2 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh
@@ -1,12 +1,34 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb getpid' works as expected
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb getpid' works as expected.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run 'ctdb getpid -n <number>' on the nodes to check the PID of the
+ ctdbd process.
+3. Verify that the output is valid.
+
+Expected results:
+
+* 'ctdb getpid' shows valid output.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
diff --git a/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh b/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh
index c6212fda52e..ae331bcec33 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh
@@ -1,15 +1,39 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb process-exists' shows correct information
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb process-exists' shows correct information.
+
+The implementation is creative about how it gets PIDs for existing and
+non-existing processes.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. On one of the cluster nodes, get the PID of a ctdb client.
+3. Run 'ctdb process-exists <pid>' on the node and verify that the
+ correct output is shown.
+4. Run 'ctdb process-exists <pid>' with a pid of ctdb daemon
+ process and verify that the correct output is shown.
-# The implementation is creative about how it gets PIDs for existing and
-# non-existing processes.
+Expected results:
+
+* 'ctdb process-exists' shows the correct output.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
test_node=1
srvid=0xAE00000012345678
diff --git a/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh b/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh
index d97e0353b2e..cda6e5f478e 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh
@@ -1,14 +1,35 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb statistics' works as expected
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb statistics' works as expected.
-# This is pretty superficial and could do more validation.
+This is pretty superficial and could do more validation.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run 'ctdb statistics' on a node, and verify that the output is
+ valid.
+
+Expected results:
+
+* 'ctdb statistics' shows valid output on all the nodes.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
pattern='^(CTDB version 1|Current time of statistics[[:space:]]*:.*|Statistics collected since[[:space:]]*:.*|Gathered statistics for [[:digit:]]+ nodes|[[:space:]]+[[:alpha:]_]+[[:space:]]+[[:digit:]]+|[[:space:]]+(node|client|timeouts|locks)|[[:space:]]+([[:alpha:]_]+_latency|max_reclock_[[:alpha:]]+)[[:space:]]+[[:digit:]-]+\.[[:digit:]]+[[:space:]]sec|[[:space:]]*(locks_latency|reclock_ctdbd|reclock_recd|call_latency|lockwait_latency|childwrite_latency)[[:space:]]+MIN/AVG/MAX[[:space:]]+[-.[:digit:]]+/[-.[:digit:]]+/[-.[:digit:]]+ sec out of [[:digit:]]+|[[:space:]]+(hop_count_buckets|lock_buckets):[[:space:][:digit:]]+)$'
diff --git a/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh b/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh
index 51f34d9f034..1dce7b39965 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh
@@ -1,15 +1,36 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb statisticsreset' works as expected
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb statisticsreset' works as expected.
+
+This is pretty superficial. It just checks that a few particular
+items reduce.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run 'ctdb statisticsreset' on all nodes and verify that it executes
+ successfully.
-# This is pretty superficial. It just checks that a few particular
-# items reduce.
+Expected results:
+
+* 'ctdb statisticsreset' executes successfully.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh b/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh
index 7d8620bc2f5..68e22879ba3 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh
@@ -1,12 +1,36 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify the operation of 'ctdb isnotrecmaster'
+test_info()
+{
+ cat <<EOF
+Verify the operation of 'ctdb isnotrecmaster'.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run 'ctdb isnotrecmaster' on each node.
+
+3. Verify that only 1 node shows the output 'This node is the
+ recmaster' and all the other nodes show the output 'This node is
+ not the recmaster'.
+
+Expected results:
+
+* 'ctdb isnotrecmaster' shows the correct output.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
cmd="$CTDB isnotrecmaster || true"
try_command_on_node -v all "$cmd"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh b/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh
index 67406dc29d2..f32a784863f 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh
@@ -1,12 +1,34 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb stop' causes a node to yield the recovery master role
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb stop' causes a node to yield the recovery master role.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Determine which node is the recmaster.
+2. Stop this node using the 'ctdb stop' command.
+3. Verify that the status of the node changes to 'stopped'.
+4. Verify that this node no longer has the recovery master role.
+
+Expected results:
+
+* The 'ctdb stop' command causes a node to yield the recmaster role.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
echo "Finding out which node is the recovery master..."
try_command_on_node -v 0 "$CTDB recmaster"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh b/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh
index 58633af973d..8cedd34045b 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh
@@ -1,14 +1,21 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that "ctdb getreclock" gets the recovery lock correctly
+test_info()
+{
+ cat <<EOF
+Verify that "ctdb getreclock" gets the recovery lock correctly.
-# Make sure the recovery lock is consistent across all nodes.
+Make sure the recovery lock is consistent across all nodes.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
echo "Check that recovery lock is set the same on all nodes..."
try_command_on_node -v -q all $CTDB getreclock
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh b/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh
index 76be340d7cb..8199b079610 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh
@@ -1,10 +1,15 @@
-#!/usr/bin/env bash
-
-# Check that CTDB operates correctly if the recovery lock is configured
-# as a command.
-
-# This test works only with local daemons. On a real cluster it has
-# no way of updating configuration.
+#!/bin/bash
+
+test_info()
+{
+ cat <<EOF
+Check that CTDB operates correctly if the recovery lock is configured
+as a command.
+
+This test only does anything with local daemons. On a real cluster it
+has no way of updating configuration.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh b/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh
index eccf0bea9da..d74bcf819b4 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Verify that the cluster recovers if the recovery lock is removed.
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh b/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh
index e6f63417abd..cd193f05a49 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
# Verify that if the directory containing the recovery lock is moved
# then all nodes are banned (because they can't take the lock).
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh b/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh
index b841f5b02f2..6f90c8fd5bb 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh
@@ -1,12 +1,23 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Run the message_ring test and sanity check the output
+test_info()
+{
+ cat <<EOF
+Run the message_ring test and sanity check the output.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh b/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh
index f86d0802218..c78c4209ea6 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh
@@ -1,12 +1,23 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Run tunnel_test and sanity check the output
+test_info()
+{
+ cat <<EOF
+Run tunnel_test and sanity check the output.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.090.unreachable.sh b/ctdb/tests/INTEGRATION/simple/cluster.090.unreachable.sh
index 2835e55cd97..d7c0c0f9100 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.090.unreachable.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.090.unreachable.sh
@@ -1,16 +1,44 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify an error occurs if a ctdb command is run against a node
-# without a ctdbd
+test_info()
+{
+ cat <<EOF
+Verify an error occurs if a ctdb command is run against a node without a ctdbd.
-# That is, check that an error message is printed if an attempt is made
-# to execute a ctdb command against a node that is not running ctdbd.
+That is, check that an error message is printed if an attempt is made
+to execute a ctdb command against a node that is not running ctdbd.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Shutdown ctdb on a node using 'ctdb shutdown -n <node>'.
+3. Verify that the status of the node changes to 'DISCONNECTED'.
+4. Now run 'ctdb ip -n <node>' from another node.
+5. Verify that an error message is printed stating that the node is
+ disconnected.
+6. Execute some other commands against the shutdown node. For example,
+ disable, enable, ban, unban, listvars.
+7. For each command, verify that an error message is printed stating
+ that the node is disconnected.
+
+Expected results:
+
+* For a node on which ctdb is not running, all commands display an
+ error message stating that the node is disconnected.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
test_node=1
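A rough sketch of steps 2, 4 and 6 above; each command is expected to fail against the shut-down node, so '|| true' mirrors the pattern used elsewhere in these scripts to keep the helper from aborting, and the exact error text is not checked here:

# Shut down ctdb on the test node, then poke it from node 0.
try_command_on_node 0 "$CTDB shutdown -n $test_node"
for cmd in ip disable enable listvars ; do
    # Each of these should print a "node is disconnected" style error.
    try_command_on_node -v 0 "$CTDB $cmd -n $test_node || true"
done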
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh b/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh
index be7175036dc..fa934e8bef1 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh
@@ -1,6 +1,11 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Check that the CTDB version consistency checking operates correctly
+test_info()
+{
+ cat <<EOF
+Check that the CTDB version consistency checking operates correctly.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
@@ -10,6 +15,8 @@ ctdb_test_skip_on_cluster
ctdb_test_init
+cluster_is_healthy
+
select_test_node
try_command_on_node -v "$test_node" ctdb version
diff --git a/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh b/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh
index 2220a208125..979392f9a1d 100755
--- a/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh
+++ b/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh
@@ -1,12 +1,32 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb getdebug' works as expected
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb getdebug' works as expected.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Get the current debug level on a node, using 'ctdb getdebug -n <node>'.
+
+Expected results:
+
+* 'ctdb getdebug' shows the debug level on all the nodes.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
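Step 2 above, written out as a loop over the node count already collected by the surrounding script; no particular output format is assumed:

# Ask node 0 for the debug level of every node in turn.
for n in $(seq 0 $((num_nodes - 1))) ; do
    try_command_on_node -v 0 "$CTDB getdebug -n $n"
done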
diff --git a/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh b/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh
index dd5949eec77..3e04ca33733 100755
--- a/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh
+++ b/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh
@@ -1,16 +1,23 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb setdebug' works as expected.
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb setdebug' works as expected.
-# This is a little superficial. It checks that CTDB thinks the debug
-# level has been changed but doesn't actually check that logging occurs
-# at the new level.
+This is a little superficial. It checks that CTDB thinks the debug
+level has been changed but doesn't actually check that logging occurs
+at the new level.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
select_test_node
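A minimal round trip for the check described above: set a level on the selected test node and read it back. 'NOTICE' is just an example level and, as the description says, this only confirms that CTDB reports the new level:

# Set a debug level, then confirm CTDB reports it.
try_command_on_node "$test_node" "$CTDB setdebug NOTICE"
try_command_on_node -v "$test_node" "$CTDB getdebug"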
diff --git a/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh b/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh
index 6205c27c6ca..3d976c17d0f 100755
--- a/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh
+++ b/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh
@@ -1,12 +1,32 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify that 'ctdb dumpmemory' shows expected output
+test_info()
+{
+ cat <<EOF
+Verify that 'ctdb dumpmemory' shows expected output.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 active nodes.
+
+Steps:
+
+1. Verify that the status on all of the ctdb nodes is 'OK'.
+2. Run 'ctdb dumpmemory' and verify that it shows expected output.
+
+Expected results:
+
+* 'ctdb dumpmemory' shows valid output.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
+ctdb_test_init
+
set -e
-ctdb_test_init
+cluster_is_healthy
pat='^([[:space:]].+[[:space:]]+contains[[:space:]]+[[:digit:]]+ bytes in[[:space:]]+[[:digit:]]+ blocks \(ref [[:digit:]]+\)[[:space:]]+0x[[:xdigit:]]+|[[:space:]]+reference to: .+|full talloc report on .+ \(total[[:space:]]+[[:digit:]]+ bytes in [[:digit:]]+ blocks\))$'
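One hedged way to apply the pattern above: run 'ctdb dumpmemory' everywhere and flag any line of the talloc report that does not match; how the real script uses $pat may differ:

# Any line that fails to match the talloc-report pattern is suspicious.
try_command_on_node -v all "$CTDB dumpmemory"
if echo "$out" | grep -qEv "$pat" ; then
    echo "BAD: unexpected line(s) in dumpmemory output"
fi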
diff --git a/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh b/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh
index fb1d031e982..4e55319000e 100755
--- a/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh
+++ b/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh
@@ -1,7 +1,14 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Check that CTDB operates correctly if there are 0 event scripts
+test_info()
+{
+ cat <<EOF
+Check that CTDB operates correctly if there are 0 event scripts.
+This test only does anything with local daemons. On a real cluster it
+has no way of updating configuration.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
diff --git a/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh b/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh
index 046989cabb1..0e39496155e 100755
--- a/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh
+++ b/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh
@@ -1,6 +1,19 @@
-#!/usr/bin/env bash
+#!/bin/bash
-# Verify CTDB's debugging of timed out eventscripts
+test_info()
+{
+ cat <<EOF
+Verify CTDB's debugging of timed-out eventscripts.
+
+Prerequisites:
+
+* An active CTDB cluster with monitoring enabled.
+
+Expected results:
+
+* When an eventscript times out the correct debugging is executed.
+EOF
+}
. "${TEST_SCRIPTS_DIR}/integration.bash"
@@ -10,6 +23,8 @@ ctdb_test_skip_on_cluster
ctdb_test_init
+cluster_is_healthy
+
select_test_node
####################