62 files changed, 313 insertions, 1060 deletions
diff --git a/ctdb/tests/INTEGRATION/database/basics.001.attach.sh b/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
index bcec20afc5e..e5c6b81ba65 100755
--- a/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
+++ b/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
@@ -1,35 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that 'ctdb getdbmap' operates as expected.
-
-This test creates some test databases using 'ctdb attach'.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Get the database on using 'ctdb getdbmap'.
-3. Verify that the output is valid.
-
-Expected results:
-
-* 'ctdb getdbmap' shows a valid listing of databases.
-EOF
-}
+# Verify that 'ctdb getdbmap' operates as expected
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 make_temp_db_filename ()
 {
diff --git a/ctdb/tests/INTEGRATION/database/basics.002.attach.sh b/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
index ae09741f980..6a5c812f35a 100755
--- a/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
+++ b/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
@@ -1,38 +1,19 @@
-#!/bin/bash
-
-test_info()
-{
-    cat <<EOF
-Verify the operation of 'ctdb attach' command.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Shut down one of the nodes
-3. Attach test databases
-4. Start shutdown node
-5. Verify that the databases are attached.
-6. Restart one of the nodes
-7. Verify that the databses are attached.
+#!/usr/bin/env bash
 
-
-Expected results:
-
-* Command 'ctdb attach' command successfully attaches databases.
-EOF
-}
+# Verify that databases are attached when a node joins the cluster:
+# 1. Shut down CTDB on one node
+# 2. Attach test databases
+# 3. Check that databases are attached on all up nodes
+# 4. Start CTDB on the node where it is shut down
+# 5. Verify that the test databases are attached on this node
+# 6. Restart one of the nodes
+# 7. Verify that the test databases are attached on this node
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 ######################################################################
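
The pattern the two attach tests above rely on can be summarised in a few lines. This is an illustrative sketch, not a hunk from the patch: the database name is made up, and it assumes the harness helpers (try_command_on_node and the $out variable) visible in these tests:

    testdb="attach_test.tdb"
    try_command_on_node 0 "$CTDB attach $testdb"
    # getdbmap prints one "dbid:... name:... path:..." line per database
    try_command_on_node all "$CTDB getdbmap"
    if echo "$out" | grep -q "name:$testdb " ; then
        echo "GOOD: database is attached on all nodes"
    else
        echo "BAD: database is missing from getdbmap"
        exit 1
    fi
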
"${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init ###################################################################### diff --git a/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh b/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh index 9305339d1e3..115d64cd606 100755 --- a/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh +++ b/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh @@ -1,36 +1,17 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -The command 'ctdb wipedb' is used to clear a database across the whole -cluster. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Create a persistent test database -3. Add some records to node #0 and node #1 -4. Perform wipedb on node #0 and verify the database is empty on both node 0 and 1 - -Expected results: - -* An empty database will result - -EOF -} +# Verify that 'ctdb wipedb' can clear a persistent database: +# 1. Verify that the status on all of the ctdb nodes is 'OK'. +# 2. Create a persistent test database +# 3. Add some records to node 0 and node 1 +# 4. Run wipedb on node 0 +# 5. verify the database is empty on both node 0 and 1 . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes | wc -l" num_nodes="$out" diff --git a/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh b/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh index 579233e0e80..8c469d4642b 100755 --- a/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh +++ b/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh @@ -1,39 +1,18 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -The command 'ctdb restoredb' is used to restore a database across the -whole cluster. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Create a persistent test database -3. Add some records to test database -4. Backup database -5. Wipe database and verify the database is empty on all nodes -6. Restore database and make sure all the records are restored -7. Make sure no recovery has been triggered - -Expected results: - -* Database operations should not cause a recovery - -EOF -} +# Confirm that 'ctdb restoredb' works correctly: +# 1. Create a persistent test database +# 2. Add some records to test database +# 3. Backup database +# 4. Wipe database and verify the database is empty on all nodes +# 5. Restore database and make sure all the records are restored +# 6. Make sure no recovery has been triggered . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 $CTDB status generation=$(sed -n -e 's/^Generation:\([0-9]*\)/\1/p' "$outfile") diff --git a/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh b/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh index 33df07b7d88..4d7d392240d 100755 --- a/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh +++ b/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh @@ -1,23 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Run the fetch_ring test and sanity check the output. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. -EOF -} +# Run the fetch_ring test and sanity check the output . 
"${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes | wc -l" num_nodes="$out" diff --git a/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh b/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh index c4e0023e11c..6d442535a21 100755 --- a/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh +++ b/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Run the fetch_ring test, sanity check the output and check hot keys # statistics diff --git a/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh b/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh index a83b4d31530..20faa3aa03b 100755 --- a/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh +++ b/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh @@ -1,46 +1,28 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Read-only records can be activated at runtime using a ctdb command. -If read-only records are not activated, then any attempt to fetch a read-only -copy should be automatically upgraded to a read-write fetch_lock(). - -If read-only delegations are present, then any attempt to aquire a read-write -fetch_lock will trigger all delegations to be revoked before the fetch lock -completes. - - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: +# Test support for read-only records -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. create a test database and some records -3. try to fetch read-only records, this should not result in any delegations -4. activate read-only support -5. try to fetch read-only records, this should result in delegations -6. do a fetchlock and the delegations should be revoked -7. try to fetch read-only records, this should result in delegations -8. do a recovery and the delegations should be revoked +# Read-only records can be activated at runtime using a ctdb command. +# If read-only records are not activated, then any attempt to fetch a +# read-only copy should be automatically upgraded to a read-write +# fetch_locked(). -Expected results: +# If read-only delegations are present, then any attempt to acquire a +# read-write fetch_lock will trigger revocation of all delegations +# before the fetch_locked(). -Delegations should be created and revoked as above - -EOF -} +# 1. Create a test database and some records +# 2. Try to fetch read-only records, this should not result in any delegations +# 3. Activate read-only support +# 4. Try to fetch read-only records, this should result in delegations +# 5. Do a fetchlock and the delegations should be revoked +# 6. Try to fetch read-only records, this should result in delegations . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init ###################################################################### diff --git a/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh index a8bc2d95ef6..e523e835de7 100755 --- a/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh +++ b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh @@ -1,45 +1,33 @@ -#!/bin/bash - -test_info() -{ - cat <<EOF -Recovery can under certain circumstances lead to old record copies -resurrecting: Recovery selects the newest record copy purely by RSN. 
diff --git a/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
index a8bc2d95ef6..e523e835de7 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
@@ -1,45 +1,33 @@
-#!/bin/bash
-
-test_info()
-{
-    cat <<EOF
-Recovery can under certain circumstances lead to old record copies
-resurrecting: Recovery selects the newest record copy purely by RSN. At
-the end of the recovery, the recovery master is the dmaster for all
-records in all (non-persistent) databases. And the other nodes locally
-hold the complete copy of the databases. The bug is that the recovery
-process does not increment the RSN on the recovery master at the end of
-the recovery. Now clients acting directly on the Recovery master will
-directly change a record's content on the recmaster without migration
-and hence without RSN bump. So a subsequent recovery can not tell that
-the recmaster's copy is newer than the copies on the other nodes, since
-their RSN is the same. Hence, if the recmaster is not node 0 (or more
-precisely not the active node with the lowest node number), the recovery
-will choose copies from nodes with lower number and stick to these.
-
-Steps:
-
-1. Create a test database
-2. Add a record with value value1 on recovery master
-3. Force a recovery
-4. Update the record with value value2 on recovery master
-5. Force a recovery
-6. Fetch the record
-
-Expected results:
-
-* The record should have value value2 and not value1
-
-EOF
-}
+#!/usr/bin/env bash
+
+# Test that recovery correctly handles RSNs
+
+# Recovery can under certain circumstances lead to old record copies
+# resurrecting: Recovery selects the newest record copy purely by RSN. At
+# the end of the recovery, the recovery master is the dmaster for all
+# records in all (non-persistent) databases. And the other nodes locally
+# hold the complete copy of the databases. The bug is that the recovery
+# process does not increment the RSN on the recovery master at the end of
+# the recovery. Now clients acting directly on the Recovery master will
+# directly change a record's content on the recmaster without migration
+# and hence without RSN bump. So a subsequent recovery can not tell that
+# the recmaster's copy is newer than the copies on the other nodes, since
+# their RSN is the same. Hence, if the recmaster is not node 0 (or more
+# precisely not the active node with the lowest node number), the recovery
+# will choose copies from nodes with lower number and stick to these.
+
+# 1. Create a test database
+# 2. Add a record with value value1 on recovery master
+# 3. Force a recovery
+# 4. Update the record with value value2 on recovery master
+# 5. Force a recovery
+# 6. Confirm that the value is value2
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 #
 # Main test
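
The steps in the comment above map onto ctdb tool commands roughly as follows. This is a sketch, assuming $recmaster holds the node number reported by 'ctdb recmaster' and that the tool's writekey/readkey subcommands are used for the volatile database:

    db="rec_rsn_test.tdb"
    try_command_on_node $recmaster "$CTDB writekey $db test_key value1"
    try_command_on_node 0 "$CTDB recover"
    try_command_on_node $recmaster "$CTDB writekey $db test_key value2"
    try_command_on_node 0 "$CTDB recover"
    # Without the RSN bump described above, this could return value1
    try_command_on_node -v $recmaster "$CTDB readkey $db test_key"
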
diff --git a/ctdb/tests/INTEGRATION/database/recovery.002.large.sh b/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
index 4f544fa8475..4c0f7024a99 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
@@ -1,33 +1,23 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Older style of recovery using PULL_DB and PUSH_DB controls tries to
-construct a single large marshall buffer for all the records in the
-database. However, this approach is problematic as talloc restricts the
-maximum size of buffer to 256M. Also, trying to construct and send large
-buffers is inefficient and can cause CTDB daemon to be tied up for long
-periods of time.
+# Test recovery of large volatile and persistent databases
 
-Instead new style recovery is introduced using DB_PULL and
-DB_PUSH_START/DB_PUSH_CONFIRM controls. This sends the records in
-batches of ~RecBufferSizeLimit in size at a time.
+# Older style of recovery using PULL_DB and PUSH_DB controls tries to
+# construct a single large marshall buffer for all the records in the
+# database. However, this approach is problematic as talloc restricts the
+# maximum size of buffer to 256M. Also, trying to construct and send large
+# buffers is inefficient and can cause CTDB daemon to be tied up for long
+# periods of time.
 
-Expected results:
-
-* The recovery should complete successfully
-
-EOF
-}
+# Instead new style recovery is introduced using DB_PULL and
+# DB_PUSH_START/DB_PUSH_CONFIRM controls. This sends the records in
+# batches of ~RecBufferSizeLimit in size at a time.
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 #
 # Main test
diff --git a/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh b/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
index b65452c0e06..b314d4d86eb 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
@@ -1,19 +1,13 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Ensure recovery doesn't resurrect deleted records from recently inactive nodes
-EOF
-}
+# Ensure recovery doesn't resurrect deleted records from recently
+# inactive nodes
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 testdb="rec_test.tdb"
diff --git a/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh b/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
index 84e0ec15c43..d13a9a5ccac 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
@@ -1,39 +1,20 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-The persistent databases are recovered using sequence number.
-The recovery is performed by picking the copy of the database from the
-node that has the highest sequence number and ignore the content on all
-other nodes.
-
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. create a persistent test database
-3. test that no seqnum record blends the database during recovery
-4. test that seqnum record does not blend the database during recovery
-
-Expected results:
-
-* that 3,4 will recover the highest seqnum database
-
-EOF
-}
+# Ensure that persistent databases are correctly recovered by database
+# sequence number
+#
+# 1. Create and wipe a persistent test database
+# 2. Directly add a single record to the database on each node
+# 3. Trigger a recovery
+# 4. Ensure that the database contains only a single record
+#
+# Repeat but with sequence numbers set by hand on each node
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 try_command_on_node 0 "$CTDB listnodes | wc -l"
 num_nodes="$out"
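
For the sequence-number-based recovery described above, the observable is the per-database sequence number. A hedged one-liner sketch; whether getdbseqnum accepts a database name or only a database ID varies by ctdb version, so treat the argument form as an assumption:

    # The copy with the highest sequence number wins the recovery
    try_command_on_node -v 0 "$CTDB getdbseqnum persistent_test.tdb"
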
diff --git a/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh b/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
index 81aa6e089d3..3376e062bed 100755
--- a/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
+++ b/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
@@ -1,33 +1,20 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-This test confirms that the deleted records are not resurrected after recovery.
-
-Steps:
-
-1. Create a persistent database
-2. Add a record and update it few times.
-3. Delete the record
-4. Turn off one of the nodes
-5. Add a record with same key.
-6. Turn on the stopped node
-
-Expected results:
-
-* Check that the deleted record is present after recovery.
-
-EOF
-}
+# Confirm that the deleted records are not resurrected after recovery
+#
+# 1. Create a persistent database
+# 2. Add a record and update it a few times.
+# 3. Delete the record
+# 4. Use "ctdb stop" to stop one of the nodes
+# 5. Add a record with the same key.
+# 6. Continue on the stopped node
+# 7. Confirm that the record still exists
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 do_test()
 {
diff --git a/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh b/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
index 72b18348398..556e52330a6 100755
--- a/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
+++ b/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
@@ -1,32 +1,15 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that the ctdb ptrans works as expected
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Pipe some operation to ctdb ptrans and validate the TDB contents with ctdb catdb
-
-Expected results:
-
-* ctdb ptrans works as expected.
-EOF
-}
+# Verify that 'ctdb ptrans' works as expected
+#
+# Pipe some operation to ctdb ptrans and validate the TDB contents
+# with ctdb catdb
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 TESTDB="ptrans_test.tdb"
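
To make the ptrans test concrete: key/value pairs are piped in on stdin, one pair per line, and the result is checked with catdb. A sketch in the style of these tests; the -i option for feeding stdin through try_command_on_node is an assumption about the harness:

    items='"key1" "value1"
"key2" "value2"'
    echo "$items" | try_command_on_node -i 0 "$CTDB ptrans ptrans_test.tdb"
    try_command_on_node -v 0 "$CTDB catdb ptrans_test.tdb"
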
"${TEST_SCRIPTS_DIR}/integration.bash" - -ctdb_test_init - -set -e - -cluster_is_healthy - TESTDB="persistent_trans.tdb" try_command_on_node 0 "$CTDB attach $TESTDB persistent" diff --git a/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh b/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh index 90b5ee06151..528303a6435 100755 --- a/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh +++ b/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh @@ -1,38 +1,21 @@ -#!/bin/bash - -test_info() -{ - cat <<EOF -UPDATE_RECORD control should be able to create new records and update -existing records in a persistent database. - -Prerequisites: - -* An active CTDB cluster with at least one active node. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. create a persistent test database -3, wipe the database to make sure it is empty -4, create a new record -5, update the record - -Expected results: - -* 4 created record found in the tdb -* 5 updated record found in the tdb - -EOF -} +#!/usr/bin/env bash + +# Verify that "ctdb update_record_persistent" creates new records and +# updates existing records in a persistent database +# +# 1. Create and wipe a persistent test database +# 2. Do a recovery +# 3. Confirm that the database is empty +# 4. Create a new record using "ctdb update_record_persistent" +# 5. Confirm the record exists in the database using "ctdb cattdb" +# 6. Update the record's value using "ctdb update_record_persistent" +# 7. Confirm that the original value no longer exists using "ctdb cattdb" . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes | wc -l" num_nodes="$out" @@ -44,7 +27,7 @@ echo "Create persistent test database \"$test_db\"" try_command_on_node 0 $CTDB attach "$test_db" persistent -# 3, +# 3. echo "Wipe the persistent test database" try_command_on_node 0 $CTDB wipedb "$test_db" echo "Force a recovery" @@ -59,7 +42,7 @@ else exit 1 fi -# 4, +# 4. echo "Create a new record in the persistent database using UPDATE_RECORD" try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \ -D "$test_db" -k "Update_Record_Persistent" -v "FirstValue" @@ -72,7 +55,7 @@ else exit 1 fi -# 5, +# 5. echo Modify an existing record in the persistent database using UPDATE_RECORD try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \ -D "$test_db" -k "Update_Record_Persistent" -v "SecondValue" diff --git a/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh b/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh index 4d5c1297fa9..9de6c34be40 100755 --- a/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh +++ b/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh @@ -1,16 +1,13 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that the transaction_loop test succeeds with recoveries for replicated -databases. +# Verify that the transaction_loop test succeeds with recoveries for +# replicated databases -Prerequisites: +. "${TEST_SCRIPTS_DIR}/integration.bash" -* An active CTDB cluster with at least 2 active nodes. -EOF -} +set -e + +ctdb_test_init recovery_loop() { @@ -31,14 +28,6 @@ recovery_loop_start() ctdb_test_exit_hook_add "kill $RECLOOP_PID >/dev/null 2>&1" } -. 
"${TEST_SCRIPTS_DIR}/integration.bash" - -ctdb_test_init - -set -e - -cluster_is_healthy - TESTDB="replicated_trans.tdb" try_command_on_node 0 "$CTDB attach $TESTDB replicated" diff --git a/ctdb/tests/INTEGRATION/database/traverse.001.one.sh b/ctdb/tests/INTEGRATION/database/traverse.001.one.sh index 7f3007d5105..1b3b7c2d25c 100755 --- a/ctdb/tests/INTEGRATION/database/traverse.001.one.sh +++ b/ctdb/tests/INTEGRATION/database/traverse.001.one.sh @@ -1,29 +1,17 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Confirm that traverses of volatile databases work as expected - -This is a very simple example. It writes a single record, updates it -on another node and then confirms that the correct value is found when -traversing. It then repeats this after removing the LMASTER role from -the node where the value is updated. - -Expected results: +# Confirm that traverses of volatile databases work as expected -* The expected records should be found - -EOF -} +# This is a very simple example. It writes a single record, updates it +# on another node and then confirms that the correct value is found when +# traversing. It then repeats this after removing the LMASTER role from +# the node where the value is updated. . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init # # Main test diff --git a/ctdb/tests/INTEGRATION/database/traverse.002.many.sh b/ctdb/tests/INTEGRATION/database/traverse.002.many.sh index d28f9c2814e..fb0dc98f52b 100755 --- a/ctdb/tests/INTEGRATION/database/traverse.002.many.sh +++ b/ctdb/tests/INTEGRATION/database/traverse.002.many.sh @@ -1,34 +1,16 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Test CTDB cluster wide traverse code. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Create a test database -2. Add records on different nodes -3. Run traverse - -Expected results: - -* All records are retrieved. - -EOF -} +# Test cluster wide traverse code +# +# 1. Create a volatile test database +# 2. Add records on different nodes +# 3. Use "ctdb catdb" to confirm that all added records are present . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes" num_nodes=$(echo "$out" | wc -l) diff --git a/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh index 651de4df57c..27a2225f437 100755 --- a/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh +++ b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Ensure that vacuuming deletes records on all nodes diff --git a/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh index cc1113b02f5..0dc8372a6a8 100755 --- a/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh +++ b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Ensure a full vacuuming run deletes records diff --git a/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh index 00cb2c3df29..acb7b135e55 100755 --- a/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh +++ b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Ensure that vacuuming does not delete a record that is recreated # before vacuuming completes. This needs at least 3 nodes. 
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
index 651de4df57c..27a2225f437 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Ensure that vacuuming deletes records on all nodes
 
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
index cc1113b02f5..0dc8372a6a8 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Ensure a full vacuuming run deletes records
 
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
index 00cb2c3df29..acb7b135e55 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Ensure that vacuuming does not delete a record that is recreated
 # before vacuuming completes.  This needs at least 3 nodes.
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
index 193a769096d..386252633c8 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Confirm that a record is not vacuumed if it is locked when the 1st
 # fast vacuuming run occurs on the node on which it was deleted, but
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
index 284808c8775..b6adbf73c87 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Confirm that a record is vacuumed if it is locked on the deleting
 # node when the 2nd fast vacuuming run occurs, but vacuuming is
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
index 2f983aa79d6..481d1d44428 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Confirm that a record is not vacuumed if it is locked on the lmaster
 # when the 3rd fast vacuuming run occurs, but is dropped from the
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
index 47b6be2c7f2..63d7d1f3938 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Confirm that a record is not vacuumed if it is locked on the
 # deleting node when the 3rd fast vacuuming run occurs, but is dropped
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
index 405ddbb8034..7f37ada3115 100755
--- a/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
+++ b/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Confirm that a record is not vacuumed if it is locked on another
 # (non-lmaster, non-deleting) node when the 3rd fast vacuuming run
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh b/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh
index d8e0097362f..2fc75b731b9 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.001.list.sh
@@ -1,35 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that 'ctdb ip' shows the correct output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb ip' on one of the nodes and verify the list of IP
-   addresses displayed (cross check the result with the output of
-   'ip addr show' on the node).
-3. Verify that pipe-separated output is generated with the -X option.
-
-Expected results:
-
-* 'ctdb ip' shows the list of public IPs being served by a node.
-EOF
-}
+# Verify that 'ctdb ip' shows the correct output
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 echo "Getting list of public IPs..."
 try_command_on_node -v 1 "$CTDB ip all | tail -n +2"
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh b/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh
index aefed54c45e..aba85dd0499 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.010.addip.sh
@@ -1,22 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that an IP address can be added to a node using 'ctdb addip'.
-
-This test does not do any network level checks to make sure IP
-addresses are actually on interfaces.  It just consults "ctdb ip".
-EOF
-}
+# Verify that an IP address can be added to a node using 'ctdb addip'
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 select_test_node_and_ips
 get_test_ip_mask_and_iface
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh b/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh
index d3f0f3a012b..5235a9d3203 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.011.delip.sh
@@ -1,22 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that a node's public IP address can be deleted using 'ctdb deleteip'.
-
-This test does not do any network level checks to make sure IP
-addresses are actually on interfaces.  It just consults "ctdb ip".
-EOF
-}
+# Verify that a node's public IP address can be deleted using 'ctdb deleteip'
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 select_test_node_and_ips
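
For the addip/delip pair above, the checks reduce to consulting "ctdb ip" before and after each change. A sketch using the helpers visible in these tests ($test_node, $test_ip, $mask and $iface come from select_test_node_and_ips and get_test_ip_mask_and_iface); note the deleting subcommand is spelled delip in the ctdb tool, even though the old test text said deleteip:

    try_command_on_node $test_node "$CTDB addip ${test_ip}/${mask} $iface"
    try_command_on_node $test_node "$CTDB ip"    # $test_ip should appear
    try_command_on_node $test_node "$CTDB delip $test_ip"
    try_command_on_node $test_node "$CTDB ip"    # ... and disappear again
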
"${TEST_SCRIPTS_DIR}/integration.bash" diff --git a/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh b/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh index 2756f64ceb1..845b4b50fdb 100755 --- a/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh +++ b/ctdb/tests/INTEGRATION/failover/pubips.014.iface_gc.sh @@ -1,19 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that an interface is deleted when all IPs on it are deleted. -EOF -} +# Verify that an interface is deleted when all IPs on it are deleted . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init select_test_node_and_ips diff --git a/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh b/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh index 699ccc3d2ee..68b5e079d66 100755 --- a/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh +++ b/ctdb/tests/INTEGRATION/failover/pubips.020.moveip.sh @@ -1,25 +1,18 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb moveip' allows movement of public IPs between cluster nodes. +# Verify that 'ctdb moveip' allows movement of public IPs between nodes -This test does not do any network level checks to make sure IP -addresses are actually on interfaces. It just consults "ctdb ip". +# This test does not do any network level checks to make sure IP +# addresses are actually on interfaces. It just consults "ctdb ip". -To work, this test ensures that IPAllocAlgorithm is not set to 0 -(Deterministic IPs) and sets NoIPFailback. -EOF -} +# To work, this test ensures that IPAllocAlgorithm is not set to 0 +# (Deterministic IPs) and sets NoIPFailback. . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init select_test_node_and_ips diff --git a/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh b/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh index c0bb62d1991..9a1616ceefc 100755 --- a/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh +++ b/ctdb/tests/INTEGRATION/failover/pubips.030.disable_enable.sh @@ -1,22 +1,15 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify the operation of "ctdb disable" and "ctdb enable" -EOF -} +# Verify the operation of "ctdb disable" and "ctdb enable" . "${TEST_SCRIPTS_DIR}/integration.bash" +set -e + ctdb_test_init ######################################## -set -e - -cluster_is_healthy - select_test_node_and_ips echo "Disabling node $test_node" diff --git a/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh b/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh index d9a64508180..f5936b03d88 100755 --- a/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh +++ b/ctdb/tests/INTEGRATION/failover/pubips.032.stop_continue.sh @@ -1,19 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify the operation of "ctdb stop" and "ctdb continue" -EOF -} +# Verify the operation of "ctdb stop" and "ctdb continue" . 
"${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init select_test_node_and_ips diff --git a/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh b/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh index 9af55d8e7ef..a39e48d0883 100755 --- a/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh +++ b/ctdb/tests/INTEGRATION/failover/pubips.040.NoIPTakeover.sh @@ -1,34 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb setvar NoIPTakeover 1' stops ip addresses from being failed -over onto the node. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Use 'ctdb ip' on one of the nodes to list the IP addresses being - served. -3. Use 'ctdb moveip' to move an address from one node to another. -4. Verify that the IP is no longer being hosted by the first node and is now being hosted by the second node. - -Expected results: - -* 'ctdb moveip' allows an IP address to be moved between cluster nodes. -EOF -} +# Verify that 'ctdb setvar NoIPTakeover 1' stops IP addresses being taken over . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init +set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes | wc -l" num_nodes="$out" diff --git a/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh b/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh index c455784c886..543f9a96d89 100755 --- a/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh +++ b/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh @@ -1,24 +1,17 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that the recovery daemon handles unhosted IPs properly. +# Verify that the recovery daemon handles unhosted IPs properly -This test does not do any network level checks to make sure the IP -address is actually on an interface. It just consults "ctdb ip". +# This test does not do any network level checks to make sure the IP +# address is actually on an interface. It just consults "ctdb ip". -This is a variation of the "addip" test. -EOF -} +# This is a variation of the "addip" test. . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init select_test_node_and_ips diff --git a/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh b/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh index 7e74c088847..4ca6e462ad4 100755 --- a/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh +++ b/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh @@ -1,29 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Use 'onnode' to confirm connectivity between all cluster nodes. - -Steps: - -1. Do a recursive "onnode all" to make sure all the nodes can connect - to each other. On a cluster this ensures that SSH keys are known - between all hosts, which will stop output being corrupted with - messages about nodes being added to the list of known hosts. - -Expected results: - -* 'onnode' works between all nodes. -EOF -} +# Use 'onnode' to confirm connectivity between all cluster nodes . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - +set -e -# +ctdb_test_init echo "Checking connectivity between nodes..." 
diff --git a/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh b/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh
index c455784c886..543f9a96d89 100755
--- a/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh
+++ b/ctdb/tests/INTEGRATION/failover/pubips.050.missing_ip.sh
@@ -1,24 +1,17 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that the recovery daemon handles unhosted IPs properly.
+# Verify that the recovery daemon handles unhosted IPs properly
 
-This test does not do any network level checks to make sure the IP
-address is actually on an interface.  It just consults "ctdb ip".
+# This test does not do any network level checks to make sure the IP
+# address is actually on an interface.  It just consults "ctdb ip".
 
-This is a variation of the "addip" test.
-EOF
-}
+# This is a variation of the "addip" test.
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 select_test_node_and_ips
diff --git a/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh b/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh
index 7e74c088847..4ca6e462ad4 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.000.onnode.sh
@@ -1,29 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Use 'onnode' to confirm connectivity between all cluster nodes.
-
-Steps:
-
-1. Do a recursive "onnode all" to make sure all the nodes can connect
-   to each other.  On a cluster this ensures that SSH keys are known
-   between all hosts, which will stop output being corrupted with
-   messages about nodes being added to the list of known hosts.
-
-Expected results:
-
-* 'onnode' works between all nodes.
-EOF
-}
+# Use 'onnode' to confirm connectivity between all cluster nodes
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
+set -e
 
-#
+ctdb_test_init
 
 echo "Checking connectivity between nodes..."
 onnode all onnode -p all hostname
diff --git a/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh b/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh
index 722c9d2a035..aafe27e6cc1 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.001.listnodes.sh
@@ -1,34 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that 'ctdb listnodes' shows the list of nodes in a ctdb cluster.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Verify that the status on all of the ctdb nodes is 'OK'.
-2. Run 'ctdb listnodes' on all the nodes of the cluster.
-3. Verify that one all the nodes the command displays a list of
-   current cluster nodes.
-
-Expected results:
-
-* 'ctdb listnodes' displays the correct information.
-EOF
-}
+# Verify that 'ctdb listnodes' shows the list of nodes
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 try_command_on_node -v 0 "$CTDB listnodes"
diff --git a/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh b/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh
index 2e7a08b6f6f..6f362c685b3 100755
--- a/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh
+++ b/ctdb/tests/INTEGRATION/simple/basics.002.tunables.sh
@@ -1,19 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify the operation of "ctdb listvars", "ctdb getvar", "ctdb setvar"
-EOF
-}
+# Verify the operation of "ctdb listvars", "ctdb getvar", "ctdb setvar"
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 try_command_on_node -v 0 "$CTDB listvars"
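
A round trip through the three tunable commands verified above might look like this sketch. RecoverTimeout is just an example variable, and the parsing relies on getvar's "Name = value" output format:

    var="RecoverTimeout"
    try_command_on_node -v 0 "$CTDB getvar $var"
    old="${out##*= }"
    try_command_on_node 0 "$CTDB setvar $var $((old + 1))"
    try_command_on_node -v 0 "$CTDB getvar $var"    # should show old + 1
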
"${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node -v 0 "$CTDB ping -n 1" diff --git a/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh b/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh index 48540d052b2..27025df9309 100755 --- a/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh +++ b/ctdb/tests/INTEGRATION/simple/basics.004.getpid.sh @@ -1,34 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb getpid' works as expected. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Run 'ctdb getpid -n <number>' on the nodes to check the PID of the - ctdbd process. -3. Verify that the output is valid. - -Expected results: - -* 'ctdb getpid' shows valid output. -EOF -} +# Verify that 'ctdb getpid' works as expected . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes | wc -l" num_nodes="$out" diff --git a/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh b/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh index ae331bcec33..c6212fda52e 100755 --- a/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh +++ b/ctdb/tests/INTEGRATION/simple/basics.005.process_exists.sh @@ -1,39 +1,15 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb process-exists' shows correct information. - -The implementation is creative about how it gets PIDs for existing and -non-existing processes. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. On one of the cluster nodes, get the PID of a ctdb client. -3. Run 'ctdb process-exists <pid>' on the node and verify that the - correct output is shown. -4. Run 'ctdb process-exists <pid>' with a pid of ctdb daemon - process and verify that the correct output is shown. +# Verify that 'ctdb process-exists' shows correct information -Expected results: - -* 'ctdb process-exists' shows the correct output. -EOF -} +# The implementation is creative about how it gets PIDs for existing and +# non-existing processes. . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init test_node=1 srvid=0xAE00000012345678 diff --git a/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh b/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh index cda6e5f478e..d97e0353b2e 100755 --- a/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh +++ b/ctdb/tests/INTEGRATION/simple/basics.010.statistics.sh @@ -1,35 +1,14 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb statistics' works as expected. +# Verify that 'ctdb statistics' works as expected -This is pretty superficial and could do more validation. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Run 'ctdb statistics' on a node, and verify that the output is - valid. - -Expected results: - -* 'ctdb statistics' shows valid output on all the nodes. -EOF -} +# This is pretty superficial and could do more validation. . 
"${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init pattern='^(CTDB version 1|Current time of statistics[[:space:]]*:.*|Statistics collected since[[:space:]]*:.*|Gathered statistics for [[:digit:]]+ nodes|[[:space:]]+[[:alpha:]_]+[[:space:]]+[[:digit:]]+|[[:space:]]+(node|client|timeouts|locks)|[[:space:]]+([[:alpha:]_]+_latency|max_reclock_[[:alpha:]]+)[[:space:]]+[[:digit:]-]+\.[[:digit:]]+[[:space:]]sec|[[:space:]]*(locks_latency|reclock_ctdbd|reclock_recd|call_latency|lockwait_latency|childwrite_latency)[[:space:]]+MIN/AVG/MAX[[:space:]]+[-.[:digit:]]+/[-.[:digit:]]+/[-.[:digit:]]+ sec out of [[:digit:]]+|[[:space:]]+(hop_count_buckets|lock_buckets):[[:space:][:digit:]]+)$' diff --git a/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh b/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh index 1dce7b39965..51f34d9f034 100755 --- a/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh +++ b/ctdb/tests/INTEGRATION/simple/basics.011.statistics_reset.sh @@ -1,36 +1,15 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb statisticsreset' works as expected. - -This is pretty superficial. It just checks that a few particular -items reduce. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Run 'ctdb statisticsreset' on all nodes and verify that it executes - successfully. +# Verify that 'ctdb statisticsreset' works as expected -Expected results: - -* 'ctdb statisticsreset' executes successfully. -EOF -} +# This is pretty superficial. It just checks that a few particular +# items reduce. . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes | wc -l" num_nodes="$out" diff --git a/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh b/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh index 68e22879ba3..7d8620bc2f5 100755 --- a/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh +++ b/ctdb/tests/INTEGRATION/simple/cluster.001.isnotrecmaster.sh @@ -1,36 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify the operation of 'ctdb isnotrecmaster'. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Run 'ctdb isnotrecmaster' on each node. - -3. Verify that only 1 node shows the output 'This node is the - recmaster' and all the other nodes show the output 'This node is - not the recmaster'. - -Expected results: - -* 'ctdb isnotrecmaster' shows the correct output. -EOF -} +# Verify the operation of 'ctdb isnotrecmaster' . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init cmd="$CTDB isnotrecmaster || true" try_command_on_node -v all "$cmd" diff --git a/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh b/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh index f32a784863f..67406dc29d2 100755 --- a/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh +++ b/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh @@ -1,34 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb stop' causes a node to yield the recovery master role. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. 
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh b/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh
index f32a784863f..67406dc29d2 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.002.recmaster_yield.sh
@@ -1,34 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that 'ctdb stop' causes a node to yield the recovery master role.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-
-Steps:
-
-1. Determine which node is the recmaster.
-2. Stop this node using the 'ctdb stop' command.
-3. Verify that the status of the node changes to 'stopped'.
-4. Verify that this node no longer has the recovery master role.
-
-Expected results:
-
-* The 'ctdb stop' command causes a node to yield the recmaster role.
-EOF
-}
+# Verify that 'ctdb stop' causes a node to yield the recovery master role
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 echo "Finding out which node is the recovery master..."
 try_command_on_node -v 0 "$CTDB recmaster"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh b/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh
index 8cedd34045b..58633af973d 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.010.getrelock.sh
@@ -1,21 +1,14 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Verify that "ctdb getreclock" gets the recovery lock correctly.
+# Verify that "ctdb getreclock" gets the recovery lock correctly
 
-Make sure the recovery lock is consistent across all nodes.
-EOF
-}
+# Make sure the recovery lock is consistent across all nodes.
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 echo "Check that recovery lock is set the same on all nodes..."
 try_command_on_node -v -q all $CTDB getreclock
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh b/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh
index 8199b079610..76be340d7cb 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.012.reclock_command.sh
@@ -1,15 +1,10 @@
-#!/bin/bash
-
-test_info()
-{
-    cat <<EOF
-Check that CTDB operates correctly if the recovery lock is configured
-as a command.
-
-This test only does anything with local daemons.  On a real cluster it
-has no way of updating configuration.
-EOF
-}
+#!/usr/bin/env bash
+
+# Check that CTDB operates correctly if the recovery lock is configured
+# as a command.
+
+# This test works only with local daemons.  On a real cluster it has
+# no way of updating configuration.
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh b/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh
index d74bcf819b4..eccf0bea9da 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.015.reclock_remove_lock.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Verify that the cluster recovers if the recovery lock is removed.
 
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh b/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh
index cd193f05a49..e6f63417abd 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.016.reclock_move_lock_dir.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Verify that if the directory containing the recovery lock is moved
 # then all nodes are banned (because they can't take the lock).
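
The consistency check in cluster.010 above reduces to comparing getreclock output across nodes. A sketch, reusing the exact command from that test and assuming the -q output contains only the lock setting lines:

    try_command_on_node -v -q all $CTDB getreclock
    n=$(echo "$out" | sort -u | wc -l)
    if [ "$n" = 1 ] ; then
        echo "GOOD: recovery lock is consistent"
    else
        echo "BAD: recovery lock setting differs between nodes"
        exit 1
    fi
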
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh b/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh
index 6f90c8fd5bb..b841f5b02f2 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.020.message_ring.sh
@@ -1,23 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Run the message_ring test and sanity check the output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+# Run the message_ring test and sanity check the output
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 try_command_on_node 0 "$CTDB listnodes | wc -l"
 num_nodes="$out"
diff --git a/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh b/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh
index c78c4209ea6..f86d0802218 100755
--- a/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh
+++ b/ctdb/tests/INTEGRATION/simple/cluster.021.tunnel_ring.sh
@@ -1,23 +1,12 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-test_info()
-{
-    cat <<EOF
-Run tunnel_test and sanity check the output.
-
-Prerequisites:
-
-* An active CTDB cluster with at least 2 active nodes.
-EOF
-}
+# Run tunnel_test and sanity check the output
 
 . "${TEST_SCRIPTS_DIR}/integration.bash"
 
-ctdb_test_init
-
 set -e
 
-cluster_is_healthy
+ctdb_test_init
 
 try_command_on_node 0 "$CTDB listnodes | wc -l"
 num_nodes="$out"
"${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init test_node=1 diff --git a/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh b/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh index fa934e8bef1..be7175036dc 100755 --- a/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh +++ b/ctdb/tests/INTEGRATION/simple/cluster.091.version_check.sh @@ -1,11 +1,6 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Check that the CTDB version consistency checking operates correctly. -EOF -} +# Check that the CTDB version consistency checking operates correctly . "${TEST_SCRIPTS_DIR}/integration.bash" @@ -15,8 +10,6 @@ ctdb_test_skip_on_cluster ctdb_test_init -cluster_is_healthy - select_test_node try_command_on_node -v "$test_node" ctdb version diff --git a/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh b/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh index 979392f9a1d..2220a208125 100755 --- a/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh +++ b/ctdb/tests/INTEGRATION/simple/debug.001.getdebug.sh @@ -1,32 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb getdebug' works as expected. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Get the current debug level on a node, using 'ctdb getdebug -n <node>'. - -Expected results: - -* 'ctdb getdebug' shows the debug level on all the nodes. -EOF -} +# Verify that 'ctdb getdebug' works as expected . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init try_command_on_node 0 "$CTDB listnodes | wc -l" num_nodes="$out" diff --git a/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh b/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh index 3e04ca33733..dd5949eec77 100755 --- a/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh +++ b/ctdb/tests/INTEGRATION/simple/debug.002.setdebug.sh @@ -1,23 +1,16 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb setdebug' works as expected. +# Verify that 'ctdb setdebug' works as expected. -This is a little superficial. It checks that CTDB thinks the debug -level has been changed but doesn't actually check that logging occurs -at the new level. -EOF -} +# This is a little superficial. It checks that CTDB thinks the debug +# level has been changed but doesn't actually check that logging occurs +# at the new level. . "${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init select_test_node diff --git a/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh b/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh index 3d976c17d0f..6205c27c6ca 100755 --- a/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh +++ b/ctdb/tests/INTEGRATION/simple/debug.003.dumpmemory.sh @@ -1,32 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify that 'ctdb dumpmemory' shows expected output. - -Prerequisites: - -* An active CTDB cluster with at least 2 active nodes. - -Steps: - -1. Verify that the status on all of the ctdb nodes is 'OK'. -2. Run 'ctdb dumpmemory' and verify that it shows expected output - -Expected results: - -* 'ctdb dumpmemory' sows valid output. -EOF -} +# Verify that 'ctdb dumpmemory' shows expected output . 
"${TEST_SCRIPTS_DIR}/integration.bash" -ctdb_test_init - set -e -cluster_is_healthy +ctdb_test_init pat='^([[:space:]].+[[:space:]]+contains[[:space:]]+[[:digit:]]+ bytes in[[:space:]]+[[:digit:]]+ blocks \(ref [[:digit:]]+\)[[:space:]]+0x[[:xdigit:]]+|[[:space:]]+reference to: .+|full talloc report on .+ \(total[[:space:]]+[[:digit:]]+ bytes in [[:digit:]]+ blocks\))$' diff --git a/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh b/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh index 4e55319000e..fb1d031e982 100755 --- a/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh +++ b/ctdb/tests/INTEGRATION/simple/eventscripts.001.zero_scripts.sh @@ -1,14 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Check that CTDB operated correctly if there are 0 event scripts +# Check that CTDB operates correctly if there are 0 event scripts -This test only does anything with local daemons. On a real cluster it -has no way of updating configuration. -EOF -} . "${TEST_SCRIPTS_DIR}/integration.bash" diff --git a/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh b/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh index 0e39496155e..046989cabb1 100755 --- a/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh +++ b/ctdb/tests/INTEGRATION/simple/eventscripts.090.debug_hung.sh @@ -1,19 +1,6 @@ -#!/bin/bash +#!/usr/bin/env bash -test_info() -{ - cat <<EOF -Verify CTDB's debugging of timed out eventscripts - -Prerequisites: - -* An active CTDB cluster with monitoring enabled - -Expected results: - -* When an eventscript times out the correct debugging is executed. -EOF -} +# Verify CTDB's debugging of timed out eventscripts . "${TEST_SCRIPTS_DIR}/integration.bash" @@ -23,8 +10,6 @@ ctdb_test_skip_on_cluster ctdb_test_init -cluster_is_healthy - select_test_node #################### |