summaryrefslogtreecommitdiff
path: root/ctdb
diff options
context:
space:
mode:
authorMartin Schwenke <martin@meltin.net>2022-01-10 19:18:14 +1100
committerMartin Schwenke <martins@samba.org>2022-01-17 10:21:33 +0000
commitf5a39058f0743f5607df91cb698a2b15618e1360 (patch)
tree4890d63e38d47490bba2abf1bb93ce2682d8d934 /ctdb
parentd752a92e1153fa355b0cbaa1f482fdc0d88e42f5 (diff)
downloadsamba-f5a39058f0743f5607df91cb698a2b15618e1360.tar.gz
ctdb-config: [cluster] recovery lock -> [cluster] cluster lock
Retain "recovery lock" and mark as deprecated for backward compatibility. Some documentation is still inconsistent. Signed-off-by: Martin Schwenke <martin@meltin.net> Reviewed-by: Amitay Isaacs <amitay@gmail.com>
Diffstat (limited to 'ctdb')
-rw-r--r--ctdb/cluster/cluster_conf.c25
-rw-r--r--ctdb/cluster/cluster_conf.h1
-rw-r--r--ctdb/config/ctdb.conf10
-rw-r--r--ctdb/doc/cluster_mutex_helper.txt6
-rw-r--r--ctdb/doc/ctdb-etcd.7.xml4
-rw-r--r--ctdb/doc/ctdb.conf.5.xml8
-rw-r--r--ctdb/doc/ctdb_mutex_ceph_rados_helper.7.xml6
-rw-r--r--ctdb/doc/examples/ctdb.conf2
-rw-r--r--ctdb/server/ctdb_config.c4
-rw-r--r--ctdb/server/ctdb_config.h1
-rw-r--r--ctdb/server/ctdbd.c9
-rwxr-xr-xctdb/tests/UNIT/cunit/config_test_001.sh1
-rwxr-xr-xctdb/tests/UNIT/cunit/config_test_004.sh21
-rwxr-xr-xctdb/tests/local_daemons.sh30
-rw-r--r--ctdb/utils/ceph/ctdb_mutex_ceph_rados_helper.c2
-rwxr-xr-xctdb/utils/ceph/test_ceph_rados_reclock.sh4
-rwxr-xr-xctdb/utils/etcd/ctdb_etcd_lock4
17 files changed, 95 insertions, 43 deletions
diff --git a/ctdb/cluster/cluster_conf.c b/ctdb/cluster/cluster_conf.c
index be79d5942a8..3b86bad80ef 100644
--- a/ctdb/cluster/cluster_conf.c
+++ b/ctdb/cluster/cluster_conf.c
@@ -113,6 +113,24 @@ good:
mode);
}
+static bool validate_recovery_lock(const char *key,
+ const char *old_reclock,
+ const char *new_reclock,
+ enum conf_update_mode mode)
+{
+ bool status;
+
+ if (new_reclock != NULL) {
+ D_WARNING("Configuration option [%s] -> %s is deprecated\n",
+ CLUSTER_CONF_SECTION,
+ key);
+ }
+
+ status = check_static_string_change(key, old_reclock, new_reclock, mode);
+
+ return status;
+}
+
void cluster_conf_init(struct conf_context *conf)
{
conf_define_section(conf, CLUSTER_CONF_SECTION, NULL);
@@ -129,7 +147,12 @@ void cluster_conf_init(struct conf_context *conf)
validate_node_address);
conf_define_string(conf,
CLUSTER_CONF_SECTION,
- CLUSTER_CONF_RECOVERY_LOCK,
+ CLUSTER_CONF_CLUSTER_LOCK,
NULL,
check_static_string_change);
+ conf_define_string(conf,
+ CLUSTER_CONF_SECTION,
+ CLUSTER_CONF_RECOVERY_LOCK,
+ NULL,
+ validate_recovery_lock);
}
diff --git a/ctdb/cluster/cluster_conf.h b/ctdb/cluster/cluster_conf.h
index 6b797ef1085..cdd42e15b7d 100644
--- a/ctdb/cluster/cluster_conf.h
+++ b/ctdb/cluster/cluster_conf.h
@@ -26,6 +26,7 @@
#define CLUSTER_CONF_TRANSPORT "transport"
#define CLUSTER_CONF_NODE_ADDRESS "node address"
+#define CLUSTER_CONF_CLUSTER_LOCK "cluster lock"
#define CLUSTER_CONF_RECOVERY_LOCK "recovery lock"
void cluster_conf_init(struct conf_context *conf);
diff --git a/ctdb/config/ctdb.conf b/ctdb/config/ctdb.conf
index 5440600a435..8e1b3760973 100644
--- a/ctdb/config/ctdb.conf
+++ b/ctdb/config/ctdb.conf
@@ -11,12 +11,12 @@
# log level = NOTICE
[cluster]
- # Shared recovery lock file to avoid split brain. Daemon
- # default is no recovery lock. Do NOT run CTDB without a
- # recovery lock file unless you know exactly what you are
+ # Shared cluster lock file to avoid split brain. Daemon
+ # default is no cluster lock. Do NOT run CTDB without a
+ # cluster lock file unless you know exactly what you are
# doing.
#
- # Please see the RECOVERY LOCK section in ctdb(7) for more
+ # Please see the CLUSTER LOCK section in ctdb(7) for more
# details.
#
- # recovery lock = !/bin/false RECOVERY LOCK NOT CONFIGURED
+ # cluster lock = !/bin/false CLUSTER LOCK NOT CONFIGURED
diff --git a/ctdb/doc/cluster_mutex_helper.txt b/ctdb/doc/cluster_mutex_helper.txt
index 20c8eb2b51d..4ee018ffc94 100644
--- a/ctdb/doc/cluster_mutex_helper.txt
+++ b/ctdb/doc/cluster_mutex_helper.txt
@@ -5,11 +5,11 @@ CTDB uses cluster-wide mutexes to protect against a "split brain",
which could occur if the cluster becomes partitioned due to network
failure or similar.
-CTDB uses a cluster-wide mutex for its "recovery lock", which is used
+CTDB uses a cluster-wide mutex for its "cluster lock", which is used
to ensure that only one database recovery can happen at a time. For
-an overview of recovery lock configuration see the RECOVERY LOCK
+an overview of cluster lock configuration see the CLUSTER LOCK
section in ctdb(7). CTDB tries to ensure correct operation of the
-recovery lock by attempting to take the recovery lock when CTDB knows
+cluster lock by attempting to take the cluster lock when CTDB knows
that it should already be held.
By default, CTDB uses a supplied mutex helper that uses a fcntl(2)
diff --git a/ctdb/doc/ctdb-etcd.7.xml b/ctdb/doc/ctdb-etcd.7.xml
index 5d7a0e05366..f84989f854f 100644
--- a/ctdb/doc/ctdb-etcd.7.xml
+++ b/ctdb/doc/ctdb-etcd.7.xml
@@ -60,7 +60,7 @@
<para>
ctdb_etcd_lock is intended to be run as a mutex helper for CTDB. It
will try to connect to an existing etcd cluster and grab a lock in that
- cluster to function as CTDB's recovery lock. Please see
+ cluster to function as CTDB's cluster lock. Please see
<emphasis>ctdb/doc/cluster_mutex_helper.txt</emphasis> for details on
the mutex helper API. To use this, include the following line in
the <literal>[cluster]</literal> section of
@@ -68,7 +68,7 @@
<manvolnum>5</manvolnum></citerefentry>:
</para>
<screen format="linespecific">
-recovery lock = !/usr/local/usr/libexec/ctdb/ctdb_etcd_lock
+cluster lock = !/usr/local/usr/libexec/ctdb/ctdb_etcd_lock
</screen>
<para>
You can also pass "-v", "-vv", or "-vvv" to include verbose output in
diff --git a/ctdb/doc/ctdb.conf.5.xml b/ctdb/doc/ctdb.conf.5.xml
index f4ef9599d9c..910ac4102f6 100644
--- a/ctdb/doc/ctdb.conf.5.xml
+++ b/ctdb/doc/ctdb.conf.5.xml
@@ -175,20 +175,20 @@
<variablelist>
<varlistentry>
- <term>recovery lock = <parameter>LOCK</parameter></term>
+ <term>cluster lock = <parameter>LOCK</parameter></term>
<listitem>
<para>
LOCK specifies the cluster-wide mutex used to detect and
prevent a partitioned cluster (or "split brain").
</para>
<para>
- For information about the recovery lock please see the
- <citetitle>RECOVERY LOCK</citetitle> section in
+ For information about the cluster lock please see the
+ <citetitle>CLUSTER LOCK</citetitle> section in
<citerefentry><refentrytitle>ctdb</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
</para>
<para>
- Default: NONE. However, uses of a recovery lock is
+ Default: NONE. However, use of a cluster lock is
<emphasis>strongly recommended</emphasis>.
</para>
</listitem>
diff --git a/ctdb/doc/ctdb_mutex_ceph_rados_helper.7.xml b/ctdb/doc/ctdb_mutex_ceph_rados_helper.7.xml
index e055dbba614..dd3dbabdd50 100644
--- a/ctdb/doc/ctdb_mutex_ceph_rados_helper.7.xml
+++ b/ctdb/doc/ctdb_mutex_ceph_rados_helper.7.xml
@@ -19,7 +19,7 @@
<refsect1>
<title>DESCRIPTION</title>
<para>
- ctdb_mutex_ceph_rados_helper can be used as a recovery lock provider
+ ctdb_mutex_ceph_rados_helper can be used as a cluster lock provider
for CTDB. When configured, split brain avoidance during CTDB recovery
will be handled using locks against an object located in a Ceph RADOS
pool.
@@ -29,7 +29,7 @@
<manvolnum>5</manvolnum></citerefentry>:
</para>
<screen format="linespecific">
-recovery lock = !ctdb_mutex_ceph_rados_helper [Cluster] [User] [Pool] [Object]
+cluster lock = !ctdb_mutex_ceph_rados_helper [Cluster] [User] [Pool] [Object]
Cluster: Ceph cluster name (e.g. ceph)
User: Ceph cluster user name (e.g. client.admin)
@@ -44,7 +44,7 @@ Object: Ceph RADOS object name
</para>
<para>
For informational purposes, ctdb_mutex_ceph_rados_helper will also
- register the recovery lock holder in Ceph Manager's service map.
+ register the cluster lock holder in Ceph Manager's service map.
</para>
</refsect1>
diff --git a/ctdb/doc/examples/ctdb.conf b/ctdb/doc/examples/ctdb.conf
index fa5e8f7894c..3a8ccc658b8 100644
--- a/ctdb/doc/examples/ctdb.conf
+++ b/ctdb/doc/examples/ctdb.conf
@@ -47,7 +47,7 @@
log level = NOTICE
[cluster]
- recovery lock = /shared/recovery.lock
+ cluster lock = /shared/cluster.lock
#
# Nodes configuration
diff --git a/ctdb/server/ctdb_config.c b/ctdb/server/ctdb_config.c
index 750b909cd3d..17e697996ab 100644
--- a/ctdb/server/ctdb_config.c
+++ b/ctdb/server/ctdb_config.c
@@ -51,6 +51,10 @@ static void setup_config_pointers(struct conf_context *conf)
&ctdb_config.node_address);
conf_assign_string_pointer(conf,
CLUSTER_CONF_SECTION,
+ CLUSTER_CONF_CLUSTER_LOCK,
+ &ctdb_config.cluster_lock);
+ conf_assign_string_pointer(conf,
+ CLUSTER_CONF_SECTION,
CLUSTER_CONF_RECOVERY_LOCK,
&ctdb_config.recovery_lock);
diff --git a/ctdb/server/ctdb_config.h b/ctdb/server/ctdb_config.h
index f2f75972661..f079d495445 100644
--- a/ctdb/server/ctdb_config.h
+++ b/ctdb/server/ctdb_config.h
@@ -26,6 +26,7 @@ struct ctdb_config {
/* Cluster */
const char *transport;
const char *node_address;
+ const char *cluster_lock;
const char *recovery_lock;
/* Database */
diff --git a/ctdb/server/ctdbd.c b/ctdb/server/ctdbd.c
index 72d811f1c8f..e333d7cb7cb 100644
--- a/ctdb/server/ctdbd.c
+++ b/ctdb/server/ctdbd.c
@@ -282,10 +282,13 @@ int main(int argc, const char *argv[])
goto fail;
}
- if (ctdb_config.recovery_lock == NULL) {
- D_WARNING("Recovery lock not set\n");
+ if (ctdb_config.cluster_lock != NULL) {
+ ctdb->recovery_lock = ctdb_config.cluster_lock;
+ } else if (ctdb_config.recovery_lock != NULL) {
+ ctdb->recovery_lock = ctdb_config.recovery_lock;
+ } else {
+ D_WARNING("Cluster lock not set\n");
}
- ctdb->recovery_lock = ctdb_config.recovery_lock;
/* tell ctdb what address to listen on */
if (ctdb_config.node_address) {
diff --git a/ctdb/tests/UNIT/cunit/config_test_001.sh b/ctdb/tests/UNIT/cunit/config_test_001.sh
index 9a8682b04e6..b4a64ab790c 100755
--- a/ctdb/tests/UNIT/cunit/config_test_001.sh
+++ b/ctdb/tests/UNIT/cunit/config_test_001.sh
@@ -34,6 +34,7 @@ ok <<EOF
[cluster]
# transport = tcp
# node address =
+ # cluster lock =
# recovery lock =
[database]
# volatile database directory = ${database_volatile_dbdir}
diff --git a/ctdb/tests/UNIT/cunit/config_test_004.sh b/ctdb/tests/UNIT/cunit/config_test_004.sh
index 10a36567dac..bebf7946bf6 100755
--- a/ctdb/tests/UNIT/cunit/config_test_004.sh
+++ b/ctdb/tests/UNIT/cunit/config_test_004.sh
@@ -29,7 +29,7 @@ unit_test ctdb-config get "cluster" "node address"
ok <<EOF
EOF
-unit_test ctdb-config get "cluster" "recovery lock"
+unit_test ctdb-config get "cluster" "cluster lock"
cat > "$conffile" <<EOF
[cluster]
@@ -72,3 +72,22 @@ conf: validation for option "node address" failed
Failed to load config file $conffile
EOF
unit_test ctdb-config validate
+
+cat > "$conffile" <<EOF
+[cluster]
+ cluster lock = /foo/bar
+EOF
+
+required_result 0 <<EOF
+EOF
+unit_test ctdb-config validate
+
+cat > "$conffile" <<EOF
+[cluster]
+ recovery lock = /foo/bar
+EOF
+
+required_result 0 <<EOF
+Configuration option [cluster] -> recovery lock is deprecated
+EOF
+unit_test ctdb-config -d WARNING validate
diff --git a/ctdb/tests/local_daemons.sh b/ctdb/tests/local_daemons.sh
index 626cc192876..c75a554c548 100755
--- a/ctdb/tests/local_daemons.sh
+++ b/ctdb/tests/local_daemons.sh
@@ -127,7 +127,7 @@ Options:
-N <file> Nodes file (default: automatically generated)
-n <num> Number of nodes (default: 3)
-P <file> Public addresses file (default: automatically generated)
- -R Use a command for the recovery lock (default: use a file)
+ -R Use a command for the cluster lock (default: use a file)
-r <time> Like -R and set recheck interval to <time> (default: use a file)
-S <library> Socket wrapper shared library to preload (default: none)
-6 Generate IPv6 IPs for nodes, public addresses (default: IPv4)
@@ -142,8 +142,8 @@ local_daemons_setup ()
_nodes_file=""
_num_nodes=3
_public_addresses_file=""
- _recovery_lock_use_command=false
- _recovery_lock_recheck_interval=""
+ _cluster_lock_use_command=false
+ _cluster_lock_recheck_interval=""
_socket_wrapper=""
_use_ipv6=false
@@ -155,9 +155,9 @@ local_daemons_setup ()
N) _nodes_file="$OPTARG" ;;
n) _num_nodes="$OPTARG" ;;
P) _public_addresses_file="$OPTARG" ;;
- R) _recovery_lock_use_command=true ;;
- r) _recovery_lock_use_command=true
- _recovery_lock_recheck_interval="$OPTARG"
+ R) _cluster_lock_use_command=true ;;
+ r) _cluster_lock_use_command=true
+ _cluster_lock_recheck_interval="$OPTARG"
;;
S) _socket_wrapper="$OPTARG" ;;
6) _use_ipv6=true ;;
@@ -191,16 +191,16 @@ local_daemons_setup ()
$_use_ipv6 >"$_public_addresses_all"
fi
- _recovery_lock_dir="${directory}/shared/.ctdb"
- mkdir -p "$_recovery_lock_dir"
- _recovery_lock="${_recovery_lock_dir}/rec.lock"
- if $_recovery_lock_use_command ; then
+ _cluster_lock_dir="${directory}/shared/.ctdb"
+ mkdir -p "$_cluster_lock_dir"
+ _cluster_lock="${_cluster_lock_dir}/cluster.lock"
+ if $_cluster_lock_use_command ; then
_helper="${CTDB_SCRIPTS_HELPER_BINDIR}/ctdb_mutex_fcntl_helper"
- _t="! ${_helper} ${_recovery_lock}"
- if [ -n "$_recovery_lock_recheck_interval" ] ; then
- _t="${_t} ${_recovery_lock_recheck_interval}"
+ _t="! ${_helper} ${_cluster_lock}"
+ if [ -n "$_cluster_lock_recheck_interval" ] ; then
+ _t="${_t} ${_cluster_lock_recheck_interval}"
fi
- _recovery_lock="$_t"
+ _cluster_lock="$_t"
fi
if [ -n "$_socket_wrapper" ] ; then
@@ -241,7 +241,7 @@ local_daemons_setup ()
log level = INFO
[cluster]
- recovery lock = ${_recovery_lock}
+ cluster lock = ${_cluster_lock}
node address = ${_node_ip}
[database]
diff --git a/ctdb/utils/ceph/ctdb_mutex_ceph_rados_helper.c b/ctdb/utils/ceph/ctdb_mutex_ceph_rados_helper.c
index bdbb8df7104..7d868a38b23 100644
--- a/ctdb/utils/ceph/ctdb_mutex_ceph_rados_helper.c
+++ b/ctdb/utils/ceph/ctdb_mutex_ceph_rados_helper.c
@@ -28,7 +28,7 @@
#define CTDB_MUTEX_CEPH_LOCK_NAME "ctdb_reclock_mutex"
#define CTDB_MUTEX_CEPH_LOCK_COOKIE CTDB_MUTEX_CEPH_LOCK_NAME
-#define CTDB_MUTEX_CEPH_LOCK_DESC "CTDB recovery lock"
+#define CTDB_MUTEX_CEPH_LOCK_DESC "CTDB cluster lock"
/*
* During failover it may take up to <lock duration> seconds before the
* newly elected recovery master can obtain the lock.
diff --git a/ctdb/utils/ceph/test_ceph_rados_reclock.sh b/ctdb/utils/ceph/test_ceph_rados_reclock.sh
index 1848c104ea5..bfb9c322cc8 100755
--- a/ctdb/utils/ceph/test_ceph_rados_reclock.sh
+++ b/ctdb/utils/ceph/test_ceph_rados_reclock.sh
@@ -84,7 +84,7 @@ LOCKER_COOKIE="$(jq -r '.lockers[0].cookie' ${TMP_DIR}/lock_state_first)"
[ "$LOCKER_COOKIE" == "ctdb_reclock_mutex" ] \
|| _fail "unexpected locker cookie: $LOCKER_COOKIE"
LOCKER_DESC="$(jq -r '.lockers[0].description' ${TMP_DIR}/lock_state_first)"
-[ "$LOCKER_DESC" == "CTDB recovery lock" ] \
+[ "$LOCKER_DESC" == "CTDB cluster lock" ] \
|| _fail "unexpected locker description: $LOCKER_DESC"
LOCKER_EXP="$(jq -r '.lockers[0].expiration' ${TMP_DIR}/lock_state_first)"
[ "$LOCKER_EXP" == "0.000000" ] \
@@ -146,7 +146,7 @@ LOCKER_COOKIE="$(jq -r '.lockers[0].cookie' ${TMP_DIR}/lock_state_fourth)"
[ "$LOCKER_COOKIE" == "ctdb_reclock_mutex" ] \
|| _fail "unexpected locker cookie: $LOCKER_COOKIE"
LOCKER_DESC="$(jq -r '.lockers[0].description' ${TMP_DIR}/lock_state_fourth)"
-[ "$LOCKER_DESC" == "CTDB recovery lock" ] \
+[ "$LOCKER_DESC" == "CTDB cluster lock" ] \
|| _fail "unexpected locker description: $LOCKER_DESC"
kill $locker_pid || exit 1
diff --git a/ctdb/utils/etcd/ctdb_etcd_lock b/ctdb/utils/etcd/ctdb_etcd_lock
index 5e722267cd3..dac24361e77 100755
--- a/ctdb/utils/etcd/ctdb_etcd_lock
+++ b/ctdb/utils/etcd/ctdb_etcd_lock
@@ -19,11 +19,11 @@
This script is intended to be run as a mutex helper for CTDB. It will try to
connect to an existing etcd cluster and grab an etcd.Lock() to function as
-CTDB's recovery lock. Please see ctdb/doc/cluster_mutex_helper.txt for
+CTDB's cluster lock. Please see ctdb/doc/cluster_mutex_helper.txt for
details on what we're SUPPOSED to be doing. :) To use this, include
the following line in the ctdb.conf:
- recovery lock = !/path/to/script
+ cluster lock = !/path/to/script
You can also pass "-v", "-vv", or "-vvv" to include verbose output in the
CTDB log. Additional "v"s indicate increases in verbosity.