summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThomas Haller <thaller@redhat.com>2018-11-13 09:25:55 +0100
committerThomas Haller <thaller@redhat.com>2018-11-13 18:30:03 +0100
commit80220024cc8793759fedaaffd383958bb9db44c0 (patch)
tree80f0208a6bc49017ebd5178facab9deaec505072
parenta0efc69f46b956216f35d9d1de6ce43a0b77ab91 (diff)
downloadNetworkManager-80220024cc8793759fedaaffd383958bb9db44c0.tar.gz
shared: use atomic operation for accessing global hash seed
- fix thread-safety by adding a memory barrier (g_atomic_pointer_get()) to the double-checked locking pattern when initializing the hash key. - generate the random data outside the lock. Calling nm_utils_random_bytes() within the lock is ugly, because we don't want to assume that the function has no side effects that are prone to deadlock. There is no problem with attempting to generate the random data without the lock, and only using it when the race is won.
-rw-r--r--shared/nm-utils/nm-hash-utils.c51
1 files changed, 32 insertions, 19 deletions
diff --git a/shared/nm-utils/nm-hash-utils.c b/shared/nm-utils/nm-hash-utils.c
index 4bc12b7c3a..9f164a119e 100644
--- a/shared/nm-utils/nm-hash-utils.c
+++ b/shared/nm-utils/nm-hash-utils.c
@@ -40,53 +40,66 @@ static const guint8 *volatile global_seed = NULL;
static const guint8 *
_get_hash_key_init (void)
{
+ static gsize g_lock;
/* the returned hash is aligned to guint64, hence, it is safe
* to use it as guint* or guint64* pointer. */
static union {
guint8 v8[HASH_KEY_SIZE];
} g_arr _nm_alignas (guint64);
- static gsize g_lock;
const guint8 *g;
- CSipHash siph_state;
- uint64_t h;
- guint *p;
+ union {
+ guint8 v8[HASH_KEY_SIZE];
+ guint vuint;
+ } t_arr;
- g = global_seed;
+again:
+ g = g_atomic_pointer_get (&global_seed);
if (G_LIKELY (g != NULL)) {
nm_assert (g == g_arr.v8);
return g;
}
- if (g_once_init_enter (&g_lock)) {
+ {
+ CSipHash siph_state;
+ uint64_t h;
- nm_utils_random_bytes (g_arr.v8, sizeof (g_arr.v8));
+ /* initialize a random key in t_arr. */
+
+ nm_utils_random_bytes (&t_arr, sizeof (t_arr));
/* use siphash() of the key-size, to mangle the first guint. Otherwise,
* the first guint has only the entropy that nm_utils_random_bytes()
- * generated for the first 4 bytes and relies on a good random generator. */
- c_siphash_init (&siph_state, g_arr.v8);
- c_siphash_append (&siph_state, g_arr.v8, sizeof (g_arr.v8));
+ * generated for the first 4 bytes and relies on a good random generator.
+ *
+ * The first int is especially interesting for nm_hash_static() below, and we
+ * want it to have all the entropy of t_arr. */
+ c_siphash_init (&siph_state, t_arr.v8);
+ c_siphash_append (&siph_state, (const guint8 *) &t_arr, sizeof (t_arr));
h = c_siphash_finalize (&siph_state);
- p = (guint *) g_arr.v8;
if (sizeof (guint) < sizeof (h))
- *p = *p ^ ((guint) (h & 0xFFFFFFFFu)) ^ ((guint) (h >> 32));
+ t_arr.vuint = t_arr.vuint ^ ((guint) (h & 0xFFFFFFFFu)) ^ ((guint) (h >> 32));
else
- *p = *p ^ ((guint) (h & 0xFFFFFFFFu));
+ t_arr.vuint = t_arr.vuint ^ ((guint) (h & 0xFFFFFFFFu));
+ }
- g_atomic_pointer_compare_and_exchange (&global_seed, NULL, g_arr.v8);
- g_once_init_leave (&g_lock, 1);
+ if (!g_once_init_enter (&g_lock)) {
+ /* lost a race. The random key is already initialized. */
+ goto again;
}
- nm_assert (global_seed == g_arr.v8);
- return g_arr.v8;
+ memcpy (g_arr.v8, t_arr.v8, HASH_KEY_SIZE);
+ g = g_arr.v8;
+ g_atomic_pointer_set (&global_seed, g);
+ g_once_init_leave (&g_lock, 1);
+ return g;
}
#define _get_hash_key() \
({ \
const guint8 *_g; \
\
- _g = global_seed; \
- if (G_UNLIKELY (_g == NULL)) \
+ _g = g_atomic_pointer_get (&global_seed); \
+ if (G_UNLIKELY (!_g)) \
_g = _get_hash_key_init (); \
_g; \
})