author     Ryan Lortie <desrt@desrt.ca>          2011-05-25 11:08:20 +0200
committer  Matthias Clasen <mclasen@redhat.com>  2011-06-03 20:50:18 -0400
commit     df0b208831a797f402d9f86263e6fa5378da47cc (patch)
tree       de4e513665fba19c45d42228d04fd33fed2881e7 /glib/gbitlock.c
parent     1a80405a36eae6777cf8dc809da5883ba666f41c (diff)
download   glib-df0b208831a797f402d9f86263e6fa5378da47cc.tar.gz
Implement pointer-sized bitlocks

Based on a patch from Alexander Larsson.
https://bugzilla.gnome.org/show_bug.cgi?id=651467
Diffstat (limited to 'glib/gbitlock.c')
-rw-r--r--  glib/gbitlock.c | 193
1 file changed, 193 insertions(+), 0 deletions(-)
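To make the intent of the new API concrete, here is a minimal usage sketch (the Node type and its field are hypothetical, not part of this patch): the low bit of a pointer-sized field doubles as a per-node lock, which is exactly the pattern these calls support.

    #include <glib.h>

    typedef struct _Node Node;
    struct _Node {
      gpointer next;    /* bit 0 doubles as this node's lock bit */
    };

    static Node *
    node_get_next (Node *node)
    {
      gsize value;

      g_pointer_bit_lock (&node->next, 0);

      /* Read the word while holding the lock, then mask off the
       * lock bit to recover the real pointer value. */
      value = (gsize) g_atomic_pointer_get (&node->next);

      g_pointer_bit_unlock (&node->next, 0);

      return (Node *) (value & ~(gsize) 1);
    }

Because the lock lives in the pointer word itself, no separate mutex per node is needed; aligned allocations guarantee that bit 0 of a real pointer is clear.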
diff --git a/glib/gbitlock.c b/glib/gbitlock.c
index 75e045c89..72ee96cd1 100644
--- a/glib/gbitlock.c
+++ b/glib/gbitlock.c
@@ -22,6 +22,7 @@
#include "gbitlock.h"
+#include <glib/gmessages.h>
#include <glib/gatomic.h>
#include <glib/gslist.h>
#include <glib/gthread.h>
@@ -334,3 +335,195 @@ g_bit_unlock (volatile gint *address,
       g_futex_wake (address);
   }
 }
+
+
+/* We emulate pointer-sized futex(2) because the kernel API only
+ * supports integers.
+ *
+ * We assume that the 'interesting' part is always the low-order bits.
+ * This assumption holds because pointer bitlocks are restricted to
+ * using the low-order bits of the pointer as the lock.
+ *
+ * On 32-bit systems there is nothing to do, since the pointer size is
+ * equal to the integer size.  On little-endian systems the low-order
+ * bits keep the same address, so again there is nothing to do.  Only
+ * on 64-bit big-endian systems do we need a bit of pointer arithmetic:
+ * there, the low-order bits sit 4 bytes further along.  We have a
+ * helper function that always does the right thing here.
+ *
+ * As for the value, we always want the low-order bits of the
+ * pointer-sized word, and a simple cast from (gsize) to (guint) takes
+ * care of that.
+ *
+ * After that, pointer-sized futex becomes as simple as:
+ *
+ *   g_futex_wait (g_futex_int_address (address), (guint) value);
+ *
+ * and
+ *
+ *   g_futex_wake (g_futex_int_address (address));
+ */
+static const volatile gint *
+g_futex_int_address (const volatile void *address)
+{
+  const volatile gint *int_address = address;
+
+#if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8
+  int_address++; /* the low-order 32 bits live in the second gint */
+#endif
+
+  return int_address;
+}
+
+/**
+ * g_pointer_bit_lock:
+ * @address: a pointer to a #gpointer-sized value
+ * @lock_bit: a bit value between 0 and 31
+ *
+ * This is equivalent to g_bit_lock(), but it works on pointers (or
+ * other pointer-sized values).
+ *
+ * For portability reasons, you may only lock on the bottom 32 bits of
+ * the pointer.
+ *
+ * Since: 2.30
+ **/
+void
+(g_pointer_bit_lock) (volatile void *address,
+                      gint           lock_bit)
+{
+  g_return_if_fail (lock_bit < 32);
+
+  {
+#if defined (__GNUC__) && (defined (__i386__) || defined (__amd64__))
+ retry:
+    asm volatile goto ("lock bts %1, (%0)\n"
+                       "jc %l[contended]"
+                       : /* no output */
+                       : "r" (address), "r" ((gsize) lock_bit)
+                       : "cc", "memory"
+                       : contended);
+    return;
+
+ contended:
+    {
+      volatile gsize *pointer_address = address;
+      gsize mask = 1u << lock_bit;
+      gsize v;
+
+      v = (gsize) g_atomic_pointer_get (pointer_address);
+      if (v & mask)
+        {
+          guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
+
+          g_atomic_int_add (&g_bit_lock_contended[class], +1);
+          g_futex_wait (g_futex_int_address (address), (guint) v);
+          g_atomic_int_add (&g_bit_lock_contended[class], -1);
+        }
+    }
+    goto retry;
+#else
+    volatile gsize *pointer_address = address;
+    gsize mask = 1u << lock_bit;
+    gsize v;
+
+ retry:
+    v = g_atomic_pointer_or (pointer_address, mask);
+    if (v & mask)
+      /* already locked */
+      {
+        guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
+
+        g_atomic_int_add (&g_bit_lock_contended[class], +1);
+        g_futex_wait (g_futex_int_address (address), (guint) v);
+        g_atomic_int_add (&g_bit_lock_contended[class], -1);
+
+        goto retry;
+      }
+#endif
+  }
+}
+
+/**
+ * g_pointer_bit_trylock:
+ * @address: a pointer to a #gpointer-sized value
+ * @lock_bit: a bit value between 0 and 31
+ * @returns: %TRUE if the lock was acquired
+ *
+ * This is equivalent to g_bit_trylock(), but it works on pointers (or
+ * other pointer-sized values).
+ *
+ * For portability reasons, you may only lock on the bottom 32 bits of
+ * the pointer.
+ *
+ * Since: 2.30
+ **/
+gboolean
+(g_pointer_bit_trylock) (volatile void *address,
+                         gint           lock_bit)
+{
+  g_return_val_if_fail (lock_bit < 32, FALSE);
+
+  {
+#if defined (__GNUC__) && (defined (__i386__) || defined (__amd64__))
+    gboolean result;
+
+    asm volatile ("lock bts %2, (%1)\n"
+                  "setnc %%al\n"
+                  "movzx %%al, %0"
+                  : "=r" (result)
+                  : "r" (address), "r" ((gsize) lock_bit)
+                  : "cc", "memory", "eax");
+
+    return result;
+#else
+    volatile gsize *pointer_address = address;
+    gsize mask = 1u << lock_bit;
+    gsize v;
+
+    g_return_val_if_fail (lock_bit < 32, FALSE);
+
+    v = g_atomic_pointer_or (pointer_address, mask);
+
+    return ~v & mask;
+#endif
+  }
+}
+
+/**
+ * g_pointer_bit_unlock:
+ * @address: a pointer to a #gpointer-sized value
+ * @lock_bit: a bit value between 0 and 31
+ *
+ * This is equivalent to g_bit_unlock(), but it works on pointers (or
+ * other pointer-sized values).
+ *
+ * For portability reasons, you may only lock on the bottom 32 bits of
+ * the pointer.
+ *
+ * Since: 2.30
+ **/
+void
+(g_pointer_bit_unlock) (volatile void *address,
+                        gint           lock_bit)
+{
+  g_return_if_fail (lock_bit < 32);
+
+  {
+#if defined (__GNUC__) && (defined (__i386__) || defined (__amd64__))
+    asm volatile ("lock btr %1, (%0)"
+                  : /* no output */
+                  : "r" (address), "r" ((gsize) lock_bit)
+                  : "cc", "memory");
+#else
+    volatile gsize *pointer_address = address;
+    gsize mask = 1u << lock_bit;
+
+    g_atomic_pointer_and (pointer_address, ~mask);
+#endif
+
+    {
+      guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
+      if (g_atomic_int_get (&g_bit_lock_contended[class]))
+        g_futex_wake (g_futex_int_address (address));
+    }
+  }
+}
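
The endianness reasoning behind g_futex_int_address() can be checked in isolation. The following standalone sketch (plain C, not GLib API; low_order_int_address is a hypothetical name) performs the same address adjustment and prints the same value on either byte order:

    #include <stdint.h>
    #include <stdio.h>

    static const int *
    low_order_int_address (const void *address)
    {
      const int *int_address = address;

    #if defined (__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
        && UINTPTR_MAX > 0xffffffffu
      int_address++;    /* skip the high-order half of the word */
    #endif

      return int_address;
    }

    int
    main (void)
    {
      uintptr_t word = 0x12345678;

      /* Prints 0x12345678 regardless of endianness: the cast in the
       * caller ((unsigned) word) and this address adjustment agree on
       * which half of the word is "low order". */
      printf ("%#x\n", (unsigned) *low_order_int_address (&word));
      return 0;
    }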
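
For the fallback path, the lock semantics reduce to an atomic fetch-OR and fetch-AND. A minimal model using GCC-style __atomic builtins (an assumption for illustration; the patch itself goes through g_atomic_pointer_or() and g_atomic_pointer_and()) looks like this:

    #include <stdint.h>
    #include <stdbool.h>

    static bool
    bit_trylock (uintptr_t *word, int bit)
    {
      uintptr_t mask = (uintptr_t) 1 << bit;
      uintptr_t old = __atomic_fetch_or (word, mask, __ATOMIC_ACQUIRE);

      /* Success iff the bit was previously clear: the same test as
       * "return ~v & mask" above, written as a boolean. */
      return (old & mask) == 0;
    }

    static void
    bit_unlock (uintptr_t *word, int bit)
    {
      uintptr_t mask = (uintptr_t) 1 << bit;

      __atomic_fetch_and (word, ~mask, __ATOMIC_RELEASE);
    }

g_pointer_bit_lock() layers the futex machinery on top of this: when the try fails, it records contention in g_bit_lock_contended[], sleeps on the futex until the word changes, and then retries.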