Diffstat (limited to 'libjava/sysdep/i386/locks.h')
-rw-r--r--  libjava/sysdep/i386/locks.h | 54
1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/libjava/sysdep/i386/locks.h b/libjava/sysdep/i386/locks.h
index 7b99f0bd781..9d130b0f515 100644
--- a/libjava/sysdep/i386/locks.h
+++ b/libjava/sysdep/i386/locks.h
@@ -1,6 +1,6 @@
/* locks.h - Thread synchronization primitives. X86/x86-64 implementation.
- Copyright (C) 2002, 2011 Free Software Foundation
+ Copyright (C) 2002 Free Software Foundation
This file is part of libgcj.
@@ -23,25 +23,19 @@ compare_and_swap(volatile obj_addr_t *addr,
obj_addr_t old,
obj_addr_t new_val)
{
- return __sync_bool_compare_and_swap (addr, old, new_val);
-}
-
-// Ensure that subsequent instructions do not execute on stale
-// data that was loaded from memory before the barrier.
-// On X86/x86-64, the hardware ensures that reads are properly ordered.
-inline static void
-read_barrier()
-{
-}
-
-// Ensure that prior stores to memory are completed with respect to other
-// processors.
-inline static void
-write_barrier()
-{
- /* x86-64/X86 does not reorder writes. We just need to ensure that
- gcc also doesn't. */
- __asm__ __volatile__(" " : : : "memory");
+ char result;
+#ifdef __x86_64__
+ __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
+ : "=m"(*(addr)), "=q"(result)
+ : "r" (new_val), "a"(old), "m"(*addr)
+ : "memory");
+#else
+ __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
+ : "=m"(*addr), "=q"(result)
+ : "r" (new_val), "a"(old), "m"(*addr)
+ : "memory");
+#endif
+ return (bool) result;
}
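
For comparison, the removed implementation expressed the same operation with GCC's __sync_bool_compare_and_swap builtin, which compiles to an equivalent lock cmpxchg sequence on x86/x86-64. A minimal standalone sketch of that form (the obj_addr_t typedef and the main driver below are illustrative assumptions, not libgcj code):

    // Standalone sketch; obj_addr_t is assumed to be a pointer-sized unsigned
    // integer, matching its definition elsewhere in the libgcj headers.
    typedef unsigned long obj_addr_t;

    // Atomically set *addr to new_val if *addr == old; true on success.
    inline static bool
    builtin_compare_and_swap (volatile obj_addr_t *addr,
                              obj_addr_t old,
                              obj_addr_t new_val)
    {
      return __sync_bool_compare_and_swap (addr, old, new_val);
    }

    int
    main ()
    {
      volatile obj_addr_t word = 1;
      bool first  = builtin_compare_and_swap (&word, 1, 2);  // succeeds, word becomes 2
      bool second = builtin_compare_and_swap (&word, 1, 3);  // fails, word stays 2
      return (first && !second) ? 0 : 1;
    }
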
// Set *addr to new_val with release semantics, i.e. making sure
@@ -52,7 +46,7 @@ write_barrier()
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
- write_barrier ();
+ __asm__ __volatile__(" " : : : "memory");
*(addr) = new_val;
}
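
To show how these primitives are meant to compose, here is a hypothetical usage sketch (not libgcj code; the names cas, release_store, spin_lock, spin_unlock and their stand-in bodies are assumptions): compare_and_swap acquires a lock word atomically, and release_set clears it with release semantics so that stores made inside the critical section become visible before the lock reads as free.

    typedef unsigned long obj_addr_t;          // assumed pointer-sized word

    // Stand-in for compare_and_swap above, using the GCC builtin.
    inline static bool
    cas (volatile obj_addr_t *addr, obj_addr_t old, obj_addr_t new_val)
    {
      return __sync_bool_compare_and_swap (addr, old, new_val);
    }

    // Stand-in for release_set: compiler barrier, then a plain store, as in the patch.
    inline static void
    release_store (volatile obj_addr_t *addr, obj_addr_t new_val)
    {
      __asm__ __volatile__(" " : : : "memory");
      *addr = new_val;
    }

    static void
    spin_lock (volatile obj_addr_t *lock)
    {
      while (!cas (lock, 0, 1))   // 0 = free, 1 = held; retry until the CAS succeeds
        ;
    }

    static void
    spin_unlock (volatile obj_addr_t *lock)
    {
      release_store (lock, 0);    // prior critical-section stores ordered before this
    }
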
@@ -66,4 +60,22 @@ compare_and_swap_release(volatile obj_addr_t *addr,
{
return compare_and_swap(addr, old, new_val);
}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// On X86/x86-64, the hardware ensures that reads are properly ordered.
+inline static void
+read_barrier()
+{
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+ /* x86-64/X86 does not reorder writes. We just need to ensure that
+ gcc also doesn't. */
+ __asm__ __volatile__(" " : : : "memory");
+}
#endif
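
The empty read_barrier and the compiler-only write_barrier are sufficient here because x86/x86-64 neither reorders loads with other loads nor stores with other stores; only the compiler has to be kept from reordering. Under the assumption that GCC's later __atomic builtins are available (they postdate this code), rough equivalents would look like this (the _modern names are assumptions, not libgcj API):

    inline static void
    read_barrier_modern ()
    {
      // Acquire fence: emits no instruction on x86, matching the empty read_barrier().
      __atomic_thread_fence (__ATOMIC_ACQUIRE);
    }

    inline static void
    write_barrier_modern ()
    {
      // Release fence: a compiler-only barrier on x86, matching the
      // asm volatile("" ::: "memory") idiom in write_barrier().
      __atomic_thread_fence (__ATOMIC_RELEASE);
    }
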