author     dje <dje@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-06-20 13:02:56 +0000
committer  dje <dje@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-06-20 13:02:56 +0000
commit     e13065bc181173670251ee505e894f3935ac530f (patch)
tree       e879acf299bb2060d99c8b970fb37dc1bdb03b6e /libjava
parent     ac4a80001880ca8fd7559892447025af5c9ee0a6 (diff)
2012-06-20  David Edelsohn  <dje.gcc@gmail.com>
	    Alan Modra  <amodra@gmail.com>

	* sysdep/powerpc/locks.h (compare_and_swap): Use GCC atomic
	intrinsics.
	(release_set): Same.
	(compare_and_swap_release): Same.
	(read_barrier): Same.
	(write_barrier): Same.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@188829 138bc75d-0d04-0410-961f-82ee72b054a4
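For readers unfamiliar with the builtins this patch adopts: __atomic_compare_exchange_n, __atomic_store_n and __atomic_thread_fence are the documented GCC atomic intrinsics (available since GCC 4.7). The standalone snippet below is illustrative only and not part of this commit; the variable names are invented for the example. It exercises the two calls the new locks.h is built on:

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

typedef size_t obj_addr_t;

static volatile obj_addr_t word;	/* starts at 0 */

int
main (void)
{
  obj_addr_t expected = 0;

  /* Acquire CAS: store 42 only if word still equals expected; on
     failure, expected is rewritten with the value actually seen.  */
  bool ok = __atomic_compare_exchange_n (&word, &expected, 42, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);

  printf ("cas %s, word now %zu\n", ok ? "succeeded" : "failed",
          (size_t) word);

  /* Release store: earlier loads and stores complete first.  */
  __atomic_store_n (&word, 0, __ATOMIC_RELEASE);
  return 0;
}

The last two arguments of the compare-exchange are the memory orders for the success and failure paths; the patch uses __ATOMIC_RELAXED on failure because a failed CAS publishes nothing.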
Diffstat (limited to 'libjava')
-rw-r--r--	libjava/ChangeLog		| 10
-rw-r--r--	libjava/sysdep/powerpc/locks.h	| 76
2 files changed, 36 insertions, 50 deletions
diff --git a/libjava/ChangeLog b/libjava/ChangeLog
index 8a6dd0f1d90..152e9d7db08 100644
--- a/libjava/ChangeLog
+++ b/libjava/ChangeLog
@@ -1,3 +1,13 @@
+2012-06-20  David Edelsohn  <dje.gcc@gmail.com>
+	    Alan Modra  <amodra@gmail.com>
+
+	* sysdep/powerpc/locks.h (compare_and_swap): Use GCC atomic
+	intrinsics.
+	(release_set): Same.
+	(compare_and_swap_release): Same.
+	(read_barrier): Same.
+	(write_barrier): Same.
+
 2012-06-15  Andreas Schwab  <schwab@linux-m68k.org>
 
 	* sysdep/m68k/locks.h (compare_and_swap): Use
diff --git a/libjava/sysdep/powerpc/locks.h b/libjava/sysdep/powerpc/locks.h
index 2e9eb0eb36c..ecff4e251de 100644
--- a/libjava/sysdep/powerpc/locks.h
+++ b/libjava/sysdep/powerpc/locks.h
@@ -11,87 +11,63 @@ details.  */
 #ifndef __SYSDEP_LOCKS_H__
 #define __SYSDEP_LOCKS_H__
 
-#ifdef __LP64__
-#define _LARX "ldarx "
-#define _STCX "stdcx. "
-#else
-#define _LARX "lwarx "
-#ifdef __PPC405__
-#define _STCX "sync; stwcx. "
-#else
-#define _STCX "stwcx. "
-#endif
-#endif
-
 typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
 				/* address.				*/
 
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+
 inline static bool
-compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
+compare_and_swap (volatile obj_addr_t *addr,
+		  obj_addr_t old,
 		  obj_addr_t new_val)
 {
-  obj_addr_t ret;
-
-  __asm__ __volatile__ (
-	   "	" _LARX "%0,0,%1 \n"
-	   "	xor. %0,%3,%0\n"
-	   "	bne $+12\n"
-	   "	" _STCX "%2,0,%1\n"
-	   "	bne- $-16\n"
-	: "=&r" (ret)
-	: "r" (addr), "r" (new_val), "r" (old)
-	: "cr0", "memory");
-
-  /* This version of __compare_and_swap is to be used when acquiring
-     a lock, so we don't need to worry about whether other memory
-     operations have completed, but we do need to be sure that any loads
-     after this point really occur after we have acquired the lock.  */
-  __asm__ __volatile__ ("isync" : : : "memory");
-  return ret == 0;
+  return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+				      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 }
 
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment.
+
 inline static void
 release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
 {
-  __asm__ __volatile__ ("sync" : : : "memory");
-  *addr = new_val;
+  __atomic_store_n (addr, new_val, __ATOMIC_RELEASE);
 }
 
+// Compare_and_swap with release semantics instead of acquire semantics.
+
 inline static bool
 compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
 			  obj_addr_t new_val)
 {
-  obj_addr_t ret;
-
-  __asm__ __volatile__ ("sync" : : : "memory");
-
-  __asm__ __volatile__ (
-	   "	" _LARX "%0,0,%1 \n"
-	   "	xor. %0,%3,%0\n"
-	   "	bne $+12\n"
-	   "	" _STCX "%2,0,%1\n"
-	   "	bne- $-16\n"
-	: "=&r" (ret)
-	: "r" (addr), "r" (new_val), "r" (old)
-	: "cr0", "memory");
-
-  return ret == 0;
+  return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+				      __ATOMIC_RELEASE, __ATOMIC_RELAXED);
 }
 
 // Ensure that subsequent instructions do not execute on stale
 // data that was loaded from memory before the barrier.
+
 inline static void
 read_barrier ()
 {
-  __asm__ __volatile__ ("isync" : : : "memory");
+  __atomic_thread_fence (__ATOMIC_ACQUIRE);
 }
 
+
 // Ensure that prior stores to memory are completed with respect to other
 // processors.
+
 inline static void
 write_barrier ()
 {
-  __asm__ __volatile__ ("sync" : : : "memory");
+  __atomic_thread_fence (__ATOMIC_RELEASE);
 }
 
 #endif
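As a usage note, hypothetical and not from the libjava tree: these primitives pair naturally as a spinlock, which is why compare_and_swap carries acquire semantics and release_set carries release semantics. A minimal sketch, assuming the patched definitions above (toy_lock, toy_unlock and lock_word are invented names):

#include <stddef.h>
#include <stdbool.h>

typedef size_t obj_addr_t;

/* Same definitions as the patched locks.h.  */
inline static bool
compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
                  obj_addr_t new_val)
{
  return __atomic_compare_exchange_n (addr, &old, new_val, 0,
                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __atomic_store_n (addr, new_val, __ATOMIC_RELEASE);
}

static volatile obj_addr_t lock_word;	/* 0 = free, 1 = held */

static void
toy_lock (void)
{
  /* Acquire semantics: reads inside the critical section cannot be
     reordered before the CAS that takes the lock.  */
  while (!compare_and_swap (&lock_word, 0, 1))
    ;				/* spin until the lock looks free */
}

static void
toy_unlock (void)
{
  /* Release semantics: writes inside the critical section complete
     before other processors can observe the lock as free.  */
  release_set (&lock_word, 0);
}

The barrier replacements follow the same idea: instead of hard-coding isync/sync, the acquire and release fences let GCC choose the appropriate instruction for the target PowerPC variant.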