diff options
author | amodra <amodra@138bc75d-0d04-0410-961f-82ee72b054a4> | 2002-07-29 23:26:27 +0000 |
---|---|---|
committer | amodra <amodra@138bc75d-0d04-0410-961f-82ee72b054a4> | 2002-07-29 23:26:27 +0000 |
commit | 36d577b1566d9816e21daeff48a9508976dd00a5 (patch) | |
tree | b388d8a32006e82e5926d94d8e3e5fc517f11e67 /libjava/sysdep | |
parent | ce9a1f8166521ff63323a91e22ee14c0edfefda6 (diff) | |
download | gcc-36d577b1566d9816e21daeff48a9508976dd00a5.tar.gz |
* sysdep/powerpc/locks.h: Formatting.
(_LARX): Define.
(_STCX): Define.
(compare_and_swap): Use _LARX and _STCX.
(compare_and_swap_release): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@55855 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libjava/sysdep')
-rw-r--r-- | libjava/sysdep/powerpc/locks.h | 49 |
1 file changed, 31 insertions(+), 18 deletions(-)
diff --git a/libjava/sysdep/powerpc/locks.h b/libjava/sysdep/powerpc/locks.h
index 4d4532b1f83..c9c3183d6bc 100644
--- a/libjava/sysdep/powerpc/locks.h
+++ b/libjava/sysdep/powerpc/locks.h
@@ -11,26 +11,38 @@ details.  */
 #ifndef __SYSDEP_LOCKS_H__
 #define __SYSDEP_LOCKS_H__
 
+#ifdef __powerpc64__
+#define _LARX "ldarx "
+#define _STCX "stdcx. "
+#else
+#define _LARX "lwarx "
+#ifdef __PPC405__
+#define _STCX "sync; stwcx. "
+#else
+#define _STCX "stwcx. "
+#endif
+#endif
+
 typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
 				/* address.				*/
 
 inline static bool
-compare_and_swap(volatile obj_addr_t *addr,
-		 obj_addr_t old,
-		 obj_addr_t new_val)
+compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
+		  obj_addr_t new_val)
 {
   int ret;
 
   __asm__ __volatile__ (
-	   "0:    lwarx %0,0,%1 ;"
+	   "0:    " _LARX "%0,0,%1 ;"
 	   "      xor. %0,%3,%0;"
 	   "      bne 1f;"
-	   "      stwcx. %2,0,%1;"
+	   "      " _STCX "%2,0,%1;"
 	   "      bne- 0b;"
 	   "1:    "
-	  : "=&r"(ret)
-	  : "r"(addr), "r"(new_val), "r"(old)
+	  : "=&r" (ret)
+	  : "r" (addr), "r" (new_val), "r" (old)
 	  : "cr0", "memory");
+
   /* This version of __compare_and_swap is to be used when acquiring
      a lock, so we don't need to worry about whether other memory
      operations have completed, but we do need to be sure that any loads
@@ -40,37 +52,38 @@ compare_and_swap(volatile obj_addr_t *addr,
 }
 
 inline static void
-release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
 {
   __asm__ __volatile__ ("sync" : : : "memory");
-  *(addr) = new_val;
+  *addr = new_val;
 }
 
 inline static bool
-compare_and_swap_release(volatile obj_addr_t *addr,
-		         obj_addr_t old,
-		         obj_addr_t new_val)
+compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
+			  obj_addr_t new_val)
 {
   int ret;
 
   __asm__ __volatile__ ("sync" : : : "memory");
+
   __asm__ __volatile__ (
-	   "0:    lwarx %0,0,%1 ;"
+	   "0:    " _LARX "%0,0,%1 ;"
 	   "      xor. %0,%3,%0;"
 	   "      bne 1f;"
-	   "      stwcx. %2,0,%1;"
+	   "      " _STCX "%2,0,%1;"
 	   "      bne- 0b;"
 	   "1:    "
-	  : "=&r"(ret)
-	  : "r"(addr), "r"(new_val), "r"(old)
+	  : "=&r" (ret)
+	  : "r" (addr), "r" (new_val), "r" (old)
 	  : "cr0", "memory");
+
   return ret == 0;
 }
 
 // Ensure that subsequent instructions do not execute on stale
 // data that was loaded from memory before the barrier.
 inline static void
-read_barrier()
+read_barrier ()
 {
   __asm__ __volatile__ ("isync" : : : "memory");
 }
@@ -78,7 +91,7 @@ read_barrier()
 // Ensure that prior stores to memory are completed with respect to other
 // processors.
 inline static void
-write_barrier()
+write_barrier ()
 {
   __asm__ __volatile__ ("sync" : : : "memory");
 }