summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorH.J. Lu <hjl.tools@gmail.com>2012-05-01 14:31:30 +0400
committerIvan Maidanski <ivmai@mail.ru>2012-05-01 18:27:49 +0400
commit1da5cd270ae96e4a95af76e2c43a9a6bbee66ad9 (patch)
tree23d04d4ac6edb05e6a363cea27205ba183f0f3eb
parentb4993e167ce894d9db7f8f25e8863d95211a8d12 (diff)
downloadlibatomic_ops-1da5cd270ae96e4a95af76e2c43a9a6bbee66ad9.tar.gz
Fix for x32 by removing 'q' suffix in x86-64 instructions
(Apply commit 711f7fe from 'master' branch) We do not need the 'q' suffix on x86_64 atomic instructions for AO_t which is defined as "unsigned long"; the latter is 32-bit for x32 and 64-bit for x86-64; the register operand in x86-64 atomic instructions is sufficient to properly determine the register size. * src/atomic_ops/sysdeps/gcc/x86_64.h (AO_fetch_and_add_full, AO_and_full, AO_or_full, AO_xor_full, AO_compare_and_swap_full, AO_fetch_compare_and_swap_full): Remove 'q' suffix in asm instruction.
-rw-r--r--src/atomic_ops/sysdeps/gcc/x86_64.h6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/atomic_ops/sysdeps/gcc/x86_64.h b/src/atomic_ops/sysdeps/gcc/x86_64.h
index 3cbe76e..7da6910 100644
--- a/src/atomic_ops/sysdeps/gcc/x86_64.h
+++ b/src/atomic_ops/sysdeps/gcc/x86_64.h
@@ -47,7 +47,7 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
AO_t result;
- __asm__ __volatile__ ("lock; xaddq %0, %1" :
+ __asm__ __volatile__ ("lock; xadd %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
@@ -93,7 +93,7 @@ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
- __asm__ __volatile__ ("lock; orq %1, %0" :
+ __asm__ __volatile__ ("lock; or %1, %0" :
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
#define AO_HAVE_or_full
@@ -118,7 +118,7 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
return (int)__sync_bool_compare_and_swap(addr, old, new_val);
# else
char result;
- __asm__ __volatile__("lock; cmpxchgq %3, %0; setz %1"
+ __asm__ __volatile__("lock; cmpxchg %3, %0; setz %1"
: "=m" (*addr), "=a" (result)
: "m" (*addr), "r" (new_val), "a" (old) : "memory");
return (int) result;