author    ivmai <ivmai>                      2011-06-03 09:17:00 +0000
committer Ivan Maidanski <ivmai@mail.ru>     2011-07-25 16:03:26 +0400
commit    c132f9d50440a1dcf9d73f9f05d83af9e40b91dc (patch)
tree      880518ab7c84433b8b7b82f5b621461baaf9c2a2
parent    9ba4c7a5ce4352081779900a3983889d352a94c9 (diff)
download  libatomic_ops-c132f9d50440a1dcf9d73f9f05d83af9e40b91dc.tar.gz
2011-06-03 Ivan Maidanski <ivmai@mail.ru>
	* src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Remove blank line between AO_func and AO_HAVE_func definitions.
	* src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/generic_pthread.h: Ditto.
	* src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/ordered.h: Ditto.
	* src/atomic_ops/sysdeps/ordered_except_wr.h: Ditto.
	* src/atomic_ops/sysdeps/read_ordered.h: Ditto.
	* src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/short_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/alpha.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/cris.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/hppa.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/ia64.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/m68k.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/mips.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/powerpc.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/s390.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/sparc.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/x86.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/x86_64.h: Ditto.
	* src/atomic_ops/sysdeps/hpc/hppa.h: Ditto.
	* src/atomic_ops/sysdeps/hpc/ia64.h: Ditto.
	* src/atomic_ops/sysdeps/ibmc/powerpc.h: Ditto.
	* src/atomic_ops/sysdeps/msftc/common32_defs.h: Ditto.
	* src/atomic_ops/sysdeps/msftc/x86.h: Ditto.
	* src/atomic_ops/sysdeps/msftc/x86_64.h: Ditto.
	* src/atomic_ops/sysdeps/sunc/sparc.h: Ditto.
	* src/atomic_ops/sysdeps/sunc/x86.h: Ditto.
	* src/atomic_ops/sysdeps/sunc/x86_64.h: Ditto.
	* src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Reformat comment.
	* src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
	* src/atomic_ops/sysdeps/ordered.h: Ditto.
	* src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
	* src/atomic_ops/sysdeps/test_and_set_t_is_char.h: Remove file tail blank lines.
	* src/atomic_ops/sysdeps/gcc/arm.h (AO_test_and_set_full): Don't define for ARMv2.
	* src/atomic_ops/sysdeps/gcc/powerpc.h (AO_load_acquire, AO_test_and_set, AO_compare_and_swap): Merge adjacent definitions.
	* src/atomic_ops/sysdeps/ibmc/powerpc.h (AO_HAVE_store_release): Define.
	* src/atomic_ops/sysdeps/sunc/sparc.h: Expand all tabs to spaces; remove trailing spaces at EOLn.
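
Most of the diff below applies one layout convention: each AO_func primitive is now followed directly by its #define AO_HAVE_func feature macro, with the separating blank line dropped, and block comments are reflowed into the per-line /* ... */ style already used in these headers. A minimal sketch of the resulting layout, modeled on atomic_load_store.h (illustrative only, not an excerpt of this patch):

    /* Definitions for architectures on which loads and stores of AO_t are */
    /* atomic for all legal alignments.                                     */
    AO_INLINE AO_t
    AO_load(const volatile AO_t *addr)
    {
      /* Cast away the volatile qualifier; a plain aligned load is atomic here. */
      return (*(const AO_t *)addr);
    }
    #define AO_HAVE_load    /* feature macro now adjacent to the definition it advertises */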
-rw-r--r--  ChangeLog | 54
-rw-r--r--  src/atomic_ops/sysdeps/aligned_atomic_load_store.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/atomic_load_store.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/char_atomic_load_store.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/gcc/alpha.h | 3
-rw-r--r--  src/atomic_ops/sysdeps/gcc/arm.h | 80
-rw-r--r--  src/atomic_ops/sysdeps/gcc/cris.h | 30
-rw-r--r--  src/atomic_ops/sysdeps/gcc/hppa.h | 3
-rw-r--r--  src/atomic_ops/sysdeps/gcc/ia64.h | 14
-rw-r--r--  src/atomic_ops/sysdeps/gcc/m68k.h | 3
-rw-r--r--  src/atomic_ops/sysdeps/gcc/mips.h | 5
-rw-r--r--  src/atomic_ops/sysdeps/gcc/powerpc.h | 81
-rw-r--r--  src/atomic_ops/sysdeps/gcc/s390.h | 3
-rw-r--r--  src/atomic_ops/sysdeps/gcc/sparc.h | 4
-rw-r--r--  src/atomic_ops/sysdeps/gcc/x86.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/gcc/x86_64.h | 16
-rw-r--r--  src/atomic_ops/sysdeps/generic_pthread.h | 19
-rw-r--r--  src/atomic_ops/sysdeps/hpc/hppa.h | 3
-rw-r--r--  src/atomic_ops/sysdeps/hpc/ia64.h | 9
-rw-r--r--  src/atomic_ops/sysdeps/ibmc/powerpc.h | 12
-rw-r--r--  src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/int_atomic_load_store.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/msftc/common32_defs.h | 4
-rw-r--r--  src/atomic_ops/sysdeps/msftc/x86.h | 4
-rw-r--r--  src/atomic_ops/sysdeps/msftc/x86_64.h | 17
-rw-r--r--  src/atomic_ops/sysdeps/ordered.h | 7
-rw-r--r--  src/atomic_ops/sysdeps/ordered_except_wr.h | 57
-rw-r--r--  src/atomic_ops/sysdeps/read_ordered.h | 97
-rw-r--r--  src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/short_atomic_load_store.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/sunc/sparc.h | 17
-rw-r--r--  src/atomic_ops/sysdeps/sunc/x86.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/sunc/x86_64.h | 8
-rw-r--r--  src/atomic_ops/sysdeps/test_and_set_t_is_char.h | 13
34 files changed, 223 insertions, 400 deletions
diff --git a/ChangeLog b/ChangeLog
index 87efa3d..fc22a51 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,57 @@
+2011-06-03 Ivan Maidanski <ivmai@mail.ru>
+
+ * src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Remove
+ blank line between AO_func and AO_HAVE_func definitions.
+ * src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/generic_pthread.h: Ditto.
+ * src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/ordered.h: Ditto.
+ * src/atomic_ops/sysdeps/ordered_except_wr.h: Ditto.
+ * src/atomic_ops/sysdeps/read_ordered.h: Ditto.
+ * src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/short_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/alpha.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/cris.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/hppa.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/ia64.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/m68k.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/mips.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/powerpc.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/s390.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/sparc.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/x86.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/x86_64.h: Ditto.
+ * src/atomic_ops/sysdeps/hpc/hppa.h: Ditto.
+ * src/atomic_ops/sysdeps/hpc/ia64.h: Ditto.
+ * src/atomic_ops/sysdeps/ibmc/powerpc.h: Ditto.
+ * src/atomic_ops/sysdeps/msftc/common32_defs.h: Ditto.
+ * src/atomic_ops/sysdeps/msftc/x86.h: Ditto.
+ * src/atomic_ops/sysdeps/msftc/x86_64.h: Ditto.
+ * src/atomic_ops/sysdeps/sunc/sparc.h: Ditto.
+ * src/atomic_ops/sysdeps/sunc/x86.h: Ditto.
+ * src/atomic_ops/sysdeps/sunc/x86_64.h: Ditto.
+ * src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Reformat
+ comment.
+ * src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/ordered.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
+ * src/atomic_ops/sysdeps/test_and_set_t_is_char.h: Remove file
+ tail blank lines.
+ * src/atomic_ops/sysdeps/gcc/arm.h (AO_test_and_set_full): Don't
+ define for ARMv2.
+ * src/atomic_ops/sysdeps/gcc/powerpc.h (AO_load_acquire,
+ AO_test_and_set, AO_compare_and_swap): Merge adjacent definitions.
+ * src/atomic_ops/sysdeps/ibmc/powerpc.h (AO_HAVE_store_release):
+ Define.
+ * src/atomic_ops/sysdeps/sunc/sparc.h: Expand all tabs to spaces;
+ remove trailing spaces at EOLn.
+
2011-06-02 Ivan Maidanski <ivmai@mail.ru>
* tests/test_malloc.c (main): Remove unused "exper_n" local
diff --git a/src/atomic_ops/sysdeps/aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/aligned_atomic_load_store.h
index 071bea0..d24fe1d 100644
--- a/src/atomic_ops/sysdeps/aligned_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/aligned_atomic_load_store.h
@@ -20,10 +20,8 @@
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of AO_t are
- * atomic fo all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of AO_t are */
+/* atomic fo all legal alignments. */
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
@@ -33,7 +31,6 @@ AO_load(const volatile AO_t *addr)
/* volatile adds barrier semantics. */
return *(AO_t *)addr;
}
-
#define AO_HAVE_load
AO_INLINE void
@@ -42,5 +39,4 @@ AO_store(volatile AO_t *addr, AO_t new_val)
assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0);
(*(AO_t *)addr) = new_val;
}
-
#define AO_HAVE_store
diff --git a/src/atomic_ops/sysdeps/atomic_load_store.h b/src/atomic_ops/sysdeps/atomic_load_store.h
index e4bf103..1210891 100644
--- a/src/atomic_ops/sysdeps/atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/atomic_load_store.h
@@ -20,10 +20,8 @@
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of AO_t are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of AO_t are */
+/* atomic for all legal alignments. */
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
@@ -32,7 +30,6 @@ AO_load(const volatile AO_t *addr)
/* volatile adds barrier semantics. */
return (*(const AO_t *)addr);
}
-
#define AO_HAVE_load
AO_INLINE void
@@ -40,5 +37,4 @@ AO_store(volatile AO_t *addr, AO_t new_val)
{
(*(AO_t *)addr) = new_val;
}
-
#define AO_HAVE_store
diff --git a/src/atomic_ops/sysdeps/char_atomic_load_store.h b/src/atomic_ops/sysdeps/char_atomic_load_store.h
index ca12541..ae7005a 100644
--- a/src/atomic_ops/sysdeps/char_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/char_atomic_load_store.h
@@ -20,10 +20,8 @@
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned char are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of unsigned */
+/* char are atomic for all legal alignments. */
AO_INLINE unsigned char
AO_char_load(const volatile unsigned char *addr)
@@ -32,7 +30,6 @@ AO_char_load(const volatile unsigned char *addr)
/* volatile adds barrier semantics. */
return (*(const unsigned char *)addr);
}
-
#define AO_HAVE_char_load
AO_INLINE void
@@ -40,5 +37,4 @@ AO_char_store(volatile unsigned char *addr, unsigned char new_val)
{
(*(unsigned char *)addr) = new_val;
}
-
#define AO_HAVE_char_store
diff --git a/src/atomic_ops/sysdeps/gcc/alpha.h b/src/atomic_ops/sysdeps/gcc/alpha.h
index ea6293c..8a7bc2c 100644
--- a/src/atomic_ops/sysdeps/gcc/alpha.h
+++ b/src/atomic_ops/sysdeps/gcc/alpha.h
@@ -27,7 +27,6 @@ AO_nop_full(void)
{
__asm__ __volatile__("mb" : : : "memory");
}
-
#define AO_HAVE_nop_full
AO_INLINE void
@@ -35,7 +34,6 @@ AO_nop_write(void)
{
__asm__ __volatile__("wmb" : : : "memory");
}
-
#define AO_HAVE_nop_write
/* mb should be used for AO_nop_read(). That's the default. */
@@ -62,5 +60,4 @@ AO_compare_and_swap(volatile AO_t *addr,
:"memory");
return was_equal;
}
-
#define AO_HAVE_compare_and_swap
diff --git a/src/atomic_ops/sysdeps/gcc/arm.h b/src/atomic_ops/sysdeps/gcc/arm.h
index 7f9010d..2393686 100644
--- a/src/atomic_ops/sysdeps/gcc/arm.h
+++ b/src/atomic_ops/sysdeps/gcc/arm.h
@@ -19,17 +19,16 @@
#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
-/* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC
- * A data memory barrier must be raised via CP15 command (see documentation).
- *
- * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a
- * memory barrier (DMB). Raising it via CP15 should still work as told me by the
- * support engineers. If it turns out to be much quicker than we should implement
- * custom code for ARMv7 using the asm { dmb } command.
- *
- * If only a single processor is used, we can define AO_UNIPROCESSOR
- * and do not need to access CP15 for ensuring a DMB
-*/
+/* NEC LE-IT: ARMv6 is the first architecture providing support for */
+/* simple LL/SC. A data memory barrier must be raised via CP15 command */
+/* (see documentation). */
+/* ARMv7 is compatible to ARMv6 but has a simpler command for issuing */
+/* a memory barrier (DMB). Raising it via CP15 should still work as */
+/* told me by the support engineers. If it turns out to be much quicker */
+/* than we should implement custom code for ARMv7 using the asm { dmb } */
+/* instruction. */
+/* If only a single processor is used, we can define AO_UNIPROCESSOR */
+/* and do not need to access CP15 for ensuring a DMB. */
/* NEC LE-IT: gcc has no way to easily check the arm architecture */
/* but it defines only one of __ARM_ARCH_x__ to be true. */
@@ -52,7 +51,6 @@ AO_nop_full(void)
: "=&r"(dest) : : "memory");
#endif
}
-
#define AO_HAVE_nop_full
/* NEC LE-IT: AO_t load is simple reading */
@@ -106,16 +104,14 @@ AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
#define AO_HAVE_store
/* NEC LE-IT: replace the SWAP as recommended by ARM:
-
"Applies to: ARM11 Cores
- Though the SWP instruction will still work with ARM V6 cores, it is
- recommended to use the new V6 synchronization instructions. The SWP
- instruction produces 'locked' read and write accesses which are atomic,
- i.e. another operation cannot be done between these locked accesses which
- ties up external bus (AHB,AXI) bandwidth and can increase worst case
- interrupt latencies. LDREX,STREX are more flexible, other instructions can
- be done between the LDREX and STREX accesses.
- "
+ Though the SWP instruction will still work with ARM V6 cores, it is
+ recommended to use the new V6 synchronization instructions. The SWP
+ instruction produces 'locked' read and write accesses which are atomic,
+ i.e. another operation cannot be done between these locked accesses which
+ ties up external bus (AHB,AXI) bandwidth and can increase worst case
+ interrupt latencies. LDREX,STREX are more flexible, other instructions
+ can be done between the LDREX and STREX accesses."
*/
AO_INLINE AO_TS_t
AO_test_and_set(volatile AO_TS_t *addr)
@@ -135,7 +131,6 @@ AO_test_and_set(volatile AO_TS_t *addr)
return oldval;
}
-
#define AO_HAVE_test_and_set
/* NEC LE-IT: fetch and add for ARMv6 */
@@ -157,7 +152,6 @@ AO_fetch_and_add(volatile AO_t *p, AO_t incr)
return result;
}
-
#define AO_HAVE_fetch_and_add
/* NEC LE-IT: fetch and add1 for ARMv6 */
@@ -179,7 +173,6 @@ AO_fetch_and_add1(volatile AO_t *p)
return result;
}
-
#define AO_HAVE_fetch_and_add1
/* NEC LE-IT: fetch and sub for ARMv6 */
@@ -201,7 +194,6 @@ AO_fetch_and_sub1(volatile AO_t *p)
return result;
}
-
#define AO_HAVE_fetch_and_sub1
/* NEC LE-IT: compare and swap */
@@ -273,23 +265,25 @@ AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
/* It appears that SWP is the only simple memory barrier. */
#include "../all_atomic_load_store.h"
-AO_INLINE AO_TS_VAL_t
-AO_test_and_set_full(volatile AO_TS_t *addr)
-{
- AO_TS_VAL_t oldval;
- /* SWP on ARM is very similar to XCHG on x86. */
- /* The first operand is the result, the second the value */
- /* to be stored. Both registers must be different from addr. */
- /* Make the address operand an early clobber output so it */
- /* doesn't overlap with the other operands. The early clobber*/
- /* on oldval is necessary to prevent the compiler allocating */
- /* them to the same register if they are both unused. */
- __asm__ __volatile__("swp %0, %2, [%3]"
- : "=&r"(oldval), "=&r"(addr)
- : "r"(1), "1"(addr)
- : "memory");
- return oldval;
-}
-#define AO_HAVE_test_and_set_full
+#if !defined(__ARM_ARCH_2__)
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ AO_TS_VAL_t oldval;
+ /* SWP on ARM is very similar to XCHG on x86. */
+ /* The first operand is the result, the second the value */
+ /* to be stored. Both registers must be different from addr. */
+ /* Make the address operand an early clobber output so it */
+ /* doesn't overlap with the other operands. The early clobber */
+ /* on oldval is necessary to prevent the compiler allocating */
+ /* them to the same register if they are both unused. */
+ __asm__ __volatile__("swp %0, %2, [%3]"
+ : "=&r"(oldval), "=&r"(addr)
+ : "r"(1), "1"(addr)
+ : "memory");
+ return oldval;
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !__ARM_ARCH_2__ */
#endif /* __ARM_ARCH_x */
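
The arm.h hunk above also has a functional effect: AO_test_and_set_full is no longer advertised on ARMv2, so portable callers should gate on the AO_HAVE_* feature macro rather than assume the primitive exists. A hedged client-side sketch (the lock name and the fallback comment are illustrative, not part of this patch):

    #include "atomic_ops.h"

    static AO_TS_t my_lock = AO_TS_INITIALIZER;   /* illustrative lock variable */

    void enter_critical(void)
    {
    #ifdef AO_HAVE_test_and_set_full
      while (AO_test_and_set_full(&my_lock) == AO_TS_SET) {
        /* spin; the previous owner resets the flag via AO_CLEAR() */
      }
    #else
      /* no hardware test-and-set advertised (e.g. ARMv2 here): fall back to a
         portable lock such as a pthread mutex */
    #endif
    }

    void leave_critical(void)
    {
    #ifdef AO_HAVE_test_and_set_full
      AO_CLEAR(&my_lock);   /* typically a release store of AO_TS_CLEAR */
    #endif
    }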
diff --git a/src/atomic_ops/sysdeps/gcc/cris.h b/src/atomic_ops/sysdeps/gcc/cris.h
index 3864905..cbca1e7 100644
--- a/src/atomic_ops/sysdeps/gcc/cris.h
+++ b/src/atomic_ops/sysdeps/gcc/cris.h
@@ -1,30 +1,30 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * SOFTWARE.
*
* Most of this code originally comes from Hans-Peter Nilsson. It is included
* here with his permission.
*
* This version has not been tested. It was coped here from a GC
* patch so that we wouldn't lose the code in the upgrade to gc7.
- */
+ */
#include "../all_atomic_load_store.h"
@@ -55,17 +55,15 @@ AO_test_and_set_full(volatile AO_TS_t *addr) {
/* Note the use of a dummy output of *addr to expose the write. The
memory barrier is to stop *other* writes being moved past this code. */
__asm__ __volatile__("clearf\n"
- "0:\n\t"
- "movu.b [%2],%0\n\t"
- "ax\n\t"
- "move.b %3,[%2]\n\t"
- "bwf 0b\n\t"
- "clearf"
- : "=&r" (ret), "=m" (*addr)
- : "r" (addr), "r" ((int) 1), "m" (*addr)
- : "memory");
+ "0:\n\t"
+ "movu.b [%2],%0\n\t"
+ "ax\n\t"
+ "move.b %3,[%2]\n\t"
+ "bwf 0b\n\t"
+ "clearf"
+ : "=&r" (ret), "=m" (*addr)
+ : "r" (addr), "r" ((int) 1), "m" (*addr)
+ : "memory");
return ret;
}
-
#define AO_HAVE_test_and_set_full
-
diff --git a/src/atomic_ops/sysdeps/gcc/hppa.h b/src/atomic_ops/sysdeps/gcc/hppa.h
index 663f6b6..e7365e0 100644
--- a/src/atomic_ops/sysdeps/gcc/hppa.h
+++ b/src/atomic_ops/sysdeps/gcc/hppa.h
@@ -83,6 +83,7 @@ AO_test_and_set_full(volatile AO_TS_t * addr)
volatile unsigned int *a = __ldcw_align (addr);
return (AO_TS_VAL_t) __ldcw (a);
}
+#define AO_HAVE_test_and_set_full
AO_INLINE void
AO_pa_clear(volatile AO_TS_t * addr)
@@ -92,5 +93,3 @@ AO_pa_clear(volatile AO_TS_t * addr)
*a = 1;
}
#define AO_CLEAR(addr) AO_pa_clear(addr)
-
-#define AO_HAVE_test_and_set_full
diff --git a/src/atomic_ops/sysdeps/gcc/ia64.h b/src/atomic_ops/sysdeps/gcc/ia64.h
index 19ad36e..60c7a7e 100644
--- a/src/atomic_ops/sysdeps/gcc/ia64.h
+++ b/src/atomic_ops/sysdeps/gcc/ia64.h
@@ -83,7 +83,6 @@ AO_fetch_and_add1_release (volatile AO_t *addr)
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
@@ -96,7 +95,6 @@ AO_fetch_and_sub1_acquire (volatile AO_t *addr)
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
@@ -109,7 +107,6 @@ AO_fetch_and_sub1_release (volatile AO_t *addr)
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_fetch_and_sub1_release
#ifndef _ILP32
@@ -134,7 +131,6 @@ AO_int_fetch_and_add1_release (volatile unsigned int *addr)
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_add1_release
AO_INLINE unsigned int
@@ -146,7 +142,6 @@ AO_int_fetch_and_sub1_acquire (volatile unsigned int *addr)
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_sub1_acquire
AO_INLINE unsigned int
@@ -158,7 +153,6 @@ AO_int_fetch_and_sub1_release (volatile unsigned int *addr)
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_sub1_release
#endif /* !_ILP32 */
@@ -177,7 +171,6 @@ AO_compare_and_swap_acquire(volatile AO_t *addr,
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
@@ -194,7 +187,6 @@ AO_compare_and_swap_release(volatile AO_t *addr,
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
@@ -209,7 +201,6 @@ AO_char_compare_and_swap_acquire(volatile unsigned char *addr,
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_acquire
AO_INLINE int
@@ -224,7 +215,6 @@ AO_char_compare_and_swap_release(volatile unsigned char *addr,
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_release
AO_INLINE int
@@ -239,7 +229,6 @@ AO_short_compare_and_swap_acquire(volatile unsigned short *addr,
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_acquire
AO_INLINE int
@@ -254,7 +243,6 @@ AO_short_compare_and_swap_release(volatile unsigned short *addr,
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_release
#ifndef _ILP32
@@ -269,7 +257,6 @@ AO_int_compare_and_swap_acquire(volatile unsigned int *addr,
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory");
return (oldval == old);
}
-
#define AO_HAVE_int_compare_and_swap_acquire
AO_INLINE int
@@ -282,7 +269,6 @@ AO_int_compare_and_swap_release(volatile unsigned int *addr,
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory");
return (oldval == old);
}
-
#define AO_HAVE_int_compare_and_swap_release
#endif /* !_ILP32 */
diff --git a/src/atomic_ops/sysdeps/gcc/m68k.h b/src/atomic_ops/sysdeps/gcc/m68k.h
index a815d81..ffa38d7 100644
--- a/src/atomic_ops/sysdeps/gcc/m68k.h
+++ b/src/atomic_ops/sysdeps/gcc/m68k.h
@@ -45,7 +45,6 @@ AO_test_and_set_full(volatile AO_TS_t *addr) {
: "memory");
return oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
@@ -62,8 +61,6 @@ AO_compare_and_swap_full(volatile AO_t *addr,
: "memory");
return -result;
}
-
#define AO_HAVE_compare_and_swap_full
-
#include "../ao_t_is_int.h"
diff --git a/src/atomic_ops/sysdeps/gcc/mips.h b/src/atomic_ops/sysdeps/gcc/mips.h
index bc2a236..2e1e979 100644
--- a/src/atomic_ops/sysdeps/gcc/mips.h
+++ b/src/atomic_ops/sysdeps/gcc/mips.h
@@ -38,7 +38,6 @@ AO_nop_full(void)
" .set pop "
: : : "memory");
}
-
#define AO_HAVE_nop_full
AO_INLINE int
@@ -65,7 +64,6 @@ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
: "memory");
return was_equal;
}
-
#define AO_HAVE_compare_and_swap
/* FIXME: I think the implementations below should be automatically */
@@ -77,7 +75,6 @@ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_nop_full();
return result;
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
@@ -85,7 +82,6 @@ AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_nop_full();
return AO_compare_and_swap(addr, old, new_val);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
@@ -96,7 +92,6 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_nop_full();
return result;
}
-
#define AO_HAVE_compare_and_swap_full
/*
diff --git a/src/atomic_ops/sysdeps/gcc/powerpc.h b/src/atomic_ops/sysdeps/gcc/powerpc.h
index 5d2f386..d3e3839 100644
--- a/src/atomic_ops/sysdeps/gcc/powerpc.h
+++ b/src/atomic_ops/sysdeps/gcc/powerpc.h
@@ -41,7 +41,6 @@ AO_nop_full(void)
{
__asm__ __volatile__("sync" : : : "memory");
}
-
#define AO_HAVE_nop_full
/* lwsync apparently works for everything but a StoreLoad barrier. */
@@ -68,12 +67,11 @@ AO_lwsync(void)
/* cheaper. And the documentation is fairly explicit that this also */
/* has acquire semantics. */
/* ppc64 uses ld not lwz */
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result;
-
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
__asm__ __volatile__ (
"ld%U1%X1 %0,%1\n"
"cmpw %0,%0\n"
@@ -81,14 +79,7 @@ AO_load_acquire(const volatile AO_t *addr)
"1: isync\n"
: "=r" (result)
: "m"(*addr) : "memory", "cr0");
- return result;
-}
#else
-AO_INLINE AO_t
-AO_load_acquire(const volatile AO_t *addr)
-{
- AO_t result;
-
/* FIXME: We should get gcc to allocate one of the condition */
/* registers. I always got "impossible constraint" when I */
/* tried the "y" constraint. */
@@ -99,9 +90,9 @@ AO_load_acquire(const volatile AO_t *addr)
"1: isync\n"
: "=r" (result)
: "m"(*addr) : "memory", "cc");
+#endif
return result;
}
-#endif
#define AO_HAVE_load_acquire
/* We explicitly specify store_release, since it relies */
@@ -112,16 +103,15 @@ AO_store_release(volatile AO_t *addr, AO_t value)
AO_lwsync();
*addr = value;
}
-
#define AO_HAVE_load_acquire
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
-/* Completely untested. And we should be using smaller objects anyway. */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+/* Completely untested. And we should be using smaller objects anyway. */
unsigned long oldval;
unsigned long temp = 1; /* locked value */
@@ -135,14 +125,7 @@ AO_test_and_set(volatile AO_TS_t *addr) {
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
-
- return (AO_TS_VAL_t)oldval;
-}
-
#else
-
-AO_INLINE AO_TS_VAL_t
-AO_test_and_set(volatile AO_TS_t *addr) {
int oldval;
int temp = 1; /* locked value */
@@ -156,12 +139,9 @@ AO_test_and_set(volatile AO_TS_t *addr) {
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
-
+#endif
return (AO_TS_VAL_t)oldval;
}
-
-#endif
-
#define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
@@ -170,7 +150,6 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
@@ -178,7 +157,6 @@ AO_test_and_set_release(volatile AO_TS_t *addr) {
AO_lwsync();
return AO_test_and_set(addr);
}
-
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
@@ -189,16 +167,14 @@ AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_full
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
-/* FIXME: Completely untested. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_t oldval;
int result = 0;
-
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+/* FIXME: Completely untested. */
__asm__ __volatile__(
"1:ldarx %0,0,%2\n" /* load and reserve */
"cmpd %0, %4\n" /* if load is not equal to */
@@ -210,17 +186,7 @@ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
-
- return result;
-}
-
#else
-
-AO_INLINE int
-AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
- AO_t oldval;
- int result = 0;
-
__asm__ __volatile__(
"1:lwarx %0,0,%2\n" /* load and reserve */
"cmpw %0, %4\n" /* if load is not equal to */
@@ -232,11 +198,9 @@ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
-
+#endif
return result;
}
-#endif
-
#define AO_HAVE_compare_and_swap
AO_INLINE int
@@ -245,7 +209,6 @@ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
@@ -253,7 +216,6 @@ AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_lwsync();
return AO_compare_and_swap(addr, old, new_val);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
@@ -264,17 +226,14 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_full
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
-/* FIXME: Completely untested. */
-
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
AO_t newval;
-
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+/* FIXME: Completely untested. */
__asm__ __volatile__(
"1:ldarx %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
@@ -283,19 +242,7 @@ AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
-
- return oldval;
-}
-
-#define AO_HAVE_fetch_and_add
-
#else
-
-AO_INLINE AO_t
-AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
- AO_t oldval;
- AO_t newval;
-
__asm__ __volatile__(
"1:lwarx %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
@@ -304,21 +251,17 @@ AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
-
+#endif
return oldval;
}
-
#define AO_HAVE_fetch_and_add
-#endif
-
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) {
AO_t result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
-
#define AO_HAVE_fetch_and_add_acquire
AO_INLINE AO_t
@@ -326,7 +269,6 @@ AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) {
AO_lwsync();
return AO_fetch_and_add(addr, incr);
}
-
#define AO_HAVE_fetch_and_add_release
AO_INLINE AO_t
@@ -337,7 +279,6 @@ AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_fetch_and_add_full
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
diff --git a/src/atomic_ops/sysdeps/gcc/s390.h b/src/atomic_ops/sysdeps/gcc/s390.h
index c9facf6..b8563e6 100644
--- a/src/atomic_ops/sysdeps/gcc/s390.h
+++ b/src/atomic_ops/sysdeps/gcc/s390.h
@@ -41,7 +41,7 @@
/* Presumably they're cheaper than CS? */
AO_INLINE AO_t AO_compare_and_swap_full(volatile AO_t *addr,
- AO_t old, AO_t new_val)
+ AO_t old, AO_t new_val)
{
int retval;
__asm__ __volatile__ (
@@ -57,7 +57,6 @@ AO_INLINE AO_t AO_compare_and_swap_full(volatile AO_t *addr,
: "cc", "memory");
return retval == 0;
}
-
#define AO_HAVE_compare_and_swap_full
/* FIXME: Add double-wide compare-and-swap for 32-bit executables. */
diff --git a/src/atomic_ops/sysdeps/gcc/sparc.h b/src/atomic_ops/sysdeps/gcc/sparc.h
index b264b0d..41bc2f5 100644
--- a/src/atomic_ops/sysdeps/gcc/sparc.h
+++ b/src/atomic_ops/sysdeps/gcc/sparc.h
@@ -36,7 +36,6 @@ AO_test_and_set_full(volatile AO_TS_t *addr) {
: "m"(*addr) : "memory");
return oldval;
}
-
#define AO_HAVE_test_and_set_full
#ifndef AO_NO_SPARC_V9
@@ -61,9 +60,8 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
: "memory", "cc");
return (int)ret;
}
-
#define AO_HAVE_compare_and_swap_full
-#endif /* AO_NO_SPARC_V9 */
+#endif /* !AO_NO_SPARC_V9 */
/* FIXME: This needs to be extended for SPARC v8 and v9. */
/* SPARC V8 also has swap. V9 has CAS. */
diff --git a/src/atomic_ops/sysdeps/gcc/x86.h b/src/atomic_ops/sysdeps/gcc/x86.h
index d6b95af..5e6d7fa 100644
--- a/src/atomic_ops/sysdeps/gcc/x86.h
+++ b/src/atomic_ops/sysdeps/gcc/x86.h
@@ -42,7 +42,6 @@ AO_nop_full(void)
{
__asm__ __volatile__("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
#else
@@ -67,7 +66,6 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
@@ -80,7 +78,6 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
@@ -93,7 +90,6 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
/* Really only works for 486 and later */
@@ -103,7 +99,6 @@ AO_or_full (volatile AO_t *p, AO_t incr)
__asm__ __volatile__ ("lock; orl %1, %0" :
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
@@ -121,7 +116,6 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
: "0"(0xff), "m"(*addr) : "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
@@ -138,7 +132,6 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
return (int)result;
# endif
}
-
#define AO_HAVE_compare_and_swap_full
/* Returns nonzero if the comparison succeeded. */
@@ -172,7 +165,6 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
#endif
return (int) result;
}
-
#define AO_HAVE_compare_double_and_swap_double_full
#include "../ao_t_is_int.h"
diff --git a/src/atomic_ops/sysdeps/gcc/x86_64.h b/src/atomic_ops/sysdeps/gcc/x86_64.h
index 3d3eb74..0f68c1e 100644
--- a/src/atomic_ops/sysdeps/gcc/x86_64.h
+++ b/src/atomic_ops/sysdeps/gcc/x86_64.h
@@ -37,7 +37,6 @@ AO_nop_full(void)
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm__ __volatile__("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
/* As far as we can tell, the lfence and sfence instructions are not */
@@ -53,7 +52,6 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
@@ -66,7 +64,6 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
@@ -79,7 +76,6 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
@@ -92,7 +88,6 @@ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
: "memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
@@ -101,7 +96,6 @@ AO_or_full (volatile AO_t *p, AO_t incr)
__asm__ __volatile__ ("lock; orq %1, %0" :
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
@@ -119,7 +113,6 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
: "0"(0xff), "m"(*addr) : "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
@@ -136,10 +129,10 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
return (int) result;
# endif
}
-
#define AO_HAVE_compare_and_swap_full
#ifdef AO_CMPXCHG16B_AVAILABLE
+
/* NEC LE-IT: older AMD Opterons are missing this instruction.
* On these machines SIGILL will be thrown.
* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
@@ -162,6 +155,7 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
+
#else
/* this one provides spinlock based emulation of CAS implemented in */
/* atomic_ops.c. We probably do not want to do this here, since it is */
@@ -177,10 +171,10 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
- return AO_compare_double_and_swap_double_emulation(addr,
- old_val1, old_val2,
- new_val1, new_val2);
+ return AO_compare_double_and_swap_double_emulation(addr, old_val1, old_val2,
+ new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */
+
#endif /* AO_CMPXCHG16B_AVAILABLE */
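
The x86_64 hunks above are formatting-only, but they touch the double-width CAS path: AO_compare_double_and_swap_double_full is either the cmpxchg16b instruction or, without AO_CMPXCHG16B_AVAILABLE, the spinlock-based emulation from atomic_ops.c. A hedged usage sketch of that primitive, using the AO_val1/AO_val2 fields of AO_double_t for a {payload, version} pair (the names and the version-counter idea are illustrative, not code from this patch):

    #include "atomic_ops.h"

    #ifdef AO_HAVE_compare_double_and_swap_double_full
    static volatile AO_double_t slot;   /* AO_val1 = payload, AO_val2 = version */

    int update_slot(AO_t new_payload)
    {
      AO_t old_payload = slot.AO_val1;
      AO_t old_version = slot.AO_val2;
      /* Both halves must still match the values read above; a recycled payload
         with a stale version makes the swap fail, so the caller retries. */
      return AO_compare_double_and_swap_double_full(&slot,
                                                    old_payload, old_version,
                                                    new_payload, old_version + 1);
    }
    #endif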
diff --git a/src/atomic_ops/sysdeps/generic_pthread.h b/src/atomic_ops/sysdeps/generic_pthread.h
index 90bf117..55a1967 100644
--- a/src/atomic_ops/sysdeps/generic_pthread.h
+++ b/src/atomic_ops/sysdeps/generic_pthread.h
@@ -43,7 +43,6 @@ AO_nop_full(void)
pthread_mutex_lock(&AO_pt_lock);
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_nop_full
AO_INLINE AO_t
@@ -55,7 +54,6 @@ AO_load_full(const volatile AO_t *addr)
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_load_full
AO_INLINE void
@@ -65,7 +63,6 @@ AO_store_full(volatile AO_t *addr, AO_t val)
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_store_full
AO_INLINE unsigned char
@@ -77,7 +74,6 @@ AO_char_load_full(const volatile unsigned char *addr)
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_char_load_full
AO_INLINE void
@@ -87,7 +83,6 @@ AO_char_store_full(volatile unsigned char *addr, unsigned char val)
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_char_store_full
AO_INLINE unsigned short
@@ -99,7 +94,6 @@ AO_short_load_full(const volatile unsigned short *addr)
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_short_load_full
AO_INLINE void
@@ -109,7 +103,6 @@ AO_short_store_full(volatile unsigned short *addr, unsigned short val)
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_short_store_full
AO_INLINE unsigned int
@@ -121,7 +114,6 @@ AO_int_load_full(const volatile unsigned int *addr)
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_int_load_full
AO_INLINE void
@@ -131,7 +123,6 @@ AO_int_store_full(volatile unsigned int *addr, unsigned int val)
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_int_store_full
AO_INLINE AO_TS_VAL_t
@@ -145,7 +136,6 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
assert(result == AO_TS_SET || result == AO_TS_CLEAR);
return result;
}
-
#define AO_HAVE_test_and_set_full
AO_INLINE AO_t
@@ -159,7 +149,6 @@ AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
@@ -173,7 +162,6 @@ AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
@@ -187,7 +175,6 @@ AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
@@ -201,7 +188,6 @@ AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
@@ -214,7 +200,6 @@ AO_or_full(volatile AO_t *p, AO_t incr)
*p = (tmp | incr);
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_or_full
AO_INLINE int
@@ -232,7 +217,6 @@ AO_compare_and_swap_full(volatile AO_t *addr,
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
-
#define AO_HAVE_compare_and_swap_full
/* Unlike real architectures, we define both double-width CAS variants. */
@@ -241,7 +225,6 @@ typedef struct {
AO_t AO_val1;
AO_t AO_val2;
} AO_double_t;
-
#define AO_HAVE_double_t
AO_INLINE int
@@ -261,7 +244,6 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
-
#define AO_HAVE_compare_double_and_swap_double_full
AO_INLINE int
@@ -281,7 +263,6 @@ AO_compare_and_swap_double_full(volatile AO_double_t *addr,
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
-
#define AO_HAVE_compare_and_swap_double_full
/* We can't use hardware loads and stores, since they don't */
diff --git a/src/atomic_ops/sysdeps/hpc/hppa.h b/src/atomic_ops/sysdeps/hpc/hppa.h
index fe11fec..b34d6bc 100644
--- a/src/atomic_ops/sysdeps/hpc/hppa.h
+++ b/src/atomic_ops/sysdeps/hpc/hppa.h
@@ -87,6 +87,7 @@ AO_test_and_set_full(volatile AO_TS_t * addr)
__ldcw (a, ret);
return ret;
}
+#define AO_HAVE_test_and_set_full
AO_INLINE void
AO_pa_clear(volatile AO_TS_t * addr)
@@ -97,5 +98,3 @@ AO_pa_clear(volatile AO_TS_t * addr)
*(volatile unsigned int *)a = 1;
}
#define AO_CLEAR(addr) AO_pa_clear(addr)
-
-#define AO_HAVE_test_and_set_full
diff --git a/src/atomic_ops/sysdeps/hpc/ia64.h b/src/atomic_ops/sysdeps/hpc/ia64.h
index 1cfec1c..b793fcb 100644
--- a/src/atomic_ops/sysdeps/hpc/ia64.h
+++ b/src/atomic_ops/sysdeps/hpc/ia64.h
@@ -63,7 +63,6 @@ AO_fetch_and_add1_release (volatile AO_t *p)
return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, 1,
_LDHINT_NONE, _UP_MEM_FENCE);
}
-
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
@@ -72,7 +71,6 @@ AO_fetch_and_sub1_acquire (volatile AO_t *p)
return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, -1,
_LDHINT_NONE, _DOWN_MEM_FENCE);
}
-
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
@@ -81,7 +79,6 @@ AO_fetch_and_sub1_release (volatile AO_t *p)
return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, -1,
_LDHINT_NONE, _UP_MEM_FENCE);
}
-
#define AO_HAVE_fetch_and_sub1_release
AO_INLINE int
@@ -95,7 +92,6 @@ AO_compare_and_swap_acquire(volatile AO_t *addr,
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
@@ -109,7 +105,6 @@ AO_compare_and_swap_release(volatile AO_t *addr,
/* Hopefully the compiler knows not to reorder the above two? */
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
@@ -123,7 +118,6 @@ AO_char_compare_and_swap_acquire(volatile unsigned char *addr,
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_acquire
AO_INLINE int
@@ -137,7 +131,6 @@ AO_char_compare_and_swap_release(volatile unsigned char *addr,
/* Hopefully the compiler knows not to reorder the above two? */
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_release
AO_INLINE int
@@ -151,7 +144,6 @@ AO_short_compare_and_swap_acquire(volatile unsigned short *addr,
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_acquire
AO_INLINE int
@@ -165,7 +157,6 @@ AO_short_compare_and_swap_release(volatile unsigned short *addr,
/* Hopefully the compiler knows not to reorder the above two? */
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_release
#ifndef __LP64__
diff --git a/src/atomic_ops/sysdeps/ibmc/powerpc.h b/src/atomic_ops/sysdeps/ibmc/powerpc.h
index 6aba2b4..3e4f539 100644
--- a/src/atomic_ops/sysdeps/ibmc/powerpc.h
+++ b/src/atomic_ops/sysdeps/ibmc/powerpc.h
@@ -41,7 +41,6 @@ AO_load_acquire(const volatile AO_t *addr)
AO_lwsync();
return result;
}
-
#define AO_HAVE_load_acquire
AO_INLINE void
@@ -50,8 +49,7 @@ AO_store_release(volatile AO_t *addr, AO_t value)
AO_lwsync();
*addr = value;
}
-
-#define AO_HAVE_load_acquire
+#define AO_HAVE_store_release
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
@@ -60,7 +58,6 @@ AO_store_release(volatile AO_t *addr, AO_t value)
AO_test_and_set(volatile AO_TS_t *addr) {
# error FIXME Implement me
}
-
#define AO_HAVE_test_and_set*/
AO_INLINE AO_TS_VAL_t
@@ -69,7 +66,6 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
@@ -77,7 +73,6 @@ AO_test_and_set_release(volatile AO_TS_t *addr) {
AO_lwsync();
return AO_test_and_set(addr);
}
-
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
@@ -88,14 +83,12 @@ AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_full
/*AO_INLINE AO_t
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
# error FIXME Implement me
}
-
#define AO_HAVE_compare_and_swap*/
AO_INLINE AO_t
@@ -104,7 +97,6 @@ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE AO_t
@@ -112,7 +104,6 @@ AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_lwsync();
return AO_compare_and_swap(addr, old, new_val);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE AO_t
@@ -123,7 +114,6 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_full
/* FIXME: We should also implement fetch_and_add and or primitives */
diff --git a/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h
index e1c373c..1dcb3b2 100644
--- a/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h
@@ -20,10 +20,8 @@
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned int are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of unsigned */
+/* int are atomic for all legal alignments. */
AO_INLINE unsigned int
AO_int_load(const volatile unsigned int *addr)
@@ -33,7 +31,6 @@ AO_int_load(const volatile unsigned int *addr)
/* volatile adds barrier semantics. */
return (*(unsigned int *)addr);
}
-
#define AO_HAVE_int_load
AO_INLINE void
@@ -42,5 +39,4 @@ AO_int_store(volatile unsigned int *addr, unsigned int new_val)
assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0);
(*(unsigned int *)addr) = new_val;
}
-
#define AO_HAVE_int_store
diff --git a/src/atomic_ops/sysdeps/int_atomic_load_store.h b/src/atomic_ops/sysdeps/int_atomic_load_store.h
index 8892692..0c3777b 100644
--- a/src/atomic_ops/sysdeps/int_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/int_atomic_load_store.h
@@ -20,10 +20,8 @@
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned int are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of unsigned */
+/* int are atomic for all legal alignments. */
AO_INLINE unsigned int
AO_int_load(const volatile unsigned int *addr)
@@ -32,7 +30,6 @@ AO_int_load(const volatile unsigned int *addr)
/* volatile adds barrier semantics. */
return (*(const unsigned int *)addr);
}
-
#define AO_HAVE_int_load
AO_INLINE void
@@ -40,5 +37,4 @@ AO_int_store(volatile unsigned int *addr, unsigned int new_val)
{
(*(unsigned int *)addr) = new_val;
}
-
#define AO_HAVE_int_store
diff --git a/src/atomic_ops/sysdeps/msftc/common32_defs.h b/src/atomic_ops/sysdeps/msftc/common32_defs.h
index 96adba2..5d519f0 100644
--- a/src/atomic_ops/sysdeps/msftc/common32_defs.h
+++ b/src/atomic_ops/sysdeps/msftc/common32_defs.h
@@ -83,7 +83,6 @@ AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
return _InterlockedExchangeAdd((LONG AO_INTERLOCKED_VOLATILE *)p,
(LONG)incr);
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
@@ -91,7 +90,6 @@ AO_fetch_and_add1_full(volatile AO_t *p)
{
return _InterlockedIncrement((LONG AO_INTERLOCKED_VOLATILE *)p) - 1;
}
-
#define AO_HAVE_fetch_and_add1_full
AO_INLINE AO_t
@@ -99,7 +97,6 @@ AO_fetch_and_sub1_full(volatile AO_t *p)
{
return _InterlockedDecrement((LONG AO_INTERLOCKED_VOLATILE *)p) + 1;
}
-
#define AO_HAVE_fetch_and_sub1_full
#ifdef AO_ASSUME_WINDOWS98
@@ -117,6 +114,5 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
== (LONG)old;
# endif
}
-
# define AO_HAVE_compare_and_swap_full
#endif /* AO_ASSUME_WINDOWS98 */
diff --git a/src/atomic_ops/sysdeps/msftc/x86.h b/src/atomic_ops/sysdeps/msftc/x86.h
index 347b66c..3ab17a1 100644
--- a/src/atomic_ops/sysdeps/msftc/x86.h
+++ b/src/atomic_ops/sysdeps/msftc/x86.h
@@ -57,7 +57,6 @@ AO_nop_full(void)
{
__asm { mfence }
}
-
#define AO_HAVE_nop_full
#else
@@ -79,7 +78,6 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
}
/* Ignore possible "missing return value" warning here. */
}
-
#define AO_HAVE_test_and_set_full
#ifdef _WIN64
@@ -87,6 +85,7 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
#endif
#ifdef AO_ASSUME_VISTA
+
/* NEC LE-IT: whenever we run on a pentium class machine we have that
* certain function */
@@ -115,6 +114,7 @@ AO_double_compare_and_swap_full(volatile AO_double_t *addr,
}
#define AO_HAVE_double_compare_and_swap_full
#endif /* __cplusplus */
+
#endif /* AO_ASSUME_VISTA */
#include "../ao_t_is_int.h"
diff --git a/src/atomic_ops/sysdeps/msftc/x86_64.h b/src/atomic_ops/sysdeps/msftc/x86_64.h
index efc3e08..135a053 100644
--- a/src/atomic_ops/sysdeps/msftc/x86_64.h
+++ b/src/atomic_ops/sysdeps/msftc/x86_64.h
@@ -59,7 +59,6 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
return _InterlockedExchangeAdd64((LONGLONG volatile *)p, (LONGLONG)incr);
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
@@ -67,7 +66,6 @@ AO_fetch_and_add1_full (volatile AO_t *p)
{
return _InterlockedIncrement64((LONGLONG volatile *)p) - 1;
}
-
#define AO_HAVE_fetch_and_add1_full
AO_INLINE AO_t
@@ -75,7 +73,6 @@ AO_fetch_and_sub1_full (volatile AO_t *p)
{
return _InterlockedDecrement64((LONGLONG volatile *)p) + 1;
}
-
#define AO_HAVE_fetch_and_sub1_full
AO_INLINE int
@@ -86,7 +83,6 @@ AO_compare_and_swap_full(volatile AO_t *addr,
(LONGLONG)new_val, (LONGLONG)old)
== (LONGLONG)old;
}
-
#define AO_HAVE_compare_and_swap_full
/* As far as we can tell, the lfence and sfence instructions are not */
@@ -100,7 +96,6 @@ AO_nop_full(void)
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm { mfence }
}
-
#define AO_HAVE_nop_full
AO_INLINE AO_TS_VAL_t
@@ -113,13 +108,11 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
xchg byte ptr [rbx],al ;
}
}
-
#define AO_HAVE_test_and_set_full
#endif /* AO_ASM_X64_AVAILABLE */
#ifdef AO_CMPXCHG16B_AVAILABLE
-
/* AO_compare_double_and_swap_double_full needs implementation for Win64.
* Also see ../gcc/x86_64.h for partial old Opteron workaround.
*/
@@ -139,15 +132,11 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
return _InterlockedCompareExchange128((volatile __int64 *)addr,
new_val2 /* high */, new_val1 /* low */, comparandResult);
}
-
# define AO_HAVE_compare_double_and_swap_double_full
# elif defined(AO_ASM_X64_AVAILABLE)
-
- /* If there is no intrinsic _InterlockedCompareExchange128 then we
- * need basically what's given below.
- */
-
+ /* If there is no intrinsic _InterlockedCompareExchange128 then we */
+ /* need basically what's given below. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
@@ -163,9 +152,7 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
setz rax ;
}
}
-
# define AO_HAVE_compare_double_and_swap_double_full
-
# endif /* _MSC_VER >= 1500 || AO_ASM_X64_AVAILABLE */
#endif /* AO_CMPXCHG16B_AVAILABLE */
diff --git a/src/atomic_ops/sysdeps/ordered.h b/src/atomic_ops/sysdeps/ordered.h
index da77b63..ba9822d 100644
--- a/src/atomic_ops/sysdeps/ordered.h
+++ b/src/atomic_ops/sysdeps/ordered.h
@@ -20,10 +20,8 @@
* SOFTWARE.
*/
-/*
- * These are common definitions for architectures that provide processor
- * ordered memory operations.
- */
+/* These are common definitions for architectures that provide */
+/* processor ordered memory operations. */
#include "ordered_except_wr.h"
@@ -32,5 +30,4 @@ AO_nop_full(void)
{
AO_compiler_barrier();
}
-
#define AO_HAVE_nop_full
diff --git a/src/atomic_ops/sysdeps/ordered_except_wr.h b/src/atomic_ops/sysdeps/ordered_except_wr.h
index ee51aff..8b5c48d 100644
--- a/src/atomic_ops/sysdeps/ordered_except_wr.h
+++ b/src/atomic_ops/sysdeps/ordered_except_wr.h
@@ -36,65 +36,56 @@ AO_nop_write(void)
/* sfence according to Intel docs. Pentium 3 and up. */
/* Unnecessary for cached accesses? */
}
-
#define AO_HAVE_NOP_WRITE
#if defined(AO_HAVE_store)
-
-AO_INLINE void
-AO_store_write(volatile AO_t *addr, AO_t val)
-{
- AO_compiler_barrier();
- AO_store(addr, val);
-}
+ AO_INLINE void
+ AO_store_write(volatile AO_t *addr, AO_t val)
+ {
+ AO_compiler_barrier();
+ AO_store(addr, val);
+ }
# define AO_HAVE_store_write
# define AO_store_release(addr, val) AO_store_write(addr, val)
# define AO_HAVE_store_release
-
#endif /* AO_HAVE_store */
#if defined(AO_HAVE_char_store)
-
-AO_INLINE void
-AO_char_store_write(volatile unsigned char *addr, unsigned char val)
-{
- AO_compiler_barrier();
- AO_char_store(addr, val);
-}
+ AO_INLINE void
+ AO_char_store_write(volatile unsigned char *addr, unsigned char val)
+ {
+ AO_compiler_barrier();
+ AO_char_store(addr, val);
+ }
# define AO_HAVE_char_store_write
# define AO_char_store_release(addr, val) AO_char_store_write(addr, val)
# define AO_HAVE_char_store_release
-
#endif /* AO_HAVE_char_store */
#if defined(AO_HAVE_short_store)
-
-AO_INLINE void
-AO_short_store_write(volatile unsigned short *addr, unsigned short val)
-{
- AO_compiler_barrier();
- AO_short_store(addr, val);
-}
+ AO_INLINE void
+ AO_short_store_write(volatile unsigned short *addr, unsigned short val)
+ {
+ AO_compiler_barrier();
+ AO_short_store(addr, val);
+ }
# define AO_HAVE_short_store_write
# define AO_short_store_release(addr, val) AO_short_store_write(addr, val)
# define AO_HAVE_short_store_release
-
#endif /* AO_HAVE_short_store */
#if defined(AO_HAVE_int_store)
-
-AO_INLINE void
-AO_int_store_write(volatile unsigned int *addr, unsigned int val)
-{
- AO_compiler_barrier();
- AO_int_store(addr, val);
-}
+ AO_INLINE void
+ AO_int_store_write(volatile unsigned int *addr, unsigned int val)
+ {
+ AO_compiler_barrier();
+ AO_int_store(addr, val);
+ }
# define AO_HAVE_int_store_write
# define AO_int_store_release(addr, val) AO_int_store_write(addr, val)
# define AO_HAVE_int_store_release
-
#endif /* AO_HAVE_int_store */
diff --git a/src/atomic_ops/sysdeps/read_ordered.h b/src/atomic_ops/sysdeps/read_ordered.h
index 1589e5c..c66ac4e 100644
--- a/src/atomic_ops/sysdeps/read_ordered.h
+++ b/src/atomic_ops/sysdeps/read_ordered.h
@@ -32,69 +32,60 @@ AO_nop_read(void)
{
AO_compiler_barrier();
}
-
#define AO_HAVE_NOP_READ
#ifdef AO_HAVE_load
-
-AO_INLINE AO_t
-AO_load_read(const volatile AO_t *addr)
-{
- AO_t result = AO_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_load_read
-
-#define AO_load_acquire(addr) AO_load_read(addr)
-#define AO_HAVE_load_acquire
-
+ AO_INLINE AO_t
+ AO_load_read(const volatile AO_t *addr)
+ {
+ AO_t result = AO_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_load_read
+
+# define AO_load_acquire(addr) AO_load_read(addr)
+# define AO_HAVE_load_acquire
#endif /* AO_HAVE_load */
#ifdef AO_HAVE_char_load
-
-AO_INLINE AO_t
-AO_char_load_read(const volatile unsigned char *addr)
-{
- AO_t result = AO_char_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_char_load_read
-
-#define AO_char_load_acquire(addr) AO_char_load_read(addr)
-#define AO_HAVE_char_load_acquire
-
+ AO_INLINE AO_t
+ AO_char_load_read(const volatile unsigned char *addr)
+ {
+ AO_t result = AO_char_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_char_load_read
+
+# define AO_char_load_acquire(addr) AO_char_load_read(addr)
+# define AO_HAVE_char_load_acquire
#endif /* AO_HAVE_char_load */
#ifdef AO_HAVE_short_load
-
-AO_INLINE AO_t
-AO_short_load_read(const volatile unsigned short *addr)
-{
- AO_t result = AO_short_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_short_load_read
-
-#define AO_short_load_acquire(addr) AO_short_load_read(addr)
-#define AO_HAVE_short_load_acquire
-
+ AO_INLINE AO_t
+ AO_short_load_read(const volatile unsigned short *addr)
+ {
+ AO_t result = AO_short_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_short_load_read
+
+# define AO_short_load_acquire(addr) AO_short_load_read(addr)
+# define AO_HAVE_short_load_acquire
#endif /* AO_HAVE_short_load */
#ifdef AO_HAVE_int_load
-
-AO_INLINE AO_t
-AO_int_load_read(const volatile unsigned int *addr)
-{
- AO_t result = AO_int_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_int_load_read
-
-#define AO_int_load_acquire(addr) AO_int_load_read(addr)
-#define AO_HAVE_int_load_acquire
-
+ AO_INLINE AO_t
+ AO_int_load_read(const volatile unsigned int *addr)
+ {
+ AO_t result = AO_int_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_int_load_read
+
+# define AO_int_load_acquire(addr) AO_int_load_read(addr)
+# define AO_HAVE_int_load_acquire
#endif /* AO_HAVE_int_load */
diff --git a/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h
index 164ba97..1340934 100644
--- a/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h
@@ -33,7 +33,6 @@ AO_short_load(const volatile unsigned short *addr)
/* volatile adds barrier semantics. */
return (*(unsigned short *)addr);
}
-
#define AO_HAVE_short_load
AO_INLINE void
@@ -42,5 +41,4 @@ AO_short_store(volatile unsigned short *addr, unsigned short new_val)
assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0);
(*(unsigned short *)addr) = new_val;
}
-
#define AO_HAVE_short_store
diff --git a/src/atomic_ops/sysdeps/short_atomic_load_store.h b/src/atomic_ops/sysdeps/short_atomic_load_store.h
index 9a88db5..3f3794c 100644
--- a/src/atomic_ops/sysdeps/short_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/short_atomic_load_store.h
@@ -32,7 +32,6 @@ AO_short_load(const volatile unsigned short *addr)
/* volatile adds barrier semantics. */
return (*(const unsigned short *)addr);
}
-
#define AO_HAVE_short_load
AO_INLINE void
@@ -40,5 +39,4 @@ AO_short_store(volatile unsigned short *addr, unsigned short new_val)
{
(*(unsigned short *)addr) = new_val;
}
-
#define AO_HAVE_short_store
diff --git a/src/atomic_ops/sysdeps/sunc/sparc.h b/src/atomic_ops/sysdeps/sunc/sparc.h
index 3578722..1083a99 100644
--- a/src/atomic_ops/sysdeps/sunc/sparc.h
+++ b/src/atomic_ops/sysdeps/sunc/sparc.h
@@ -1,23 +1,23 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * SOFTWARE.
*/
#include "../all_atomic_load_store.h"
@@ -25,14 +25,13 @@
/* Real SPARC code uses TSO: */
#include "../ordered_except_wr.h"
-/* Test_and_set location is just a byte. */
+/* Test_and_set location is just a byte. */
#include "../test_and_set_t_is_char.h"
extern AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr);
-/* Implemented in separate .S file, for now. */
-
+/* Implemented in separate .S file, for now. */
#define AO_HAVE_test_and_set_full
-/* FIXME: Like the gcc version, this needs to be extended for V8 */
-/* and V9. */
+/* FIXME: Like the gcc version, this needs to be extended for V8 */
+/* and V9. */
diff --git a/src/atomic_ops/sysdeps/sunc/x86.h b/src/atomic_ops/sysdeps/sunc/x86.h
index e1e054a..ab34db9 100644
--- a/src/atomic_ops/sysdeps/sunc/x86.h
+++ b/src/atomic_ops/sysdeps/sunc/x86.h
@@ -39,7 +39,6 @@ AO_nop_full(void)
{
__asm__ __volatile__ ("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
#else
@@ -64,7 +63,6 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
@@ -77,7 +75,6 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
@@ -90,7 +87,6 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
/* Really only works for 486 and later */
@@ -101,7 +97,6 @@ AO_or_full (volatile AO_t *p, AO_t incr)
"=m" (*p) : "r" (incr) /* , "m" (*p) */
: "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
@@ -115,7 +110,6 @@ AO_test_and_set_full (volatile AO_TS_t *addr)
: "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
@@ -128,7 +122,6 @@ AO_compare_and_swap_full (volatile AO_t *addr, AO_t old, AO_t new_val)
: "r" (new_val), "a"(old) : "memory");
return (int) result;
}
-
#define AO_HAVE_compare_and_swap_full
#if 0
@@ -165,7 +158,6 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
#endif
return (int) result;
}
-
#define AO_HAVE_compare_double_and_swap_double_full
#endif
diff --git a/src/atomic_ops/sysdeps/sunc/x86_64.h b/src/atomic_ops/sysdeps/sunc/x86_64.h
index 4d47c2f..b07169e 100644
--- a/src/atomic_ops/sysdeps/sunc/x86_64.h
+++ b/src/atomic_ops/sysdeps/sunc/x86_64.h
@@ -37,7 +37,6 @@ AO_nop_full(void)
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm__ __volatile__ ("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
/* As far as we can tell, the lfence and sfence instructions are not */
@@ -53,7 +52,6 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
@@ -66,7 +64,6 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
@@ -79,7 +76,6 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
@@ -92,7 +88,6 @@ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
: "memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
@@ -102,7 +97,6 @@ AO_or_full (volatile AO_t *p, AO_t incr)
"=m" (*p) : "r" (incr) /* , "m" (*p) */
: "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
@@ -116,7 +110,6 @@ AO_test_and_set_full (volatile AO_TS_t *addr)
: "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
@@ -129,7 +122,6 @@ AO_compare_and_swap_full (volatile AO_t *addr, AO_t old, AO_t new_val)
: "r" (new_val), "a"(old) : "memory");
return (int) result;
}
-
#define AO_HAVE_compare_and_swap_full
#ifdef AO_CMPXCHG16B_AVAILABLE
diff --git a/src/atomic_ops/sysdeps/test_and_set_t_is_char.h b/src/atomic_ops/sysdeps/test_and_set_t_is_char.h
index bde0f21..8e265aa 100644
--- a/src/atomic_ops/sysdeps/test_and_set_t_is_char.h
+++ b/src/atomic_ops/sysdeps/test_and_set_t_is_char.h
@@ -1,24 +1,24 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+ * SOFTWARE.
+ */
/*
* These are common definitions for architectures on which test_and_set
@@ -33,6 +33,3 @@ typedef enum {AO_BYTE_TS_clear = 0, AO_BYTE_TS_set = 0xff} AO_BYTE_TS_val;
#define AO_TS_SET AO_BYTE_TS_set
#define AO_CHAR_TS_T 1
-
-
-