summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
author Ivan Maidanski <ivmai@mail.ru> 2021-10-20 10:37:18 +0300
committer Ivan Maidanski <ivmai@mail.ru> 2021-10-20 18:38:50 +0300
commit 086a92a1da59a435b7cd3fa91119a45693b8beb5 (patch)
tree f8447cec9b7289754c5fe156c19149584dfa62eb /src
parent 1f8f5af84b896668838fcb032b030ee8044fd0e0 (diff)
download libatomic_ops-086a92a1da59a435b7cd3fa91119a45693b8beb5.tar.gz
Support double-wide CAS on armv7+ (MS VC)
* src/atomic_ops/sysdeps/msftc/arm.h [_M_ARM>=6]: Remove TODO about double-wide operations support. * src/atomic_ops/sysdeps/msftc/arm.h [_M_ARM>=7 && !AO_NO_DOUBLE_CAS]: Include standard_ao_double_t.h. * src/atomic_ops/sysdeps/msftc/arm.h [_M_ARM>=7 && !AO_NO_DOUBLE_CAS] (_InterlockedCompareExchange64, _InterlockedCompareExchange64_acq, _InterlockedCompareExchange64_nf, _InterlockedCompareExchange64_rel): Specify as intrinsic. * src/atomic_ops/sysdeps/msftc/arm.h [_M_ARM>=7 && !AO_NO_DOUBLE_CAS] (AO_double_compare_and_swap, AO_double_compare_and_swap_acquire, AO_double_compare_and_swap_release, AO_double_compare_and_swap_full): Implement (similar to that in msftc/x86.h).
Diffstat (limited to 'src')
-rw-r--r-- src/atomic_ops/sysdeps/msftc/arm.h | 64
1 file changed, 60 insertions(+), 4 deletions(-)
diff --git a/src/atomic_ops/sysdeps/msftc/arm.h b/src/atomic_ops/sysdeps/msftc/arm.h
index 2a66551..8579238 100644
--- a/src/atomic_ops/sysdeps/msftc/arm.h
+++ b/src/atomic_ops/sysdeps/msftc/arm.h
@@ -52,11 +52,67 @@
#if _M_ARM >= 6
/* ARMv6 is the first architecture providing support for simple LL/SC. */
-/* #include "../standard_ao_double_t.h" */
-/* TODO: implement double-wide operations (similar to x86). */
-
#else /* _M_ARM < 6 */
-
/* TODO: implement AO_test_and_set_full using SWP. */
#endif /* _M_ARM < 6 */
+
+#if _M_ARM >= 7 && !defined(AO_NO_DOUBLE_CAS)
+
+# include "../standard_ao_double_t.h"
+
+/* These intrinsics are supposed to use LDREXD/STREXD. */
+# pragma intrinsic (_InterlockedCompareExchange64)
+# pragma intrinsic (_InterlockedCompareExchange64_acq)
+# pragma intrinsic (_InterlockedCompareExchange64_nf)
+# pragma intrinsic (_InterlockedCompareExchange64_rel)
+
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_ASSERT_ADDR_ALIGNED(addr);
+ return (double_ptr_storage)_InterlockedCompareExchange64_nf(
+ (__int64 volatile *)addr,
+ new_val.AO_whole /* exchange */,
+ old_val.AO_whole) == old_val.AO_whole;
+ }
+# define AO_HAVE_double_compare_and_swap
+
+ AO_INLINE int
+ AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_ASSERT_ADDR_ALIGNED(addr);
+ return (double_ptr_storage)_InterlockedCompareExchange64_acq(
+ (__int64 volatile *)addr,
+ new_val.AO_whole /* exchange */,
+ old_val.AO_whole) == old_val.AO_whole;
+ }
+# define AO_HAVE_double_compare_and_swap_acquire
+
+ AO_INLINE int
+ AO_double_compare_and_swap_release(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_ASSERT_ADDR_ALIGNED(addr);
+ return (double_ptr_storage)_InterlockedCompareExchange64_rel(
+ (__int64 volatile *)addr,
+ new_val.AO_whole /* exchange */,
+ old_val.AO_whole) == old_val.AO_whole;
+ }
+# define AO_HAVE_double_compare_and_swap_release
+
+ AO_INLINE int
+ AO_double_compare_and_swap_full(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_ASSERT_ADDR_ALIGNED(addr);
+ return (double_ptr_storage)_InterlockedCompareExchange64(
+ (__int64 volatile *)addr,
+ new_val.AO_whole /* exchange */,
+ old_val.AO_whole) == old_val.AO_whole;
+ }
+# define AO_HAVE_double_compare_and_swap_full
+
+#endif /* _M_ARM >= 7 && !AO_NO_DOUBLE_CAS */