author    amacleod <amacleod@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-11-07 20:06:39 +0000
committer amacleod <amacleod@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-11-07 20:06:39 +0000
commit    8808bf16125e1bea5cd2e969d19a53b9618593f1 (patch)
tree      deab25b73551eb04852240c97020eb3e7a1060c6 /libstdc++-v3/include/bits
parent    6df74ab4271c036e5c80aa5b67104c98c0dda1d5 (diff)
download  gcc-8808bf16125e1bea5cd2e969d19a53b9618593f1.tar.gz
2011-11-07  Andrew MacLeod  <amacleod@redhat.com>

	libstdc++-v3
	* include/bits/atomic_base.h (atomic_thread_fence): Call builtin.
	(atomic_signal_fence): Call builtin.
	(atomic_flag::test_and_set): Call __atomic_exchange when it is
	lockfree, otherwise fall back to call __sync_lock_test_and_set.
	(atomic_flag::clear): Call __atomic_store when it is lockfree,
	otherwise fall back to call __sync_lock_release.

	gcc
	* doc/extend.texi: Document behaviour change for __atomic_exchange
	and __atomic_store.
	* optabs.c (expand_atomic_exchange): Expand to
	__sync_lock_test_and_set only when originated from that builtin.
	(expand_atomic_store): Expand to __sync_lock_release when
	originated from that builtin.
	* builtins.c (expand_builtin_sync_lock_test_and_set): Add flag that
	expand_atomic_exchange call originated from here.
	(expand_builtin_sync_lock_release): Add flag that
	expand_atomic_store call originated from here.
	(expand_builtin_atomic_exchange): Add origination flag.
	(expand_builtin_atomic_store): Add origination flag.
	* expr.h (expand_atomic_exchange, expand_atomic_store): Add boolean
	parameters to indicate implementation fall back options.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@181111 138bc75d-0d04-0410-961f-82ee72b054a4
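The fallback strategy the ChangeLog describes can be illustrated outside the library. The following is a hypothetical, stand-alone sketch (not part of the patch): it mirrors how the patched test_and_set chooses between the new __atomic path and the legacy __sync path, and assumes only the GCC builtins __atomic_always_lock_free, __atomic_exchange_n and __sync_lock_test_and_set.

    /* Hypothetical sketch, not part of the patch: the test_and_set fallback
       pattern described in the ChangeLog, written as a free function.  */
    #include <atomic>

    static bool
    flag_test_and_set(unsigned char* p, std::memory_order m)
    {
      if (__atomic_always_lock_free(sizeof(*p), 0))
        return __atomic_exchange_n(p, 1, int(m));   /* lock-free path */

      /* __sync_lock_test_and_set is only an acquire barrier, so emit the
         stronger fence the caller requested before the exchange.  */
      if (m == std::memory_order_seq_cst || m == std::memory_order_release
          || m == std::memory_order_acq_rel)
        std::atomic_thread_fence(m);
      return __sync_lock_test_and_set(p, 1);
    }

Because __atomic_always_lock_free folds to a compile-time constant, only one of the two branches survives into the generated code.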
Diffstat (limited to 'libstdc++-v3/include/bits')
-rw-r--r--  libstdc++-v3/include/bits/atomic_base.h | 60
1 file changed, 54 insertions(+), 6 deletions(-)
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 2e4a6a1894a..e297eb0e6ad 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -69,10 +69,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
void
- atomic_thread_fence(memory_order) noexcept;
+ atomic_thread_fence(memory_order __m) noexcept
+ {
+ __atomic_thread_fence (__m);
+ }
void
- atomic_signal_fence(memory_order) noexcept;
+ atomic_signal_fence(memory_order __m) noexcept
+ {
+ __atomic_signal_fence (__m);
+ }
/// kill_dependency
template<typename _Tp>
@@ -261,13 +267,35 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
test_and_set(memory_order __m = memory_order_seq_cst) noexcept
{
- return __atomic_exchange_n(&_M_i, 1, __m);
+ /* The standard *requires* this to be lock free. If exchange is not
+ always lock free, then resort to the old test_and_set. */
+ if (__atomic_always_lock_free (sizeof (_M_i), 0))
+ return __atomic_exchange_n(&_M_i, 1, __m);
+ else
+ {
+ /* Sync test and set is only guaranteed to be acquire. */
+ if (__m == memory_order_seq_cst || __m == memory_order_release
+ || __m == memory_order_acq_rel)
+ atomic_thread_fence (__m);
+ return __sync_lock_test_and_set (&_M_i, 1);
+ }
}
bool
test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
{
- return __atomic_exchange_n(&_M_i, 1, __m);
+ /* The standard *requires* this to be lock free. If exchange is not
+ always lock free, then resort to the old test_and_set. */
+ if (__atomic_always_lock_free (sizeof (_M_i), 0))
+ return __atomic_exchange_n(&_M_i, 1, __m);
+ else
+ {
+ /* Sync test and set is only guaranteed to be acquire. */
+ if (__m == memory_order_seq_cst || __m == memory_order_release
+ || __m == memory_order_acq_rel)
+ atomic_thread_fence (__m);
+ return __sync_lock_test_and_set (&_M_i, 1);
+ }
}
void
@@ -277,7 +305,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
- __atomic_store_n(&_M_i, 0, __m);
+ /* The standard *requires* this to be lock free. If store is not always
+ lock free, then resort to the old style __sync_lock_release. */
+ if (__atomic_always_lock_free (sizeof (_M_i), 0))
+ __atomic_store_n(&_M_i, 0, __m);
+ else
+ {
+ __sync_lock_release (&_M_i, 0);
+ /* __sync_lock_release is only guaranteed to be a release barrier. */
+ if (__m == memory_order_seq_cst)
+ atomic_thread_fence (__m);
+ }
}
void
@@ -287,7 +325,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
- __atomic_store_n(&_M_i, 0, __m);
+ /* The standard *requires* this to be lock free. If store is not always
+ lock free, then resort to the old style __sync_lock_release. */
+ if (__atomic_always_lock_free (sizeof (_M_i), 0))
+ __atomic_store_n(&_M_i, 0, __m);
+ else
+ {
+ __sync_lock_release (&_M_i, 0);
+ /* __sync_lock_release is only guaranteed to be a release barrier. */
+ if (__m == memory_order_seq_cst)
+ atomic_thread_fence (__m);
+ }
}
};
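For context, the orderings the patched fallbacks are careful to preserve match the flag's typical role as a spinlock: test_and_set(acquire) takes the lock, clear(release) publishes the critical section to the next owner. The sketch below is a minimal, hypothetical usage example assuming only standard C++11 <atomic>; it is not taken from the patch.

    /* Hypothetical usage sketch: a minimal spinlock built on std::atomic_flag.  */
    #include <atomic>

    static std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;

    static void
    spin_lock() noexcept
    {
      /* Spin until the current owner clears the flag; acquire ordering makes
         the previous owner's writes visible once the lock is taken.  */
      while (lock_flag.test_and_set(std::memory_order_acquire))
        { }
    }

    static void
    spin_unlock() noexcept
    {
      /* Release ordering publishes this thread's writes to the next owner.  */
      lock_flag.clear(std::memory_order_release);
    }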