author     Andrew MacLeod <amacleod@redhat.com>        2011-11-07 20:06:39 +0000
committer  Andrew Macleod <amacleod@gcc.gnu.org>       2011-11-07 20:06:39 +0000
commit     0669295b1e7991bb5465267d4bd06576883e648b (patch)
tree       deab25b73551eb04852240c97020eb3e7a1060c6 /libstdc++-v3
parent     fd83db3d51f6379186a012f817d1f1ed003500b0 (diff)
download   gcc-0669295b1e7991bb5465267d4bd06576883e648b.tar.gz
atomic_base.h (atomic_thread_fence): Call builtin.
2011-11-07 Andrew MacLeod <amacleod@redhat.com>
libstdc++-v3
* include/bits/atomic_base.h (atomic_thread_fence): Call builtin.
(atomic_signal_fence): Call builtin.
(atomic_flag::test_and_set): Call __atomic_exchange when it is lockfree,
otherwise fall back to call __sync_lock_test_and_set.
(atomic_flag::clear): Call __atomic_store when it is lockfree,
otherwise fall back to call __sync_lock_release.
gcc
* doc/extend.texi: Document behaviour change for __atomic_exchange and
__atomic_store.
* optabs.c (expand_atomic_exchange): Expand to __sync_lock_test_and_set
only when originated from that builtin.
(expand_atomic_store): Expand to __sync_lock_release when originated
from that builtin.
* builtins.c (expand_builtin_sync_lock_test_and_set): Add flag that
expand_atomic_exchange call originated from here.
(expand_builtin_sync_lock_release): Add flag that expand_atomic_store
call originated from here.
(expand_builtin_atomic_exchange): Add origination flag.
(expand_builtin_atomic_store): Add origination flag.
* expr.h (expand_atomic_exchange, expand_atomic_store): Add boolean
parameters to indicate implementation fall back options.
From-SVN: r181111
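As background for the optabs.c/builtins.c changes above: both builtin families can be called directly from user code. Below is a minimal sketch (not part of this commit; the variable names and chosen memory orders are illustrative only) showing the two families whose interaction the commit documents in extend.texi.

#include <cstdio>

int main()
{
  int flag = 0;

  /* The __atomic_* builtins take an explicit memory order.  Per the change
     described above, their expansion no longer silently degrades to the
     acquire-only / release-only __sync forms unless the call actually
     originated from those __sync builtins.  */
  int prev = __atomic_exchange_n(&flag, 1, __ATOMIC_SEQ_CST);
  __atomic_store_n(&flag, 0, __ATOMIC_SEQ_CST);

  /* Legacy __sync forms: an acquire-only swap and a release-only store of 0.  */
  int prev2 = __sync_lock_test_and_set(&flag, 1);
  __sync_lock_release(&flag);

  std::printf("%d %d %d\n", prev, prev2, flag);
  return 0;
}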
Diffstat (limited to 'libstdc++-v3')
-rw-r--r--   libstdc++-v3/ChangeLog                    |  9
-rw-r--r--   libstdc++-v3/include/bits/atomic_base.h   | 60
2 files changed, 63 insertions, 6 deletions
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index abcee817a4b..f28bc5c4b42 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,12 @@
+2011-11-07  Andrew MacLeod  <amacleod@redhat.com>
+
+	* include/bits/atomic_base.h (atomic_thread_fence): Call builtin.
+	(atomic_signal_fence): Call builtin.
+	(atomic_flag::test_and_set): Call __atomic_exchange when it is lockfree,
+	otherwise fall back to call __sync_lock_test_and_set.
+	(atomic_flag::clear): Call __atomic_store when it is lockfree,
+	otherwise fall back to call __sync_lock_release.
+
 2011-11-07  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
 
 	PR bootstrap/50982
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 2e4a6a1894a..e297eb0e6ad 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -69,10 +69,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   }
 
   void
-  atomic_thread_fence(memory_order) noexcept;
+  atomic_thread_fence(memory_order __m) noexcept
+  {
+    __atomic_thread_fence (__m);
+  }
 
   void
-  atomic_signal_fence(memory_order) noexcept;
+  atomic_signal_fence(memory_order __m) noexcept
+  {
+    __atomic_signal_fence (__m);
+  }
 
   /// kill_dependency
   template<typename _Tp>
@@ -261,13 +267,35 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     bool
     test_and_set(memory_order __m = memory_order_seq_cst) noexcept
     {
-      return __atomic_exchange_n(&_M_i, 1, __m);
+      /* The standard *requires* this to be lock free.  If exchange is not
+         always lock free, the resort to the old test_and_set.  */
+      if (__atomic_always_lock_free (sizeof (_M_i), 0))
+        return __atomic_exchange_n(&_M_i, 1, __m);
+      else
+        {
+          /* Sync test and set is only guaranteed to be acquire.  */
+          if (__m == memory_order_seq_cst || __m == memory_order_release
+              || __m == memory_order_acq_rel)
+            atomic_thread_fence (__m);
+          return __sync_lock_test_and_set (&_M_i, 1);
+        }
     }
 
     bool
     test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
     {
-      return __atomic_exchange_n(&_M_i, 1, __m);
+      /* The standard *requires* this to be lock free.  If exchange is not
+         always lock free, the resort to the old test_and_set.  */
+      if (__atomic_always_lock_free (sizeof (_M_i), 0))
+        return __atomic_exchange_n(&_M_i, 1, __m);
+      else
+        {
+          /* Sync test and set is only guaranteed to be acquire.  */
+          if (__m == memory_order_seq_cst || __m == memory_order_release
+              || __m == memory_order_acq_rel)
+            atomic_thread_fence (__m);
+          return __sync_lock_test_and_set (&_M_i, 1);
+        }
     }
 
     void
@@ -277,7 +305,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __glibcxx_assert(__m != memory_order_acquire);
       __glibcxx_assert(__m != memory_order_acq_rel);
 
-      __atomic_store_n(&_M_i, 0, __m);
+      /* The standard *requires* this to be lock free.  If store is not always
+         lock free, the resort to the old style __sync_lock_release.  */
+      if (__atomic_always_lock_free (sizeof (_M_i), 0))
+        __atomic_store_n(&_M_i, 0, __m);
+      else
+        {
+          __sync_lock_release (&_M_i, 0);
+          /* __sync_lock_release is only guaranteed to be a release barrier.  */
+          if (__m == memory_order_seq_cst)
+            atomic_thread_fence (__m);
+        }
     }
 
     void
@@ -287,7 +325,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
 
-      __atomic_store_n(&_M_i, 0, __m);
+      /* The standard *requires* this to be lock free.  If store is not always
+         lock free, the resort to the old style __sync_lock_release.  */
+      if (__atomic_always_lock_free (sizeof (_M_i), 0))
+        __atomic_store_n(&_M_i, 0, __m);
+      else
+        {
+          __sync_lock_release (&_M_i, 0);
+          /* __sync_lock_release is only guaranteed to be a release barrier.  */
+          if (__m == memory_order_seq_cst)
+            atomic_thread_fence (__m);
+        }
     }
   };
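For readers unfamiliar with the member functions being patched, the usual consumer of atomic_flag::test_and_set and atomic_flag::clear is a spin lock. A minimal usage sketch follows (not part of the patch; the names bump, counter, and lock_flag are illustrative only): test_and_set(acquire) spins until the flag was previously clear, and clear(release) publishes the protected writes, exercising exactly the acquire/release pairing the fallback paths above have to preserve.

#include <atomic>
#include <thread>
#include <vector>

std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;  // illustrative flag, not from the patch
long counter = 0;                               // protected by lock_flag

void bump()
{
  for (int i = 0; i < 100000; ++i)
    {
      // Spin until the flag was previously clear; the acquire here pairs
      // with the release in clear() below.
      while (lock_flag.test_and_set(std::memory_order_acquire))
        ;
      ++counter;
      lock_flag.clear(std::memory_order_release);
    }
}

int main()
{
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i)
    threads.emplace_back(bump);
  for (auto& t : threads)
    t.join();
  return counter == 400000 ? 0 : 1;  // 0 on success
}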