author     amacleod <amacleod@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-11-10 20:38:33 +0000
committer  amacleod <amacleod@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-11-10 20:38:33 +0000
commit     10b744a37ff1abbecf121987f694e0a704b77b95 (patch)
tree       4138d94b9a849ba000843310e148fa8a6d8d6c8a
parent     5335e796aebf8ae6b08f8a8c664759c706e10653 (diff)
PR middle-end/51038
libstdc++-v3
    * include/bits/atomic_base.h (atomic_thread_fence): Call built-in.
    (atomic_signal_fence): Call built-in.
    (test_and_set, clear): Call new atomic built-ins.

gcc
    * builtins.c (expand_builtin_atomic_clear): New.  Expand atomic_clear.
    (expand_builtin_atomic_test_and_set): New.  Expand atomic test_and_set.
    (expand_builtin): Add cases for test_and_set and clear.
    * sync-builtins.def (BUILT_IN_ATOMIC_TEST_AND_SET): New.
    (BUILT_IN_ATOMIC_CLEAR): New.

testsuite
    * gcc.dg/atomic-invalid.c: Add test for invalid __atomic_clear models.
    * gcc.dg/atomic-flag.c: New.  Test __atomic_test_and_set and
    __atomic_clear.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@181271 138bc75d-0d04-0410-961f-82ee72b054a4
 gcc/ChangeLog                           | 11
 gcc/builtins.c                          | 71
 gcc/sync-builtins.def                   |  6
 gcc/testsuite/ChangeLog                 | 13
 gcc/testsuite/gcc.dg/atomic-flag.c      | 32
 gcc/testsuite/gcc.dg/atomic-invalid.c   |  7
 libstdc++-v3/ChangeLog                  |  7
 libstdc++-v3/include/bits/atomic_base.h | 64
 8 files changed, 159 insertions, 52 deletions
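
For orientation, a minimal user-level sketch (not part of the patch) of what the two new built-ins look like in source code; the flag variable and the checks are illustrative only, and assume a compiler that includes this change:

/* Sketch: user-level view of the new built-ins.  */
extern "C" void abort ();

static bool flag;

int main ()
{
  /* __atomic_test_and_set returns the previous value of the flag.  */
  if (__atomic_test_and_set (&flag, __ATOMIC_ACQUIRE))
    abort ();                               /* flag started clear */

  /* Only relaxed, release and seq_cst models are valid for clear.  */
  __atomic_clear (&flag, __ATOMIC_RELEASE);
  return 0;
}
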
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 9c7a2f8ce3a..1f8a56af363 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,12 @@
+2011-11-10 Andrew MacLeod <amacleod@redhat.com>
+
+ PR middle-end/51038
+ * builtins.c (expand_builtin_atomic_clear): New. Expand atomic_clear.
+ (expand_builtin_atomic_test_and_set): New. Expand atomic test_and_set.
+ (expand_builtin): Add cases for test_and_set and clear.
+ * sync-builtins.def (BUILT_IN_ATOMIC_TEST_AND_SET): New.
+ (BUILT_IN_ATOMIC_CLEAR): New.
+
2011-11-10 Roberto Agostino Vitillo <ravitillo@lbl.gov>
PR debug/50983
@@ -37,8 +46,6 @@
be AND followed by NOT.
* builtins.c (expand_builtin_atomic_fetch_op): Patchup code for NAND
should be AND followed by NOT.
- * testsuite/gcc.dg/atomic-noinline[-aux].c: Test no-inline NAND and
- patchup code.
2011-11-10 Jakub Jelinek <jakub@redhat.com>
diff --git a/gcc/builtins.c b/gcc/builtins.c
index d949dbb632f..98dc63604e7 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -5474,6 +5474,71 @@ expand_builtin_atomic_fetch_op (enum machine_mode mode, tree exp, rtx target,
return ret;
}
+
+/* Expand an atomic clear operation.
+ void __atomic_clear (BOOL *obj, enum memmodel)
+ EXP is the call expression. */
+
+static rtx
+expand_builtin_atomic_clear (tree exp)
+{
+ enum machine_mode mode;
+ rtx mem, ret;
+ enum memmodel model;
+
+ mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+
+ if (model == MEMMODEL_ACQUIRE || model == MEMMODEL_ACQ_REL)
+ {
+ error ("invalid memory model for %<__atomic_store%>");
+ return const0_rtx;
+ }
+
+ /* Try issuing an __atomic_store, and allow fallback to __sync_lock_release.
+ If both of those fail, emit a plain store. The only way this can happen
+ is if the bool type is larger than a word size; unlikely, but handle it
+ anyway for completeness. In that case assume a single-threaded model,
+ since there is no atomic support anyway, and no barriers are required. */
+ ret = expand_atomic_store (mem, const0_rtx, model, true);
+ if (!ret)
+ emit_move_insn (mem, const0_rtx);
+ return const0_rtx;
+}
+
+/* Expand an atomic test_and_set operation.
+ bool __atomic_test_and_set (BOOL *obj, enum memmodel)
+ EXP is the call expression. */
+
+static rtx
+expand_builtin_atomic_test_and_set (tree exp)
+{
+ rtx mem, ret;
+ enum memmodel model;
+ enum machine_mode mode;
+
+ mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+
+ /* Try issuing an exchange. If the exchange is lock free, or if the target
+ provides even a limited-functionality __sync_lock_test_and_set, this
+ will use it. */
+ ret = expand_atomic_exchange (NULL_RTX, mem, const1_rtx, model, true);
+ if (ret)
+ return ret;
+
+ /* Otherwise, there is no lock free support for test and set. Simply
+ perform a load and a store. Since this presumes a non-atomic architecture,
+ also assume single threadedness and don't issue barriers either. */
+
+ ret = gen_reg_rtx (mode);
+ emit_move_insn (ret, mem);
+ emit_move_insn (mem, const1_rtx);
+ return ret;
+}
+
+
/* Return true if (optional) argument ARG1 of size ARG0 is always lock free on
this architecture. If ARG1 is NULL, use typical alignment for size ARG0. */
@@ -6702,6 +6767,12 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode,
if (target)
return target;
break;
+
+ case BUILT_IN_ATOMIC_TEST_AND_SET:
+ return expand_builtin_atomic_test_and_set (exp);
+
+ case BUILT_IN_ATOMIC_CLEAR:
+ return expand_builtin_atomic_clear (exp);
case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
return expand_builtin_atomic_always_lock_free (exp);
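
The net effect for users is a memory-model-aware replacement for the old __sync flag idiom; a side-by-side sketch (both flag globals here are illustrative, not part of the patch):

static int flag_i;    /* __sync built-ins operate on integral types */
static bool flag_b;   /* the new built-ins operate on bool */

void demo ()
{
  /* Legacy idiom: test-and-set is acquire-only, release is release-only.  */
  int old_i = __sync_lock_test_and_set (&flag_i, 1);
  __sync_lock_release (&flag_i);

  /* New idiom: the caller chooses the memory model explicitly.  */
  bool old_b = __atomic_test_and_set (&flag_b, __ATOMIC_SEQ_CST);
  __atomic_clear (&flag_b, __ATOMIC_RELEASE);

  (void) old_i;
  (void) old_b;
}
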
diff --git a/gcc/sync-builtins.def b/gcc/sync-builtins.def
index 1a2df9ac018..15ff479bb5e 100644
--- a/gcc/sync-builtins.def
+++ b/gcc/sync-builtins.def
@@ -259,6 +259,12 @@ DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SYNCHRONIZE, "__sync_synchronize",
/* __sync* builtins for the C++ memory model. */
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_TEST_AND_SET, "__atomic_test_and_set",
+ BT_FN_BOOL_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_CLEAR, "__atomic_clear", BT_FN_VOID_VPTR_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+
DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE,
"__atomic_exchange",
BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, ATTR_NOTHROW_LEAF_LIST)
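
The two type codes decode to roughly the following user-visible signatures; this is an illustrative paraphrase of BT_FN_BOOL_VPTR_INT and BT_FN_VOID_VPTR_INT (where VPTR is a volatile void pointer), not an actual header:

/* Approximate signatures implied by the type codes above:

     bool __atomic_test_and_set (volatile void *ptr, int memorder);
     void __atomic_clear (volatile void *ptr, int memorder);
*/
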
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index e57b535ad05..90eded98716 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,16 @@
+2011-11-10 Andrew MacLeod <amacleod@redhat.com>
+
+ PR middle-end/51038
+ * gcc.dg/atomic-invalid.c: Add test for invalid __atomic_clear models.
+ * gcc.dg/atomic-flag.c: New. Test __atomic_test_and_set and
+ __atomic_clear.
+
+2011-11-10 Andrew MacLeod <amacleod@redhat.com>
+
+ PR rtl-optimization/51040
+ * testsuite/gcc.dg/atomic-noinline[-aux].c: Test no-inline NAND and
+ patchup code.
+
2011-11-10 Jason Merrill <jason@redhat.com>
PR c++/51079
diff --git a/gcc/testsuite/gcc.dg/atomic-flag.c b/gcc/testsuite/gcc.dg/atomic-flag.c
new file mode 100644
index 00000000000..771df2c6091
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/atomic-flag.c
@@ -0,0 +1,32 @@
+/* Test __atomic routines for existence and execution. */
+/* { dg-do run } */
+
+#include <stdbool.h>
+
+/* Test that __atomic_test_and_set and __atomic_clear builtins execute. */
+
+extern void abort(void);
+bool a;
+
+main ()
+{
+ bool b;
+
+ __atomic_clear (&a, __ATOMIC_RELAXED);
+ if (a != 0)
+ abort ();
+
+ b = __atomic_test_and_set (&a, __ATOMIC_SEQ_CST);
+ if (a != 1 || b != 0)
+ abort ();
+
+ b = __atomic_test_and_set (&a, __ATOMIC_ACQ_REL);
+ if (b != 1 || a != 1)
+ abort ();
+
+ __atomic_clear (&a, __ATOMIC_SEQ_CST);
+ if (a != 0)
+ abort ();
+
+ return 0;
+}
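
The same sequence can be exercised through the libstdc++ interface this commit rewires onto the built-ins; a minimal C++11 sketch:

#include <atomic>
#include <cassert>

std::atomic_flag f = ATOMIC_FLAG_INIT;

int main ()
{
  /* test_and_set now lowers to __atomic_test_and_set.  */
  assert (!f.test_and_set (std::memory_order_seq_cst));
  assert (f.test_and_set ());

  /* clear now lowers to __atomic_clear.  */
  f.clear (std::memory_order_seq_cst);
  return 0;
}
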
diff --git a/gcc/testsuite/gcc.dg/atomic-invalid.c b/gcc/testsuite/gcc.dg/atomic-invalid.c
index 2b73c91e7c0..48de91f6c75 100644
--- a/gcc/testsuite/gcc.dg/atomic-invalid.c
+++ b/gcc/testsuite/gcc.dg/atomic-invalid.c
@@ -4,9 +4,11 @@
/* { dg-require-effective-target sync_int_long } */
#include <stddef.h>
+#include <stdbool.h>
int i, e, b;
size_t s;
+bool x;
main ()
{
@@ -26,4 +28,9 @@ main ()
i = __atomic_always_lock_free (s, NULL); /* { dg-error "non-constant argument" } */
__atomic_load_n (&i, 44); /* { dg-warning "invalid memory model" } */
+
+ __atomic_clear (&x, __ATOMIC_ACQUIRE); /* { dg-error "invalid memory model" } */
+
+ __atomic_clear (&x, __ATOMIC_ACQ_REL); /* { dg-error "invalid memory model" } */
+
}
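
For contrast, the models __atomic_clear does accept mirror the restrictions on an atomic store; a sketch of the valid calls (the flag is illustrative, not part of the test):

static bool y;

void valid_clear_models ()
{
  __atomic_clear (&y, __ATOMIC_RELAXED);   /* valid */
  __atomic_clear (&y, __ATOMIC_RELEASE);   /* valid */
  __atomic_clear (&y, __ATOMIC_SEQ_CST);   /* valid */
  /* __ATOMIC_ACQUIRE and __ATOMIC_ACQ_REL are rejected, as tested above.  */
}
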
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index c25dd282d05..826ca4482dc 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,10 @@
+2011-11-10 Andrew MacLeod <amacleod@redhat.com>
+
+ PR middle-end/51038
+ * include/bits/atomic_base.h (atomic_thread_fence): Call built-in.
+ (atomic_signal_fence): Call built-in.
+ (test_and_set, clear): Call new atomic built-ins.
+
2011-11-09 Jonathan Wakely <jwakely.gcc@gmail.com>
* include/bits/allocator.h (__shrink_to_fit_aux::_S_do_it): Create
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 5327c1bfa21..f0336611d3f 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -68,11 +68,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __mo2;
}
- void
- atomic_thread_fence(memory_order __m) noexcept;
+ inline void
+ atomic_thread_fence(memory_order __m) noexcept
+ {
+ __atomic_thread_fence (__m);
+ }
- void
- atomic_signal_fence(memory_order __m) noexcept;
+ inline void
+ atomic_signal_fence(memory_order __m) noexcept
+ {
+ __atomic_signal_fence (__m);
+ }
/// kill_dependency
template<typename _Tp>
@@ -261,35 +267,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
test_and_set(memory_order __m = memory_order_seq_cst) noexcept
{
- /* The standard *requires* this to be lock free. If exchange is not
- always lock free, the resort to the old test_and_set. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- return __atomic_exchange_n(&_M_i, 1, __m);
- else
- {
- /* Sync test and set is only guaranteed to be acquire. */
- if (__m == memory_order_seq_cst || __m == memory_order_release
- || __m == memory_order_acq_rel)
- atomic_thread_fence (__m);
- return __sync_lock_test_and_set (&_M_i, 1);
- }
+ return __atomic_test_and_set (&_M_i, __m);
}
bool
test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
{
- /* The standard *requires* this to be lock free. If exchange is not
- always lock free, the resort to the old test_and_set. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- return __atomic_exchange_n(&_M_i, 1, __m);
- else
- {
- /* Sync test and set is only guaranteed to be acquire. */
- if (__m == memory_order_seq_cst || __m == memory_order_release
- || __m == memory_order_acq_rel)
- atomic_thread_fence (__m);
- return __sync_lock_test_and_set (&_M_i, 1);
- }
+ return __atomic_test_and_set (&_M_i, __m);
}
void
@@ -299,17 +283,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
- /* The standard *requires* this to be lock free. If store is not always
- lock free, the resort to the old style __sync_lock_release. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- __atomic_store_n(&_M_i, 0, __m);
- else
- {
- __sync_lock_release (&_M_i, 0);
- /* __sync_lock_release is only guaranteed to be a release barrier. */
- if (__m == memory_order_seq_cst)
- atomic_thread_fence (__m);
- }
+ __atomic_clear (&_M_i, __m);
}
void
@@ -319,17 +293,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
- /* The standard *requires* this to be lock free. If store is not always
- lock free, the resort to the old style __sync_lock_release. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- __atomic_store_n(&_M_i, 0, __m);
- else
- {
- __sync_lock_release (&_M_i, 0);
- /* __sync_lock_release is only guaranteed to be a release barrier. */
- if (__m == memory_order_seq_cst)
- atomic_thread_fence (__m);
- }
+ __atomic_clear (&_M_i, __m);
}
};
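
As a closing usage note, the canonical consumer of this test_and_set/clear pair is a spinlock; a minimal sketch built on the patched atomic_flag:

#include <atomic>

std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;

void lock () noexcept
{
  /* Spin until the previous value was clear; acquire ordering makes the
     critical section happen after a successful test_and_set.  */
  while (lock_flag.test_and_set (std::memory_order_acquire))
    ;
}

void unlock () noexcept
{
  /* Release pairs with the acquire in lock ().  */
  lock_flag.clear (std::memory_order_release);
}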