author     Andrew MacLeod <amacleod@redhat.com>    2015-05-12 20:01:47 +0000
committer  Andrew Macleod <amacleod@gcc.gnu.org>   2015-05-12 20:01:47 +0000
commit     46b35980b831a980f762753b64c83e1ab8eac880 (patch)
tree       46ecdae55c6167da9fe60411b19d39051ad5e4c1 /gcc/config/ia64
parent     e7a677ca1a53221276c0b382811c0351d381b35a (diff)
re PR target/65697 (__atomic memory barriers not strong enough for __sync builtins)
2015-05-12  Andrew MacLeod  <amacleod@redhat.com>

	PR target/65697
	* coretypes.h (MEMMODEL_SYNC, MEMMODEL_BASE_MASK): New macros.
	(enum memmodel): Add SYNC_{ACQUIRE,RELEASE,SEQ_CST}.
	* tree.h (memmodel_from_int, memmodel_base, is_mm_relaxed,
	is_mm_consume, is_mm_acquire, is_mm_release, is_mm_acq_rel,
	is_mm_seq_cst, is_mm_sync): New accessor functions.
	* builtins.c (expand_builtin_sync_operation,
	expand_builtin_compare_and_swap): Use MEMMODEL_SYNC_SEQ_CST.
	(expand_builtin_sync_lock_release): Use MEMMODEL_SYNC_RELEASE.
	(get_memmodel, expand_builtin_atomic_compare_exchange,
	expand_builtin_atomic_load, expand_builtin_atomic_store,
	expand_builtin_atomic_clear): Use new accessor routines.
	(expand_builtin_sync_synchronize): Use MEMMODEL_SYNC_SEQ_CST.
	* optabs.c (expand_compare_and_swap_loop): Use MEMMODEL_SYNC_SEQ_CST.
	(maybe_emit_sync_lock_test_and_set): Use new accessors and
	MEMMODEL_SYNC_ACQUIRE.
	(expand_sync_lock_test_and_set): Use MEMMODEL_SYNC_ACQUIRE.
	(expand_mem_thread_fence, expand_mem_signal_fence, expand_atomic_load,
	expand_atomic_store): Use new accessors.
	* emit-rtl.c (need_atomic_barrier_p): Add additional enum cases.
	* tsan.c (instrument_builtin_call): Update check for memory model
	beyond final enum to use MEMMODEL_LAST.
	* c-family/c-common.c: Use new accessor for memmodel_base.
	* config/aarch64/aarch64.c (aarch64_expand_compare_and_swap): Use new
	accessors.
	* config/aarch64/atomics.md (atomic_load<mode>, atomic_store<mode>,
	aarch64_load_exclusive<mode>, aarch64_store_exclusive<mode>,
	mem_thread_fence, *dmb): Likewise.
	* config/alpha/alpha.c (alpha_split_compare_and_swap,
	alpha_split_compare_and_swap_12): Likewise.
	* config/arm/arm.c (arm_expand_compare_and_swap,
	arm_split_compare_and_swap, arm_split_atomic_op): Likewise.
	* config/arm/sync.md (atomic_load<mode>, atomic_store<mode>,
	atomic_loaddi): Likewise.
	* config/i386/i386.c (ix86_destroy_cost_data, ix86_memmodel_check):
	Likewise.
	* config/i386/sync.md (mem_thread_fence, atomic_store<mode>): Likewise.
	* config/ia64/ia64.c (ia64_expand_atomic_op): Add new memmodel cases
	and use new accessors.
	* config/ia64/sync.md (mem_thread_fence, atomic_load<mode>,
	atomic_store<mode>, atomic_compare_and_swap<mode>,
	atomic_exchange<mode>): Use new accessors.
	* config/mips/mips.c (mips_process_sync_loop): Likewise.
	* config/pa/pa.md (atomic_loaddi, atomic_storedi): Likewise.
	* config/rs6000/rs6000.c (rs6000_pre_atomic_barrier,
	rs6000_post_atomic_barrier): Add new cases.
	(rs6000_expand_atomic_compare_and_swap): Use new accessors.
	* config/rs6000/sync.md (mem_thread_fence): Add new cases.
	(atomic_load<mode>): Add new cases and use new accessors.
	(store_quadpti): Add new cases.
	* config/s390/s390.md (mem_thread_fence, atomic_store<mode>): Use new
	accessors.
	* config/sparc/sparc.c (sparc_emit_membar_for_model): Use new
	accessors.
	* doc/extend.texi: Update docs to indicate 16 bits are used for memory
	model, not 8.

From-SVN: r223096
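The commit body for coretypes.h and tree.h is outside this section, so the sketch below is a reconstruction from the ChangeLog alone: the low bits carry the C++11 memory model, and a separate flag bit marks a model that originated in a legacy __sync builtin. The exact bit positions and the subset of is_mm_* predicates shown are assumptions, not copied from the patch.

    /* Sketch only: bit assignments are assumed, not taken from the commit.  */
    #include <stdbool.h>

    #define MEMMODEL_SYNC      (1 << 15)           /* "from a __sync builtin" flag */
    #define MEMMODEL_BASE_MASK (MEMMODEL_SYNC - 1) /* selects the base model */

    enum memmodel
    {
      MEMMODEL_RELAXED = 0,
      MEMMODEL_CONSUME = 1,
      MEMMODEL_ACQUIRE = 2,
      MEMMODEL_RELEASE = 3,
      MEMMODEL_ACQ_REL = 4,
      MEMMODEL_SEQ_CST = 5,
      MEMMODEL_LAST    = 6,
      /* __sync variants: same base model plus the SYNC flag.  */
      MEMMODEL_SYNC_ACQUIRE = MEMMODEL_ACQUIRE | MEMMODEL_SYNC,
      MEMMODEL_SYNC_RELEASE = MEMMODEL_RELEASE | MEMMODEL_SYNC,
      MEMMODEL_SYNC_SEQ_CST = MEMMODEL_SEQ_CST | MEMMODEL_SYNC
    };

    /* Keep the SYNC flag, so is_mm_sync can still detect it.  */
    static inline enum memmodel
    memmodel_from_int (unsigned int val)
    {
      return (enum memmodel) (val & (MEMMODEL_SYNC | MEMMODEL_BASE_MASK));
    }

    /* Strip the SYNC flag for code that treats __sync and __atomic alike.  */
    static inline enum memmodel
    memmodel_base (unsigned int val)
    {
      return (enum memmodel) (val & MEMMODEL_BASE_MASK);
    }

    /* The is_mm_* predicates compare base models only, so each one
       accepts both the __atomic and the __sync variant.  */
    static inline bool
    is_mm_relaxed (enum memmodel model)
    {
      return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELAXED;
    }

    static inline bool
    is_mm_seq_cst (enum memmodel model)
    {
      return (model & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST;
    }

    static inline bool
    is_mm_sync (enum memmodel model)
    {
      return (model & MEMMODEL_SYNC) != 0;
    }

Under this sketch the flag sits above the base-model bits, which is consistent with the doc/extend.texi change: 16 bits are now reserved for the memory model argument rather than 8.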
Diffstat (limited to 'gcc/config/ia64')
-rw-r--r--  gcc/config/ia64/ia64.c   |  9
-rw-r--r--  gcc/config/ia64/sync.md  | 18
2 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 380088e28f7..c1e2ecdf0d8 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -2386,10 +2386,12 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
     {
     case MEMMODEL_ACQ_REL:
     case MEMMODEL_SEQ_CST:
+    case MEMMODEL_SYNC_SEQ_CST:
       emit_insn (gen_memory_barrier ());
       /* FALLTHRU */
     case MEMMODEL_RELAXED:
     case MEMMODEL_ACQUIRE:
+    case MEMMODEL_SYNC_ACQUIRE:
     case MEMMODEL_CONSUME:
       if (mode == SImode)
         icode = CODE_FOR_fetchadd_acq_si;
@@ -2397,6 +2399,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
         icode = CODE_FOR_fetchadd_acq_di;
       break;
     case MEMMODEL_RELEASE:
+    case MEMMODEL_SYNC_RELEASE:
       if (mode == SImode)
         icode = CODE_FOR_fetchadd_rel_si;
       else
@@ -2423,8 +2426,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
         front half of the full barrier.  The end half is the cmpxchg.rel.
         For relaxed and release memory models, we don't need this.  But we
         also don't bother trying to prevent it either.  */
-      gcc_assert (model == MEMMODEL_RELAXED
-                  || model == MEMMODEL_RELEASE
+      gcc_assert (is_mm_relaxed (model) || is_mm_release (model)
                   || MEM_VOLATILE_P (mem));
 
       old_reg = gen_reg_rtx (DImode);
@@ -2468,6 +2470,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
     {
     case MEMMODEL_RELAXED:
     case MEMMODEL_ACQUIRE:
+    case MEMMODEL_SYNC_ACQUIRE:
     case MEMMODEL_CONSUME:
       switch (mode)
         {
@@ -2481,8 +2484,10 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
       break;
 
     case MEMMODEL_RELEASE:
+    case MEMMODEL_SYNC_RELEASE:
     case MEMMODEL_ACQ_REL:
     case MEMMODEL_SEQ_CST:
+    case MEMMODEL_SYNC_SEQ_CST:
       switch (mode)
         {
         case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
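For context on the fetchadd/cmpxchg cases above: PR target/65697 is about the legacy __sync builtins, which are documented as full barriers, being expanded no more strongly than their __atomic equivalents. Per the ChangeLog, expand_builtin_sync_operation now tags them with MEMMODEL_SYNC_SEQ_CST, so a target can tell the two families apart. A minimal user-level illustration (ordinary GCC builtins, nothing target-specific):

    int counter;

    void
    bump_both_ways (void)
    {
      /* Legacy builtin: documented as a full barrier; now expanded
         with MEMMODEL_SYNC_SEQ_CST.  */
      __sync_fetch_and_add (&counter, 1);

      /* C11-style builtin: exactly as strong as its model argument;
         expanded with plain MEMMODEL_SEQ_CST.  */
      __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST);
    }

On ia64 both families currently take the same code paths (each new SYNC_* case is added alongside its base model), but the distinction is now available wherever a stronger sequence is required.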
diff --git a/gcc/config/ia64/sync.md b/gcc/config/ia64/sync.md
index 75d746d74c7..9c178b826b1 100644
--- a/gcc/config/ia64/sync.md
+++ b/gcc/config/ia64/sync.md
@@ -33,7 +33,7 @@
   [(match_operand:SI 0 "const_int_operand" "")]   ;; model
   ""
 {
-  if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
+  if (is_mm_seq_cst (memmodel_from_int (INTVAL (operands[0]))))
     emit_insn (gen_memory_barrier ());
   DONE;
 })
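The one-line change above is the crux of the fix for this expander: __sync_synchronize now arrives as MEMMODEL_SYNC_SEQ_CST (see the ChangeLog entry for expand_builtin_sync_synchronize), which compares unequal to MEMMODEL_SEQ_CST as a plain integer, so the old test would silently skip the mf. Under the encoding assumed in the sketch near the top of this page:

    #include <assert.h>

    void
    check_fence_predicate (void)
    {
      /* Distinct integer values, so `==' is no longer the right test...  */
      assert (MEMMODEL_SYNC_SEQ_CST != MEMMODEL_SEQ_CST);

      /* ...but the accessor compares base models, so both variants
         still get the barrier.  */
      assert (is_mm_seq_cst (MEMMODEL_SEQ_CST));
      assert (is_mm_seq_cst (MEMMODEL_SYNC_SEQ_CST));
    }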
@@ -60,11 +60,11 @@
    (match_operand:SI 2 "const_int_operand" "")]   ;; model
   ""
 {
-  enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
 
   /* Unless the memory model is relaxed, we want to emit ld.acq, which
      will happen automatically for volatile memories.  */
-  gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[1]));
+  gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[1]));
   emit_move_insn (operands[0], operands[1]);
   DONE;
 })
@@ -75,17 +75,17 @@
    (match_operand:SI 2 "const_int_operand" "")]   ;; model
   ""
 {
-  enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
 
   /* Unless the memory model is relaxed, we want to emit st.rel, which
      will happen automatically for volatile memories.  */
-  gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[0]));
+  gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[0]));
   emit_move_insn (operands[0], operands[1]);
 
   /* Sequentially consistent stores need a subsequent MF.  See
      http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001952.html
      for a discussion of why a MF is needed here, but not for atomic_load.  */
-  if (model == MEMMODEL_SEQ_CST)
+  if (is_mm_seq_cst (model))
     emit_insn (gen_memory_barrier ());
   DONE;
 })
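The cpp-threads link above concerns the classic store-buffering (Dekker) pattern, which shows why an st.rel alone cannot implement a seq_cst store. A short C11 sketch of the pattern (the thread functions t1/t2 are illustrative, not from the commit):

    #include <stdatomic.h>

    atomic_int x, y;
    int r1, r2;

    /* Run concurrently; atomic_store and atomic_load default to seq_cst.  */
    void t1 (void) { atomic_store (&x, 1); r1 = atomic_load (&y); }
    void t2 (void) { atomic_store (&y, 1); r2 = atomic_load (&x); }

seq_cst forbids the outcome r1 == 0 && r2 == 0, but a release store with no trailing fence may still sit in a store buffer while the subsequent load completes, making that outcome observable. The mf emitted after the st.rel is what rules it out; per the linked discussion, the load side needs no such fence.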
@@ -101,7 +101,8 @@
    (match_operand:SI 7 "const_int_operand" "")]   ;; fail model
   ""
 {
-  enum memmodel model = (enum memmodel) INTVAL (operands[6]);
+  /* No need to distinguish __sync from __atomic, so get base value.  */
+  enum memmodel model = memmodel_base (INTVAL (operands[6]));
   rtx ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
   rtx dval, eval;
 
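Note that the two accessors used in this file differ deliberately: mem_thread_fence, atomic_load and atomic_store above use memmodel_from_int, which keeps the __sync marker, while this hunk and the atomic_exchange hunk below use memmodel_base, because compare-and-swap and exchange expand identically for both builtin families. Under the encoding assumed in the earlier sketch:

    enum memmodel a = memmodel_from_int (MEMMODEL_SYNC_RELEASE);
    /* a keeps the SYNC flag: is_mm_sync (a) is true.  */

    enum memmodel b = memmodel_base (MEMMODEL_SYNC_RELEASE);
    /* b == MEMMODEL_RELEASE: the flag is stripped, so a switch over b
       needs no SYNC_* cases.  */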
@@ -200,7 +201,8 @@
    (match_operand:SI 3 "const_int_operand" "")]   ;; succ model
   ""
 {
-  enum memmodel model = (enum memmodel) INTVAL (operands[3]);
+  /* No need to distinguish __sync from __atomic, so get base value.  */
+  enum memmodel model = memmodel_base (INTVAL (operands[3]));
 
   switch (model)
     {