summaryrefslogtreecommitdiff
path: root/gcc/omp-low.c
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/omp-low.c')
-rw-r--r--gcc/omp-low.c89
1 files changed, 33 insertions, 56 deletions
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 05a3493e95f..d8e7ce39a7a 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -4998,7 +4998,7 @@ expand_omp_atomic_store (basic_block load_bb, tree addr)
}
/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
- operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
+ operation as a __atomic_fetch_op builtin. INDEX is log2 of the
size of the data type, and thus usable to find the index of the builtin
decl. Returns false if the expression is not of the proper form. */
@@ -5009,13 +5009,14 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
{
enum built_in_function oldbase, newbase, tmpbase;
tree decl, itype, call;
- direct_optab optab, oldoptab, newoptab;
tree lhs, rhs;
basic_block store_bb = single_succ (load_bb);
gimple_stmt_iterator gsi;
gimple stmt;
location_t loc;
+ enum tree_code code;
bool need_old, need_new;
+ enum machine_mode imode;
/* We expect to find the following sequences:
@@ -5047,47 +5048,34 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
return false;
/* Check for one of the supported fetch-op operations. */
- switch (gimple_assign_rhs_code (stmt))
+ code = gimple_assign_rhs_code (stmt);
+ switch (code)
{
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
- oldbase = BUILT_IN_SYNC_FETCH_AND_ADD_N;
- newbase = BUILT_IN_SYNC_ADD_AND_FETCH_N;
- optab = sync_add_optab;
- oldoptab = sync_old_add_optab;
- newoptab = sync_new_add_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
+ newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
break;
case MINUS_EXPR:
- oldbase = BUILT_IN_SYNC_FETCH_AND_SUB_N;
- newbase = BUILT_IN_SYNC_SUB_AND_FETCH_N;
- optab = sync_add_optab;
- oldoptab = sync_old_add_optab;
- newoptab = sync_new_add_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
+ newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
break;
case BIT_AND_EXPR:
- oldbase = BUILT_IN_SYNC_FETCH_AND_AND_N;
- newbase = BUILT_IN_SYNC_AND_AND_FETCH_N;
- optab = sync_and_optab;
- oldoptab = sync_old_and_optab;
- newoptab = sync_new_and_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
+ newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
break;
case BIT_IOR_EXPR:
- oldbase = BUILT_IN_SYNC_FETCH_AND_OR_N;
- newbase = BUILT_IN_SYNC_OR_AND_FETCH_N;
- optab = sync_ior_optab;
- oldoptab = sync_old_ior_optab;
- newoptab = sync_new_ior_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
+ newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
break;
case BIT_XOR_EXPR:
- oldbase = BUILT_IN_SYNC_FETCH_AND_XOR_N;
- newbase = BUILT_IN_SYNC_XOR_AND_FETCH_N;
- optab = sync_xor_optab;
- oldoptab = sync_old_xor_optab;
- newoptab = sync_new_xor_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
+ newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
break;
default:
return false;
}
+
/* Make sure the expression is of the proper form. */
if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
rhs = gimple_assign_rhs2 (stmt);
@@ -5103,37 +5091,25 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
if (decl == NULL_TREE)
return false;
itype = TREE_TYPE (TREE_TYPE (decl));
+ imode = TYPE_MODE (itype);
- if (need_new)
- {
- /* expand_sync_fetch_operation can always compensate when interested
- in the new value. */
- if (direct_optab_handler (newoptab, TYPE_MODE (itype))
- == CODE_FOR_nothing
- && direct_optab_handler (oldoptab, TYPE_MODE (itype))
- == CODE_FOR_nothing)
- return false;
- }
- else if (need_old)
- {
- /* When interested in the old value, expand_sync_fetch_operation
- can compensate only if the operation is reversible. AND and OR
- are not reversible. */
- if (direct_optab_handler (oldoptab, TYPE_MODE (itype))
- == CODE_FOR_nothing
- && (oldbase == BUILT_IN_SYNC_FETCH_AND_AND_N
- || oldbase == BUILT_IN_SYNC_FETCH_AND_OR_N
- || direct_optab_handler (newoptab, TYPE_MODE (itype))
- == CODE_FOR_nothing))
- return false;
- }
- else if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
+ /* We could test all of the various optabs involved, but the fact of the
+ matter is that (with the exception of i486 vs i586 and xadd) all targets
that support any atomic operation optab also implement compare-and-swap.
+ Let optabs.c take care of expanding any compare-and-swap loop. */
+ if (!can_compare_and_swap_p (imode))
return false;
gsi = gsi_last_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
- call = build_call_expr_loc (loc, decl, 2, addr,
- fold_convert_loc (loc, itype, rhs));
+
+ /* OpenMP does not imply any barrier-like semantics on its atomic ops.
+ It only requires that the operation happen atomically. Thus we can
+ use the RELAXED memory model. */
+ call = build_call_expr_loc (loc, decl, 3, addr,
+ fold_convert_loc (loc, itype, rhs),
+ build_int_cst (NULL, MEMMODEL_RELAXED));
+
if (need_old || need_new)
{
lhs = need_old ? loaded_val : stored_val;
@@ -5182,6 +5158,8 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
edge e;
enum built_in_function fncode;
+ /* ??? We need a non-pointer interface to __atomic_compare_exchange in
+ order to use the RELAXED memory model effectively. */
fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
+ index + 1);
cmpxchg = builtin_decl_explicit (fncode);
@@ -5190,8 +5168,7 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
itype = TREE_TYPE (TREE_TYPE (cmpxchg));
- if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
- == CODE_FOR_nothing)
+ if (!can_compare_and_swap_p (TYPE_MODE (itype)))
return false;
/* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */