Diffstat (limited to 'gcc/c-family/c-common.c')
-rw-r--r-- | gcc/c-family/c-common.c | 623
1 files changed, 560 insertions, 63 deletions
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 32daa73f90f..a6823311321 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -407,6 +407,8 @@ static int resort_field_decl_cmp (const void *, const void *);
  */
 const struct c_common_resword c_common_reswords[] =
 {
+  { "_Alignas",		RID_ALIGNAS,   D_CONLY },
+  { "_Alignof",		RID_ALIGNOF,   D_CONLY },
   { "_Bool",		RID_BOOL,      D_CONLY },
   { "_Complex",		RID_COMPLEX,	0 },
   { "_Imaginary",	RID_IMAGINARY, D_CONLY },
@@ -1298,12 +1300,7 @@ c_fully_fold_internal (tree expr, bool in_init, bool *maybe_const_operands,
 	  && (op1 = get_base_address (op0)) != NULL_TREE
 	  && TREE_CODE (op1) == INDIRECT_REF
 	  && TREE_CONSTANT (TREE_OPERAND (op1, 0)))
-	{
-	  tree offset = fold_offsetof (op0, op1);
-	  op1
-	    = fold_convert_loc (loc, TREE_TYPE (expr), TREE_OPERAND (op1, 0));
-	  ret = fold_build_pointer_plus_loc (loc, op1, offset);
-	}
+	ret = fold_convert_loc (loc, TREE_TYPE (expr), fold_offsetof_1 (op0));
       else if (op0 != orig_op0 || in_init)
 	ret = in_init
 	  ? fold_build1_initializer_loc (loc, code, TREE_TYPE (expr), op0)
@@ -4361,7 +4358,18 @@ c_sizeof_or_alignof_type (location_t loc,
 	  value = size_one_node;
 	}
       else
-	value = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+	{
+	  if (complain)
+	    {
+	      if (c_dialect_cxx ())
+		pedwarn (loc, OPT_pedantic, "ISO C++ does not permit "
+			 "%<alignof%> applied to a function type");
+	      else
+		pedwarn (loc, OPT_pedantic, "ISO C does not permit "
+			 "%<_Alignof%> applied to a function type");
+	    }
+	  value = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+	}
     }
   else if (type_code == VOID_TYPE || type_code == ERROR_MARK)
     {
@@ -6699,6 +6707,36 @@ handle_section_attribute (tree *node, tree ARG_UNUSED (name), tree args,
   return NULL_TREE;
 }
 
+/* Check whether ALIGN is a valid user-specified alignment.  If so,
+   return its base-2 log; if not, output an error and return -1.  If
+   ALLOW_ZERO then 0 is valid and should result in a return of -1 with
+   no error.  */
+int
+check_user_alignment (const_tree align, bool allow_zero)
+{
+  int i;
+
+  if (!INTEGRAL_TYPE_P (TREE_TYPE (align))
+      || TREE_CODE (align) != INTEGER_CST)
+    {
+      error ("requested alignment is not an integer constant");
+      return -1;
+    }
+  else if (allow_zero && integer_zerop (align))
+    return -1;
+  else if ((i = tree_log2 (align)) == -1)
+    {
+      error ("requested alignment is not a power of 2");
+      return -1;
+    }
+  else if (i >= HOST_BITS_PER_INT - BITS_PER_UNIT_LOG)
+    {
+      error ("requested alignment is too large");
+      return -1;
+    }
+  return i;
+}
+
 /* Handle a "aligned" attribute; arguments as in
    struct attribute_spec.handler.  */
 
@@ -6722,21 +6760,8 @@ handle_aligned_attribute (tree *node, tree ARG_UNUSED (name), tree args,
   else if (TYPE_P (*node))
     type = node, is_type = 1;
 
-  if (TREE_CODE (align_expr) != INTEGER_CST)
-    {
-      error ("requested alignment is not a constant");
-      *no_add_attrs = true;
-    }
-  else if ((i = tree_log2 (align_expr)) == -1)
-    {
-      error ("requested alignment is not a power of 2");
-      *no_add_attrs = true;
-    }
-  else if (i >= HOST_BITS_PER_INT - BITS_PER_UNIT_LOG)
-    {
-      error ("requested alignment is too large");
-      *no_add_attrs = true;
-    }
+  if ((i = check_user_alignment (align_expr, false)) == -1)
+    *no_add_attrs = true;
   else if (is_type)
     {
       if ((flags & (int) ATTR_FLAG_TYPE_IN_PLACE))
@@ -8779,20 +8804,15 @@ c_common_to_target_charset (HOST_WIDE_INT c)
   return uc;
 }
 
-/* Build the result of __builtin_offsetof.  EXPR is a nested sequence of
-   component references, with STOP_REF, or alternatively an INDIRECT_REF of
-   NULL, at the bottom; much like the traditional rendering of offsetof as a
-   macro.  Returns the folded and properly cast result.  */
+/* Fold an offsetof-like expression.  EXPR is a nested sequence of component
+   references with an INDIRECT_REF of a constant at the bottom; much like the
+   traditional rendering of offsetof as a macro.  Return the folded result.  */
 
-static tree
-fold_offsetof_1 (tree expr, tree stop_ref)
+tree
+fold_offsetof_1 (tree expr)
 {
-  enum tree_code code = PLUS_EXPR;
   tree base, off, t;
 
-  if (expr == stop_ref && TREE_CODE (expr) != ERROR_MARK)
-    return size_zero_node;
-
   switch (TREE_CODE (expr))
     {
     case ERROR_MARK:
@@ -8809,15 +8829,15 @@ fold_offsetof_1 (tree expr, tree stop_ref)
 
     case NOP_EXPR:
     case INDIRECT_REF:
-      if (!integer_zerop (TREE_OPERAND (expr, 0)))
+      if (!TREE_CONSTANT (TREE_OPERAND (expr, 0)))
 	{
 	  error ("cannot apply %<offsetof%> to a non constant address");
 	  return error_mark_node;
 	}
-      return size_zero_node;
+      return TREE_OPERAND (expr, 0);
 
     case COMPONENT_REF:
-      base = fold_offsetof_1 (TREE_OPERAND (expr, 0), stop_ref);
+      base = fold_offsetof_1 (TREE_OPERAND (expr, 0));
       if (base == error_mark_node)
 	return base;
 
@@ -8835,21 +8855,14 @@ fold_offsetof_1 (tree expr, tree stop_ref)
     case ARRAY_REF:
-      base = fold_offsetof_1 (TREE_OPERAND (expr, 0), stop_ref);
+      base = fold_offsetof_1 (TREE_OPERAND (expr, 0));
       if (base == error_mark_node)
 	return base;
 
       t = TREE_OPERAND (expr, 1);
-      if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) < 0)
-	{
-	  code = MINUS_EXPR;
-	  t = fold_build1_loc (input_location, NEGATE_EXPR, TREE_TYPE (t), t);
-	}
-      t = convert (sizetype, t);
-      off = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (expr)), t);
 
       /* Check if the offset goes beyond the upper bound of the array.  */
-      if (code == PLUS_EXPR && TREE_CODE (t) == INTEGER_CST)
+      if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) >= 0)
 	{
 	  tree upbound = array_ref_up_bound (expr);
 	  if (upbound != NULL_TREE
@@ -8889,26 +8902,30 @@ fold_offsetof_1 (tree expr, tree stop_ref)
 	    }
 	}
     }
+
+      t = convert (sizetype, t);
+      off = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (expr)), t);
       break;
 
     case COMPOUND_EXPR:
       /* Handle static members of volatile structs.  */
       t = TREE_OPERAND (expr, 1);
       gcc_assert (TREE_CODE (t) == VAR_DECL);
-      return fold_offsetof_1 (t, stop_ref);
+      return fold_offsetof_1 (t);
 
     default:
      gcc_unreachable ();
    }
 
-  return size_binop (code, base, off);
+  return fold_build_pointer_plus (base, off);
 }
 
+/* Likewise, but convert it to the return type of offsetof.  */
+
 tree
-fold_offsetof (tree expr, tree stop_ref)
+fold_offsetof (tree expr)
 {
-  /* Convert back from the internal sizetype to size_t.  */
-  return convert (size_type_node, fold_offsetof_1 (expr, stop_ref));
+  return convert (size_type_node, fold_offsetof_1 (expr));
 }
 
 /* Warn for A ?: C expressions (with B omitted) where A is a boolean
@@ -9261,7 +9278,8 @@ sync_resolve_size (tree function, VEC(tree,gc) *params)
    was encountered; true on success.  */
 
 static bool
-sync_resolve_params (tree orig_function, tree function, VEC(tree, gc) *params)
+sync_resolve_params (location_t loc, tree orig_function, tree function,
+		     VEC(tree, gc) *params, bool orig_format)
 {
   function_args_iterator iter;
   tree ptype;
@@ -9289,21 +9307,34 @@ sync_resolve_params (tree orig_function, tree function, VEC(tree, gc) *params)
       ++parmnum;
       if (VEC_length (tree, params) <= parmnum)
 	{
-	  error ("too few arguments to function %qE", orig_function);
+	  error_at (loc, "too few arguments to function %qE", orig_function);
 	  return false;
 	}
 
-      /* ??? Ideally for the first conversion we'd use convert_for_assignment
-	 so that we get warnings for anything that doesn't match the pointer
-	 type.  This isn't portable across the C and C++ front ends atm.  */
-      val = VEC_index (tree, params, parmnum);
-      val = convert (ptype, val);
-      val = convert (arg_type, val);
-      VEC_replace (tree, params, parmnum, val);
+      /* Only convert parameters if the size is appropriate with new format
+	 sync routines.  */
+      if (orig_format
+	  || tree_int_cst_equal (TYPE_SIZE (ptype), TYPE_SIZE (arg_type)))
+	{
+	  /* Ideally for the first conversion we'd use convert_for_assignment
+	     so that we get warnings for anything that doesn't match the pointer
+	     type.  This isn't portable across the C and C++ front ends atm.  */
+	  val = VEC_index (tree, params, parmnum);
+	  val = convert (ptype, val);
+	  val = convert (arg_type, val);
+	  VEC_replace (tree, params, parmnum, val);
+	}
 
       function_args_iter_next (&iter);
     }
 
+  /* __atomic routines are not variadic.  */
+  if (!orig_format && VEC_length (tree, params) != parmnum + 1)
+    {
+      error_at (loc, "too many arguments to function %qE", orig_function);
+      return false;
+    }
+
   /* The definition of these primitives is variadic, with the remaining
      being "an optional list of variables protected by the memory barrier".
     No clue what that's supposed to mean, precisely, but we consider all
@@ -9318,13 +9349,388 @@
    PARAMS.  */
 
 static tree
-sync_resolve_return (tree first_param, tree result)
+sync_resolve_return (tree first_param, tree result, bool orig_format)
 {
   tree ptype = TREE_TYPE (TREE_TYPE (first_param));
+  tree rtype = TREE_TYPE (result);
   ptype = TYPE_MAIN_VARIANT (ptype);
-  return convert (ptype, result);
+
+  /* New format doesn't require casting unless the types are the same size.  */
+  if (orig_format || tree_int_cst_equal (TYPE_SIZE (ptype), TYPE_SIZE (rtype)))
+    return convert (ptype, result);
+  else
+    return result;
 }
 
+/* This function verifies the PARAMS to generic atomic FUNCTION.
+   It returns the size if all the parameters are the same size, otherwise
+   0 is returned if the parameters are invalid.  */
+
+static int
+get_atomic_generic_size (location_t loc, tree function, VEC(tree,gc) *params)
+{
+  unsigned int n_param;
+  unsigned int n_model;
+  unsigned int x;
+  int size_0;
+  tree type_0;
+
+  /* Determine the parameter makeup.  */
+  switch (DECL_FUNCTION_CODE (function))
+    {
+    case BUILT_IN_ATOMIC_EXCHANGE:
+      n_param = 4;
+      n_model = 1;
+      break;
+    case BUILT_IN_ATOMIC_LOAD:
+    case BUILT_IN_ATOMIC_STORE:
+      n_param = 3;
+      n_model = 1;
+      break;
+    case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
+      n_param = 6;
+      n_model = 2;
+      break;
+    default:
+      return 0;
+    }
+
+  if (VEC_length (tree, params) != n_param)
+    {
+      error_at (loc, "incorrect number of arguments to function %qE", function);
+      return 0;
+    }
+
+  /* Get type of first parameter, and determine its size.  */
+  type_0 = TREE_TYPE (VEC_index (tree, params, 0));
+  if (TREE_CODE (type_0) != POINTER_TYPE)
+    {
+      error_at (loc, "argument 1 of %qE must be a pointer type", function);
+      return 0;
+    }
+  size_0 = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type_0)), 1);
+
+  /* Check each other parameter is a pointer and the same size.  */
+  for (x = 0; x < n_param - n_model; x++)
+    {
+      int size;
+      tree type = TREE_TYPE (VEC_index (tree, params, x));
+      /* __atomic_compare_exchange has a bool in the 4th postion, skip it.  */
+      if (n_param == 6 && x == 3)
+	continue;
+      if (!POINTER_TYPE_P (type))
+	{
+	  error_at (loc, "argument %d of %qE must be a pointer type", x + 1,
+		    function);
+	  return 0;
+	}
+      size = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1);
+      if (size != size_0)
+	{
+	  error_at (loc, "size mismatch in argument %d of %qE", x + 1,
+		    function);
+	  return 0;
+	}
+    }
+
+  /* Check memory model parameters for validity.  */
+  for (x = n_param - n_model ; x < n_param; x++)
+    {
+      tree p = VEC_index (tree, params, x);
+      if (TREE_CODE (p) == INTEGER_CST)
+	{
+	  int i = tree_low_cst (p, 1);
+	  if (i < 0 || i >= MEMMODEL_LAST)
+	    {
+	      warning_at (loc, OPT_Winvalid_memory_model,
+			  "invalid memory model argument %d of %qE", x + 1,
+			  function);
+	      return MEMMODEL_SEQ_CST;
+	    }
+	}
+      else
+	if (!INTEGRAL_TYPE_P (TREE_TYPE (p)))
+	  {
+	    error_at (loc, "non-integer memory model argument %d of %qE", x + 1,
+		      function);
+	    return 0;
+	  }
+    }
+
+  return size_0;
+}
+
+
+/* This will take an __atomic_ generic FUNCTION call, and add a size parameter N
+   at the beginning of the parameter list PARAMS representing the size of the
+   objects.  This is to match the library ABI requirement.  LOC is the location
+   of the function call.
+   The new function is returned if it needed rebuilding, otherwise NULL_TREE is
+   returned to allow the external call to be constructed.  */
+
+static tree
+add_atomic_size_parameter (unsigned n, location_t loc, tree function,
+			   VEC(tree,gc) *params)
+{
+  tree size_node;
+
+  /* Insert a SIZE_T parameter as the first param.  If there isn't
+     enough space, allocate a new vector and recursively re-build with that.  */
+  if (!VEC_space (tree, params, 1))
+    {
+      unsigned int z, len;
+      VEC(tree,gc) *vec;
+      tree f;
+
+      len = VEC_length (tree, params);
+      vec = VEC_alloc (tree, gc, len + 1);
+      for (z = 0; z < len; z++)
+	VEC_quick_push (tree, vec, VEC_index (tree, params, z));
+      f = build_function_call_vec (loc, function, vec, NULL);
+      VEC_free (tree, gc, vec);
+      return f;
+    }
+
+  /* Add the size parameter and leave as a function call for processing.  */
+  size_node = build_int_cst (size_type_node, n);
+  VEC_quick_insert (tree, params, 0, size_node);
+  return NULL_TREE;
+}
+
+
+/* This will process an __atomic_exchange function call, determine whether it
+   needs to be mapped to the _N variation, or turned into a library call.
+   LOC is the location of the builtin call.
+   FUNCTION is the DECL that has been invoked;
+   PARAMS is the argument list for the call.  The return value is non-null
+   TRUE is returned if it is translated into the proper format for a call to the
+   external library, and NEW_RETURN is set the tree for that function.
+   FALSE is returned if processing for the _N variation is required, and
+   NEW_RETURN is set to the the return value the result is copied into.  */
+static bool
+resolve_overloaded_atomic_exchange (location_t loc, tree function,
+				    VEC(tree,gc) *params, tree *new_return)
+{
+  tree p0, p1, p2, p3;
+  tree I_type, I_type_ptr;
+  int n = get_atomic_generic_size (loc, function, params);
+
+  /* If not a lock-free size, change to the library generic format.  */
+  if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+    {
+      *new_return = add_atomic_size_parameter (n, loc, function, params);
+      return true;
+    }
+
+  /* Otherwise there is a lockfree match, transform the call from:
+       void fn(T* mem, T* desired, T* return, model)
+     into
+       *return = (T) (fn (In* mem, (In) *desired, model))  */
+
+  p0 = VEC_index (tree, params, 0);
+  p1 = VEC_index (tree, params, 1);
+  p2 = VEC_index (tree, params, 2);
+  p3 = VEC_index (tree, params, 3);
+
+  /* Create pointer to appropriate size.  */
+  I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+  I_type_ptr = build_pointer_type (I_type);
+
+  /* Convert object pointer to required type.  */
+  p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+  VEC_replace (tree, params, 0, p0);
+
+  /* Convert new value to required type, and dereference it.  */
+  p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
+  p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
+  VEC_replace (tree, params, 1, p1);
+
+  /* Move memory model to the 3rd position, and end param list.  */
+  VEC_replace (tree, params, 2, p3);
+  VEC_truncate (tree, params, 3);
+
+  /* Convert return pointer and dereference it for later assignment.  */
+  *new_return = build_indirect_ref (loc, p2, RO_UNARY_STAR);
+
+  return false;
+}
+
+
+/* This will process an __atomic_compare_exchange function call, determine
+   whether it needs to be mapped to the _N variation, or turned into a lib call.
+   LOC is the location of the builtin call.
+   FUNCTION is the DECL that has been invoked;
+   PARAMS is the argument list for the call.  The return value is non-null
+   TRUE is returned if it is translated into the proper format for a call to the
+   external library, and NEW_RETURN is set the tree for that function.
+   FALSE is returned if processing for the _N variation is required.  */
+
+static bool
+resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
+					    VEC(tree,gc) *params,
+					    tree *new_return)
+{
+  tree p0, p1, p2;
+  tree I_type, I_type_ptr;
+  int n = get_atomic_generic_size (loc, function, params);
+
+  /* If not a lock-free size, change to the library generic format.  */
+  if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+    {
+      /* The library generic format does not have the weak parameter, so
+	 remove it from the param list.  Since a parameter has been removed,
+	 we can be sure that there is room for the SIZE_T parameter, meaning
+	 there will not be a recursive rebuilding of the parameter list, so
+	 there is no danger this will be done twice.  */
+      if (n > 0)
+	{
+	  VEC_replace (tree, params, 3, VEC_index (tree, params, 4));
+	  VEC_replace (tree, params, 4, VEC_index (tree, params, 5));
+	  VEC_truncate (tree, params, 5);
+	}
+      *new_return = add_atomic_size_parameter (n, loc, function, params);
+      return true;
+    }
+
+  /* Otherwise, there is a match, so the call needs to be transformed from:
+       bool fn(T* mem, T* desired, T* return, weak, success, failure)
+     into
+       bool fn ((In *)mem, (In *)expected, (In) *desired, weak, succ, fail)  */
+
+  p0 = VEC_index (tree, params, 0);
+  p1 = VEC_index (tree, params, 1);
+  p2 = VEC_index (tree, params, 2);
+
+  /* Create pointer to appropriate size.  */
+  I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+  I_type_ptr = build_pointer_type (I_type);
+
+  /* Convert object pointer to required type.  */
+  p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+  VEC_replace (tree, params, 0, p0);
+
+  /* Convert expected pointer to required type.  */
+  p1 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p1);
+  VEC_replace (tree, params, 1, p1);
+
+  /* Convert desired value to required type, and dereference it.  */
+  p2 = build_indirect_ref (loc, p2, RO_UNARY_STAR);
+  p2 = build1 (VIEW_CONVERT_EXPR, I_type, p2);
+  VEC_replace (tree, params, 2, p2);
+
+  /* The rest of the parameters are fine.  NULL means no special return value
+     processing.*/
+  *new_return = NULL;
+  return false;
+}
+
+
+/* This will process an __atomic_load function call, determine whether it
+   needs to be mapped to the _N variation, or turned into a library call.
+   LOC is the location of the builtin call.
+   FUNCTION is the DECL that has been invoked;
+   PARAMS is the argument list for the call.  The return value is non-null
+   TRUE is returned if it is translated into the proper format for a call to the
+   external library, and NEW_RETURN is set the tree for that function.
+   FALSE is returned if processing for the _N variation is required, and
+   NEW_RETURN is set to the the return value the result is copied into.  */
+
+static bool
+resolve_overloaded_atomic_load (location_t loc, tree function,
+				VEC(tree,gc) *params, tree *new_return)
+{
+  tree p0, p1, p2;
+  tree I_type, I_type_ptr;
+  int n = get_atomic_generic_size (loc, function, params);
+
+  /* If not a lock-free size, change to the library generic format.  */
+  if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+    {
+      *new_return = add_atomic_size_parameter (n, loc, function, params);
+      return true;
+    }
+
+  /* Otherwise, there is a match, so the call needs to be transformed from:
+       void fn(T* mem, T* return, model)
+     into
+       *return = (T) (fn ((In *) mem, model))  */
+
+  p0 = VEC_index (tree, params, 0);
+  p1 = VEC_index (tree, params, 1);
+  p2 = VEC_index (tree, params, 2);
+
+  /* Create pointer to appropriate size.  */
+  I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+  I_type_ptr = build_pointer_type (I_type);
+
+  /* Convert object pointer to required type.  */
+  p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+  VEC_replace (tree, params, 0, p0);
+
+  /* Move memory model to the 2nd position, and end param list.  */
+  VEC_replace (tree, params, 1, p2);
+  VEC_truncate (tree, params, 2);
+
+  /* Convert return pointer and dereference it for later assignment.  */
+  *new_return = build_indirect_ref (loc, p1, RO_UNARY_STAR);
+
+  return false;
+}
+
+
+/* This will process an __atomic_store function call, determine whether it
+   needs to be mapped to the _N variation, or turned into a library call.
+   LOC is the location of the builtin call.
+   FUNCTION is the DECL that has been invoked;
+   PARAMS is the argument list for the call.  The return value is non-null
+   TRUE is returned if it is translated into the proper format for a call to the
+   external library, and NEW_RETURN is set the tree for that function.
+   FALSE is returned if processing for the _N variation is required, and
+   NEW_RETURN is set to the the return value the result is copied into.  */
+
+static bool
+resolve_overloaded_atomic_store (location_t loc, tree function,
+				 VEC(tree,gc) *params, tree *new_return)
+{
+  tree p0, p1;
+  tree I_type, I_type_ptr;
+  int n = get_atomic_generic_size (loc, function, params);
+
+  /* If not a lock-free size, change to the library generic format.  */
+  if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+    {
+      *new_return = add_atomic_size_parameter (n, loc, function, params);
+      return true;
+    }
+
+  /* Otherwise, there is a match, so the call needs to be transformed from:
+       void fn(T* mem, T* value, model)
+     into
+       fn ((In *) mem, (In) *value, model)  */
+
+  p0 = VEC_index (tree, params, 0);
+  p1 = VEC_index (tree, params, 1);
+
+  /* Create pointer to appropriate size.  */
+  I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+  I_type_ptr = build_pointer_type (I_type);
+
+  /* Convert object pointer to required type.  */
+  p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+  VEC_replace (tree, params, 0, p0);
+
+  /* Convert new value to required type, and dereference it.  */
+  p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
+  p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
+  VEC_replace (tree, params, 1, p1);
+
+  /* The memory model is in the right spot already.  Return is void.  */
+  *new_return = NULL_TREE;
+
+  return false;
+}
+
+
 /* Some builtin functions are placeholders for other expressions.  This
    function should be called immediately after parsing the call expression
    before surrounding code has committed to the type of the expression.
@@ -9340,6 +9746,9 @@ tree
 resolve_overloaded_builtin (location_t loc, tree function, VEC(tree,gc) *params)
 {
   enum built_in_function orig_code = DECL_FUNCTION_CODE (function);
+  bool orig_format = true;
+  tree new_return = NULL_TREE;
+
   switch (DECL_BUILT_IN_CLASS (function))
     {
     case BUILT_IN_NORMAL:
@@ -9356,6 +9765,78 @@ resolve_overloaded_builtin (location_t loc, tree function, VEC(tree,gc) *params)
   /* Handle BUILT_IN_NORMAL here.  */
   switch (orig_code)
     {
+    case BUILT_IN_ATOMIC_EXCHANGE:
+    case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
+    case BUILT_IN_ATOMIC_LOAD:
+    case BUILT_IN_ATOMIC_STORE:
+      {
+	/* Handle these 4 together so that they can fall through to the next
+	   case if the call is transformed to an _N variant.  */
+	switch (orig_code)
+	  {
+	  case BUILT_IN_ATOMIC_EXCHANGE:
+	    {
+	      if (resolve_overloaded_atomic_exchange (loc, function, params,
+						      &new_return))
+		return new_return;
+	      /* Change to the _N variant.  */
+	      orig_code = BUILT_IN_ATOMIC_EXCHANGE_N;
+	      break;
+	    }
+
+	  case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
+	    {
+	      if (resolve_overloaded_atomic_compare_exchange (loc, function,
+							      params,
+							      &new_return))
+		return new_return;
+	      /* Change to the _N variant.  */
+	      orig_code = BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N;
+	      break;
+	    }
+	  case BUILT_IN_ATOMIC_LOAD:
+	    {
+	      if (resolve_overloaded_atomic_load (loc, function, params,
+						  &new_return))
+		return new_return;
+	      /* Change to the _N variant.  */
+	      orig_code = BUILT_IN_ATOMIC_LOAD_N;
+	      break;
+	    }
+	  case BUILT_IN_ATOMIC_STORE:
+	    {
+	      if (resolve_overloaded_atomic_store (loc, function, params,
+						   &new_return))
+		return new_return;
+	      /* Change to the _N variant.  */
+	      orig_code = BUILT_IN_ATOMIC_STORE_N;
+	      break;
+	    }
+	  default:
+	    gcc_unreachable ();
+	  }
+	/* Fallthrough to the normal processing.  */
+      }
+    case BUILT_IN_ATOMIC_EXCHANGE_N:
+    case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
+    case BUILT_IN_ATOMIC_LOAD_N:
+    case BUILT_IN_ATOMIC_STORE_N:
+    case BUILT_IN_ATOMIC_ADD_FETCH_N:
+    case BUILT_IN_ATOMIC_SUB_FETCH_N:
+    case BUILT_IN_ATOMIC_AND_FETCH_N:
+    case BUILT_IN_ATOMIC_NAND_FETCH_N:
+    case BUILT_IN_ATOMIC_XOR_FETCH_N:
+    case BUILT_IN_ATOMIC_OR_FETCH_N:
+    case BUILT_IN_ATOMIC_FETCH_ADD_N:
+    case BUILT_IN_ATOMIC_FETCH_SUB_N:
+    case BUILT_IN_ATOMIC_FETCH_AND_N:
+    case BUILT_IN_ATOMIC_FETCH_NAND_N:
+    case BUILT_IN_ATOMIC_FETCH_XOR_N:
+    case BUILT_IN_ATOMIC_FETCH_OR_N:
+      {
+	orig_format = false;
+	/* Fallthru for parameter processing.  */
+      }
     case BUILT_IN_SYNC_FETCH_AND_ADD_N:
     case BUILT_IN_SYNC_FETCH_AND_SUB_N:
    case BUILT_IN_SYNC_FETCH_AND_OR_N:
@@ -9382,15 +9863,31 @@ resolve_overloaded_builtin (location_t loc, tree function, VEC(tree,gc) *params)
 	fncode = (enum built_in_function)((int)orig_code + exact_log2 (n) + 1);
 	new_function = builtin_decl_explicit (fncode);
-	if (!sync_resolve_params (function, new_function, params))
+	if (!sync_resolve_params (loc, function, new_function, params,
+				  orig_format))
 	  return error_mark_node;
 
 	first_param = VEC_index (tree, params, 0);
 	result = build_function_call_vec (loc, new_function, params, NULL);
+	if (result == error_mark_node)
+	  return result;
 	if (orig_code != BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N
-	    && orig_code != BUILT_IN_SYNC_LOCK_RELEASE_N)
-	  result = sync_resolve_return (first_param, result);
+	    && orig_code != BUILT_IN_SYNC_LOCK_RELEASE_N
+	    && orig_code != BUILT_IN_ATOMIC_STORE_N)
+	  result = sync_resolve_return (first_param, result, orig_format);
+
+	/* If new_return is set, assign function to that expr and cast the
+	   result to void since the generic interface returned void.  */
+	if (new_return)
+	  {
+	    /* Cast function result from I{1,2,4,8,16} to the required type.  */
+	    result = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (new_return), result);
+	    result = build2 (MODIFY_EXPR, TREE_TYPE (new_return), new_return,
+			     result);
+	    TREE_SIDE_EFFECTS (result) = 1;
+	    protected_set_expr_location (result, loc);
+	    result = convert (void_type_node, result);
+	  }
 
 	return result;
       }
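The first hunks above add the C11 _Alignas and _Alignof keywords to the C front end's keyword table, route user-specified alignments through the new check_user_alignment, and add a pedwarn when alignof is applied to a function type. The snippet below is an illustrative usage sketch, not part of the patch; the diagnostics quoted in comments paraphrase the error strings introduced above.

/* Illustrative C11 usage enabled by the new D_CONLY keyword entries.  */
_Alignas (16) static char aligned_buffer[64];        /* power-of-2 constant: accepted */
static const int ld_align = _Alignof (long double);  /* integer constant expression */

/* Cases rejected or warned about, per check_user_alignment and
   c_sizeof_or_alignof_type:
     _Alignas (3) char bad;      error: requested alignment is not a power of 2
     _Alignof (int (int));       pedwarn with -pedantic: ISO C does not permit
                                 _Alignof applied to a function type  */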
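resolve_overloaded_atomic_exchange and resolve_overloaded_atomic_compare_exchange map the new type-generic __atomic builtins either onto a lock-free _N variant or onto a sized external library call. The sketch below is a usage illustration only, not part of the patch; the 24-byte struct is an assumption chosen purely to force the non-lock-free (libatomic) path.

#include <stdbool.h>

struct blob { char bytes[24]; };   /* 24 is not 1/2/4/8/16: assumed here to
                                      force the generic library path */

int
swap_flag (int *flag)
{
  int desired = 1, previous;
  /* 4-byte object: mapped onto __atomic_exchange_4; the old value is stored
     through the third pointer and the whole expression has type void.  */
  __atomic_exchange (flag, &desired, &previous, __ATOMIC_ACQ_REL);
  return previous;
}

bool
swap_blob (struct blob *obj, struct blob *expected, struct blob *desired)
{
  /* 24-byte object: add_atomic_size_parameter prepends the object size and
     the call becomes an external generic library call; the weak argument is
     dropped for the library form.  */
  return __atomic_compare_exchange (obj, expected, desired, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}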
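get_atomic_generic_size also validates the memory-model arguments of the generic calls: a constant outside the known models draws the new -Winvalid-memory-model warning rather than a hard error, while a non-integer model argument is rejected. A small sketch, assuming the GCC 4.7-era generic __atomic_load/__atomic_store signatures; the warning text in the comment paraphrases the format string added above.

int
load_and_store (int *src, int *dst)
{
  int tmp;

  /* 4-byte objects: resolved to __atomic_load_4 / __atomic_store_4.  */
  __atomic_load (src, &tmp, __ATOMIC_ACQUIRE);
  __atomic_store (dst, &tmp, __ATOMIC_RELEASE);

  /* warning: invalid memory model argument 3 of '__atomic_load'
     (42 is not a recognized model; the call is still emitted).  */
  __atomic_load (src, &tmp, 42);

  return tmp;
}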