author    uros <uros@138bc75d-0d04-0410-961f-82ee72b054a4>   2016-07-19 16:40:55 +0000
committer uros <uros@138bc75d-0d04-0410-961f-82ee72b054a4>   2016-07-19 16:40:55 +0000
commit    edc19fd0a3e491fac325214ace1331f7b166772b
tree      d187dacf2ba515208c78754cda80de5b3a012926
parent    6ca7a3d8841ea131efd40ccccf049d7a17e9e0ae
download  gcc-edc19fd0a3e491fac325214ace1331f7b166772b.tar.gz
* builtins.c: Use HOST_WIDE_INT_1 instead of (HOST_WIDE_INT) 1,
HOST_WIDE_INT_1U instead of (unsigned HOST_WIDE_INT) 1,
HOST_WIDE_INT_M1 instead of (HOST_WIDE_INT) -1 and
HOST_WIDE_INT_M1U instead of (unsigned HOST_WIDE_INT) -1.
* combine.c: Ditto.
* cse.c: Ditto.
* dojump.c: Ditto.
* double-int.c: Ditto.
* dse.c: Ditto.
* dwarf2out.c: Ditto.
* expmed.c: Ditto.
* expr.c: Ditto.
* fold-const.c: Ditto.
* function.c: Ditto.
* fwprop.c: Ditto.
* genmodes.c: Ditto.
* hwint.c: Ditto.
* hwint.h: Ditto.
* ifcvt.c: Ditto.
* loop-doloop.c: Ditto.
* loop-invariant.c: Ditto.
* loop-iv.c: Ditto.
* match.pd: Ditto.
* optabs.c: Ditto.
* real.c: Ditto.
* reload.c: Ditto.
* rtlanal.c: Ditto.
* simplify-rtx.c: Ditto.
* stor-layout.c: Ditto.
* toplev.c: Ditto.
* tree-ssa-loop-ivopts.c: Ditto.
* tree-vect-generic.c: Ditto.
* tree-vect-patterns.c: Ditto.
* tree.c: Ditto.
* tree.h: Ditto.
* ubsan.c: Ditto.
* varasm.c: Ditto.
* wide-int-print.cc: Ditto.
* wide-int.cc: Ditto.
* wide-int.h: Ditto.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@238481 138bc75d-0d04-0410-961f-82ee72b054a4
38 files changed, 228 insertions, 183 deletions
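For context: the four macros named in the log are defined in gcc/hwint.h and expand to literals of the host-wide-integer type, so the patch is a spelling cleanup; the old casts and the new macros compute identical values either way. The sketch below is a minimal stand-alone restatement, not the real hwint.h (which selects the host type at configure time and builds the unsigned variants with an unsigned-literal helper); "long long" stands in for a 64-bit HOST_WIDE_INT.

    #include <stdio.h>

    /* Stand-in for the configured host wide integer type.  */
    #define HOST_WIDE_INT long long
    #define HOST_WIDE_INT_C(x) x##LL

    /* The constants this patch substitutes, shaped like gcc/hwint.h.  */
    #define HOST_WIDE_INT_1   HOST_WIDE_INT_C (1)
    #define HOST_WIDE_INT_1U  (unsigned HOST_WIDE_INT) HOST_WIDE_INT_C (1)
    #define HOST_WIDE_INT_M1  HOST_WIDE_INT_C (-1)
    #define HOST_WIDE_INT_M1U (unsigned HOST_WIDE_INT) HOST_WIDE_INT_C (-1)

    int
    main (void)
    {
      /* Same value as the old spelling (unsigned HOST_WIDE_INT) 1 << 40,
         just shorter; either way the shift happens at 64 bits, not in
         a possibly 32-bit "int".  */
      unsigned HOST_WIDE_INT bit40 = HOST_WIDE_INT_1U << 40;
      printf ("%llx\n", bit40);   /* prints 10000000000 */
      return 0;
    }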
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 4d4cd9b44e8..66657798607 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,46 @@
+2016-07-19  Uros Bizjak  <ubizjak@gmail.com>
+
+	* builtins.c: Use HOST_WIDE_INT_1 instead of (HOST_WIDE_INT) 1,
+	HOST_WIDE_INT_1U instead of (unsigned HOST_WIDE_INT) 1,
+	HOST_WIDE_INT_M1 instead of (HOST_WIDE_INT) -1 and
+	HOST_WIDE_INT_M1U instead of (unsigned HOST_WIDE_INT) -1.
+	* combine.c: Ditto.
+	* cse.c: Ditto.
+	* dojump.c: Ditto.
+	* double-int.c: Ditto.
+	* dse.c: Ditto.
+	* dwarf2out.c: Ditto.
+	* expmed.c: Ditto.
+	* expr.c: Ditto.
+	* fold-const.c: Ditto.
+	* function.c: Ditto.
+	* fwprop.c: Ditto.
+	* genmodes.c: Ditto.
+	* hwint.c: Ditto.
+	* hwint.h: Ditto.
+	* ifcvt.c: Ditto.
+	* loop-doloop.c: Ditto.
+	* loop-invariant.c: Ditto.
+	* loop-iv.c: Ditto.
+	* match.pd: Ditto.
+	* optabs.c: Ditto.
+	* real.c: Ditto.
+	* reload.c: Ditto.
+	* rtlanal.c: Ditto.
+	* simplify-rtx.c: Ditto.
+	* stor-layout.c: Ditto.
+	* toplev.c: Ditto.
+	* tree-ssa-loop-ivopts.c: Ditto.
+	* tree-vect-generic.c: Ditto.
+	* tree-vect-patterns.c: Ditto.
+	* tree.c: Ditto.
+	* tree.h: Ditto.
+	* ubsan.c: Ditto.
+	* varasm.c: Ditto.
+	* wide-int-print.cc: Ditto.
+	* wide-int.cc: Ditto.
+	* wide-int.h: Ditto.
+
 2016-07-19  David Malcolm  <dmalcolm@redhat.com>
 
 	* selftest.c (selftest::assert_streq): Handle NULL values of
@@ -5,9 +48,9 @@
 
 2016-07-19  Martin Jambor  <mjambor@suse.cz>
 
-	PR fortran/71688
-	* trans-decl.c (gfc_generate_function_code): Use cgraph_get_create_node
-	rather than cgraph_create_node to get a call graph node.
+	PR fortran/71688
+	* trans-decl.c (gfc_generate_function_code): Use cgraph_get_create_node
+	rather than cgraph_create_node to get a call graph node.
 
 2016-07-19  Richard Biener  <rguenther@suse.de>
 
@@ -367,7 +410,8 @@
 
 2016-07-12  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
 
-	* cfgexpand.c (expand_used_vars): Make the type of a local variable auto_vec.
+	* cfgexpand.c (expand_used_vars): Make the type of a local
+	variable auto_vec.
 	* genmatch.c (lower_for): Likewise.
 	* haifa-sched.c (haifa_sched_init): Likewise.
 	(add_to_speculative_block): Likewise.
@@ -375,7 +419,8 @@
 	* predict.c (handle_missing_profiles): Likewise.
 	* tree-data-ref.c (loop_nest_has_data_refs): Likewise.
 	* tree-diagnostic.c (maybe_unwind_expanded_macro_loc): Likewise.
-	* tree-ssa-loop-niter.c (discover_iteration_bound_by_body_walk): Likewise.
+	* tree-ssa-loop-niter.c (discover_iteration_bound_by_body_walk):
+	Likewise.
 	(maybe_lower_iteration_bound): Likewise.
 	* tree-ssa-sccvn.c (DFS): Likewise.
 	* tree-stdarg.c (reachable_at_most_once): Likewise.
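A pattern that recurs throughout the hunks below is building a mask of the LEN low bits as (HOST_WIDE_INT_1U << LEN) - 1. The shift is only defined for LEN strictly less than the type width, which is why call sites such as make_extraction in combine.c first test len >= HOST_BITS_PER_WIDE_INT and fall back to ~(unsigned HOST_WIDE_INT) 0. A minimal sketch of that guarded form; low_bits_mask is a hypothetical name, the diff itself open-codes the pattern at each use:

    #include <stdio.h>

    #define HOST_WIDE_INT long long
    #define HOST_BITS_PER_WIDE_INT 64
    #define HOST_WIDE_INT_1U ((unsigned HOST_WIDE_INT) 1)

    /* Mask of the LEN low bits; LEN == HOST_BITS_PER_WIDE_INT must not
       reach the shift, since shifting by the full width is undefined
       behavior in C.  */
    static unsigned HOST_WIDE_INT
    low_bits_mask (int len)
    {
      return len >= HOST_BITS_PER_WIDE_INT
             ? ~(unsigned HOST_WIDE_INT) 0
             : (HOST_WIDE_INT_1U << len) - 1;
    }

    int
    main (void)
    {
      printf ("%llx\n", low_bits_mask (8));    /* ff */
      printf ("%llx\n", low_bits_mask (64));   /* ffffffffffffffff */
      return 0;
    }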
diff --git a/gcc/builtins.c b/gcc/builtins.c index 5f1fd82be92..03a0dc84d53 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -668,11 +668,11 @@ target_char_cast (tree cst, char *p) val = TREE_INT_CST_LOW (cst); if (CHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT) - val &= (((unsigned HOST_WIDE_INT) 1) << CHAR_TYPE_SIZE) - 1; + val &= (HOST_WIDE_INT_1U << CHAR_TYPE_SIZE) - 1; hostval = val; if (HOST_BITS_PER_CHAR < HOST_BITS_PER_WIDE_INT) - hostval &= (((unsigned HOST_WIDE_INT) 1) << HOST_BITS_PER_CHAR) - 1; + hostval &= (HOST_WIDE_INT_1U << HOST_BITS_PER_CHAR) - 1; if (val != hostval) return 1; diff --git a/gcc/combine.c b/gcc/combine.c index 4db11b03df1..1e5ee8e9514 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -4882,7 +4882,7 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src) rtx dest = XEXP (SET_DEST (x), 0); machine_mode mode = GET_MODE (dest); unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 << len) - 1; + = (HOST_WIDE_INT_1U << len) - 1; rtx or_mask; if (BITS_BIG_ENDIAN) @@ -5016,7 +5016,7 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src) if (unsignedp && len <= 8) { unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 << len) - 1; + = (HOST_WIDE_INT_1U << len) - 1; SUBST (SET_SRC (x), gen_rtx_AND (mode, gen_rtx_LSHIFTRT @@ -5852,7 +5852,7 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)) && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)) - == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1)) + == (HOST_WIDE_INT_1U << (i + 1)) - 1)) || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0))) == (unsigned int) i + 1)))) @@ -6168,7 +6168,7 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1))) SUBST (XEXP (x, 1), force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)), - ((unsigned HOST_WIDE_INT) 1 + (HOST_WIDE_INT_1U << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x)))) - 1, 0)); @@ -7134,7 +7134,7 @@ expand_compound_operation (rtx x) simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0), pos), - ((unsigned HOST_WIDE_INT) 1 << len) - 1); + (HOST_WIDE_INT_1U << len) - 1); else /* Any other cases we can't handle. */ return x; @@ -7261,7 +7261,7 @@ expand_field_assignment (const_rtx x) /* Now compute the equivalent expression. Make a copy of INNER for the SET_DEST in case it is a MEM into which we will substitute; we don't want shared RTL in that case. */ - mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << len) - 1, + mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1, compute_mode); cleared = simplify_gen_binary (AND, compute_mode, simplify_gen_unary (NOT, compute_mode, @@ -7447,7 +7447,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos, new_rtx = force_to_mode (inner, tmode, len >= HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 - : ((unsigned HOST_WIDE_INT) 1 << len) - 1, + : (HOST_WIDE_INT_1U << len) - 1, 0); /* If this extraction is going into the destination of a SET, @@ -7636,7 +7636,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos, pos_rtx || len + orig_pos >= HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 - : ((((unsigned HOST_WIDE_INT) 1 << len) - 1) + : (((HOST_WIDE_INT_1U << len) - 1) << orig_pos), 0); } @@ -7724,7 +7724,7 @@ extract_left_shift (rtx x, int count) make a new operation. 
*/ if (CONST_INT_P (XEXP (x, 1)) && (UINTVAL (XEXP (x, 1)) - & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0 + & (((HOST_WIDE_INT_1U << count)) - 1)) == 0 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0) { HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count; @@ -7793,7 +7793,7 @@ make_compound_operation (rtx x, enum rtx_code in_code) && SCALAR_INT_MODE_P (mode)) { HOST_WIDE_INT count = INTVAL (XEXP (x, 1)); - HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count; + HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count; new_rtx = make_compound_operation (XEXP (x, 0), next_code); if (GET_CODE (new_rtx) == NEG) @@ -8389,10 +8389,10 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, /* When we have an arithmetic operation, or a shift whose count we do not know, we need to assume that all bits up to the highest-order bit in MASK will be needed. This is how we form such a mask. */ - if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))) + if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) fuller_mask = ~(unsigned HOST_WIDE_INT) 0; else - fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1)) + fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1)) - 1); /* Determine what bits of X are guaranteed to be (non)zero. */ @@ -9497,7 +9497,7 @@ make_field_assignment (rtx x) src = force_to_mode (src, mode, GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 - : ((unsigned HOST_WIDE_INT) 1 << len) - 1, + : (HOST_WIDE_INT_1U << len) - 1, 0); /* If SRC is masked by an AND that does not make a difference in @@ -9508,7 +9508,7 @@ make_field_assignment (rtx x) && GET_CODE (src) == AND && CONST_INT_P (XEXP (src, 1)) && UINTVAL (XEXP (src, 1)) - == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1) + == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1) src = XEXP (src, 0); return gen_rtx_SET (assign, src); @@ -10433,7 +10433,7 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode, /* C3 has the low-order C1 bits zero. */ mask = GET_MODE_MASK (mode) - & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1); + & ~((HOST_WIDE_INT_1U << first_count) - 1); varop = simplify_and_const_int (NULL_RTX, result_mode, XEXP (varop, 0), mask); @@ -11377,7 +11377,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode, else if (const_op == 0 && mode_width - 1 < HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) - & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) + & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0) code = EQ; break; @@ -11406,7 +11406,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode, else if (const_op == 0 && mode_width - 1 < HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) - & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) + & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0) code = NE; break; @@ -11422,7 +11422,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode, /* (unsigned) < 0x80000000 is equivalent to >= 0. */ else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT && (unsigned HOST_WIDE_INT) const_op - == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) + == HOST_WIDE_INT_1U << (mode_width - 1)) { const_op = 0; code = GE; @@ -11438,7 +11438,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode, /* (unsigned) <= 0x7fffffff is equivalent to >= 0. 
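The simplify_compare_const rewrites above rest on one identity: for an N-bit value, the unsigned comparison x < 2^(N-1) holds exactly when the signed reading of x is non-negative, because 2^(N-1) is the smallest value with the sign bit set. A quick stand-alone check, assuming 32-bit values for concreteness:

    #include <stdio.h>
    #include <stdint.h>

    int
    main (void)
    {
      /* (uint32_t) x < 0x80000000 iff x >= 0.  */
      int32_t probes[] = { 0, 1, 0x7fffffff, -1, INT32_MIN };
      for (int i = 0; i < 5; i++)
        {
          int32_t x = probes[i];
          printf ("%d %d\n",
                  (uint32_t) x < ((uint32_t) 1 << 31),  /* unsigned test */
                  x >= 0);                              /* signed test */
        }
      return 0;   /* the two columns always match */
    }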
*/ else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT && (unsigned HOST_WIDE_INT) const_op - == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1) + == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1) { const_op = 0; code = GE; @@ -11457,7 +11457,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode, /* (unsigned) >= 0x80000000 is equivalent to < 0. */ else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT && (unsigned HOST_WIDE_INT) const_op - == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) + == HOST_WIDE_INT_1U << (mode_width - 1)) { const_op = 0; code = LT; @@ -11473,7 +11473,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode, /* (unsigned) > 0x7fffffff is equivalent to < 0. */ else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT && (unsigned HOST_WIDE_INT) const_op - == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1) + == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1) { const_op = 0; code = LT; @@ -11691,7 +11691,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) MODE, say that we will only be needing the sign bit of OP0. */ if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode)) op0 = force_to_mode (op0, mode, - (unsigned HOST_WIDE_INT) 1 + HOST_WIDE_INT_1U << (GET_MODE_PRECISION (mode) - 1), 0); @@ -11778,7 +11778,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && (GET_CODE (XEXP (op0, 0)) == ABS || (mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (op0, 0), mode) - & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) + & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0))) { op0 = XEXP (op0, 0); @@ -11816,7 +11816,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && mode_width <= HOST_BITS_PER_WIDE_INT) { op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), - ((unsigned HOST_WIDE_INT) 1 + (HOST_WIDE_INT_1U << (mode_width - 1 - INTVAL (XEXP (op0, 1))))); code = (code == LT ? NE : EQ); @@ -11888,7 +11888,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) if ((c1 > 0 && (unsigned HOST_WIDE_INT) c1 - < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1) + < HOST_WIDE_INT_1U << (mode_width - 1) && (equality_comparison_p || unsigned_comparison_p) /* (A - C1) zero-extends if it is positive and sign-extends if it is negative, C2 both zero- and sign-extends. */ @@ -11902,7 +11902,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) - mode_width) && const_op < 0))) || ((unsigned HOST_WIDE_INT) c1 - < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2) + < HOST_WIDE_INT_1U << (mode_width - 2) /* (A - C1) always sign-extends, like C2. */ && num_sign_bit_copies (a, inner_mode) > (unsigned int) (GET_MODE_PRECISION (inner_mode) @@ -12111,7 +12111,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && CONST_INT_P (XEXP (op0, 1)) && mode_width <= HOST_BITS_PER_WIDE_INT && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) - == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) + == HOST_WIDE_INT_1U << (mode_width - 1))) { op0 = XEXP (op0, 0); code = (code == EQ ? GE : LT); @@ -12214,7 +12214,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && CONST_INT_P (shift_count) && HWI_COMPUTABLE_MODE_P (mode) && (UINTVAL (XEXP (shift_op, 1)) - == (unsigned HOST_WIDE_INT) 1 + == HOST_WIDE_INT_1U << INTVAL (shift_count)))) { op0 @@ -12237,7 +12237,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && ((INTVAL (XEXP (op0, 1)) + ! 
equality_comparison_p) < HOST_BITS_PER_WIDE_INT) && (((unsigned HOST_WIDE_INT) const_op - & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) + & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1))) - 1)) == 0) && mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (op0, 0), mode) @@ -12260,7 +12260,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && mode_width <= HOST_BITS_PER_WIDE_INT) { op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), - ((unsigned HOST_WIDE_INT) 1 + (HOST_WIDE_INT_1U << (mode_width - 1 - INTVAL (XEXP (op0, 1))))); code = (code == LT ? NE : EQ); @@ -12355,7 +12355,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) { unsigned HOST_WIDE_INT low_bits = (nonzero_bits (XEXP (op0, 0), mode) - & (((unsigned HOST_WIDE_INT) 1 + & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1))) - 1)); if (low_bits == 0 || !equality_comparison_p) { @@ -12369,7 +12369,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && (code == GT || code == GTU || code == LE || code == LEU)) const_op - |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1); + |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1); op1 = GEN_INT (const_op); op0 = XEXP (op0, 0); continue; @@ -12475,7 +12475,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && HWI_COMPUTABLE_MODE_P (mode)) { unsigned HOST_WIDE_INT sign - = (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1); + = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1); op0 = simplify_gen_binary (AND, tmode, gen_lowpart (tmode, op0), gen_int_mode (sign, tmode)); diff --git a/gcc/cse.c b/gcc/cse.c index c14f29ddf73..6a5ccb5f309 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -3558,7 +3558,7 @@ fold_rtx (rtx x, rtx_insn *insn) instead we test for the problematic value in a more direct manner and hope the Sun compilers get it correct. 
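The fold_rtx guard just below, INTVAL (const_arg1) != HOST_WIDE_INT_1 << (HOST_BITS_PER_WIDE_INT - 1), exists because the most negative two's-complement value has no representable negation, so the fold into GEN_INT (-INTVAL (const_arg1)) must skip it. Demonstrated with plain 64-bit arithmetic:

    #include <stdio.h>
    #include <limits.h>

    int
    main (void)
    {
      /* Negating LLONG_MIN in signed arithmetic is undefined; doing it
         in unsigned arithmetic shows why: the value maps back to itself,
         so "-(x)" would not change the constant at all.  */
      unsigned long long m = 1ULL << 63;   /* bit pattern of LLONG_MIN */
      printf ("%llx\n", -m);               /* 8000000000000000 again */
      printf ("%lld\n", LLONG_MIN);        /* -9223372036854775808 */
      return 0;
    }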
*/ && INTVAL (const_arg1) != - ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)) + (HOST_WIDE_INT_1 << (HOST_BITS_PER_WIDE_INT - 1)) && REG_P (folded_arg1)) { rtx new_const = GEN_INT (-INTVAL (const_arg1)); @@ -4567,7 +4567,7 @@ cse_insn (rtx_insn *insn) if (INTVAL (width) == HOST_BITS_PER_WIDE_INT) mask = ~(HOST_WIDE_INT) 0; else - mask = ((HOST_WIDE_INT) 1 << INTVAL (width)) - 1; + mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1; val = (val >> shift) & mask; src_eqv = GEN_INT (val); } @@ -4665,7 +4665,7 @@ cse_insn (rtx_insn *insn) && INTVAL (width) < HOST_BITS_PER_WIDE_INT && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width)))) src_folded - = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1 + = GEN_INT (INTVAL (src) & ((HOST_WIDE_INT_1 << INTVAL (width)) - 1)); } #endif @@ -5235,7 +5235,7 @@ cse_insn (rtx_insn *insn) if (INTVAL (width) == HOST_BITS_PER_WIDE_INT) mask = ~(HOST_WIDE_INT) 0; else - mask = ((HOST_WIDE_INT) 1 << INTVAL (width)) - 1; + mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1; val &= ~(mask << shift); val |= (INTVAL (trial) & mask) << shift; val = trunc_int_for_mode (val, GET_MODE (dest_reg)); diff --git a/gcc/dojump.c b/gcc/dojump.c index 6e0c01cc95f..58f1e03e41b 100644 --- a/gcc/dojump.c +++ b/gcc/dojump.c @@ -575,7 +575,7 @@ do_jump (tree exp, rtx_code_label *if_false_label, TREE_INT_CST_LOW (shift))) { unsigned HOST_WIDE_INT mask - = (unsigned HOST_WIDE_INT) 1 << TREE_INT_CST_LOW (shift); + = HOST_WIDE_INT_1U << TREE_INT_CST_LOW (shift); do_jump (build2 (BIT_AND_EXPR, argtype, arg, build_int_cstu (argtype, mask)), clr_label, set_label, setclr_prob); diff --git a/gcc/double-int.c b/gcc/double-int.c index 1c4e8e310cb..8a273907a6f 100644 --- a/gcc/double-int.c +++ b/gcc/double-int.c @@ -65,10 +65,10 @@ static int div_and_round_double (unsigned, int, unsigned HOST_WIDE_INT, number. The value of the word is LOWPART + HIGHPART * BASE. */ #define LOWPART(x) \ - ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1)) + ((x) & ((HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT / 2)) - 1)) #define HIGHPART(x) \ ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2) -#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2) +#define BASE (HOST_WIDE_INT_1U << HOST_BITS_PER_WIDE_INT / 2) /* Unpack a two-word integer into 4 words. LOW and HI are the integer, as two `HOST_WIDE_INT' pieces. 
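The LOWPART/HIGHPART/BASE macros just patched can be exercised on their own. The invariant the double-int.c comment states, w == LOWPART (w) + HIGHPART (w) * BASE, is what lets the multiply and divide routines operate on half-width digits that cannot overflow. A stand-alone copy, with a fixed 64-bit stand-in type:

    #include <stdio.h>

    #define HOST_WIDE_INT long long
    #define HOST_BITS_PER_WIDE_INT 64
    #define HOST_WIDE_INT_1U ((unsigned HOST_WIDE_INT) 1)

    /* The double-int.c helpers from the hunk above.  */
    #define LOWPART(x) \
      ((x) & ((HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
    #define HIGHPART(x) \
      ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
    #define BASE (HOST_WIDE_INT_1U << HOST_BITS_PER_WIDE_INT / 2)

    int
    main (void)
    {
      unsigned HOST_WIDE_INT w = 0x123456789abcdef0ULL;
      printf ("%llx %llx\n", HIGHPART (w), LOWPART (w));
      printf ("%d\n", LOWPART (w) + HIGHPART (w) * BASE == w);  /* 1 */
      return 0;
    }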
@@ -546,7 +546,7 @@ div_and_round_double (unsigned code, int uns, if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */ { /* quo = quo - 1; */ - add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, + add_double (*lquo, *hquo, HOST_WIDE_INT_M1, HOST_WIDE_INT_M1, lquo, hquo); } else @@ -557,7 +557,7 @@ div_and_round_double (unsigned code, int uns, case CEIL_MOD_EXPR: /* round toward positive infinity */ if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */ { - add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, + add_double (*lquo, *hquo, HOST_WIDE_INT_1, (HOST_WIDE_INT) 0, lquo, hquo); } else @@ -590,10 +590,10 @@ div_and_round_double (unsigned code, int uns, if (quo_neg) /* quo = quo - 1; */ add_double (*lquo, *hquo, - (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo); + HOST_WIDE_INT_M1, HOST_WIDE_INT_M1, lquo, hquo); else /* quo = quo + 1; */ - add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, + add_double (*lquo, *hquo, HOST_WIDE_INT_1, (HOST_WIDE_INT) 0, lquo, hquo); } else @@ -1058,9 +1058,9 @@ double_int::set_bit (unsigned bitpos) const { double_int a = *this; if (bitpos < HOST_BITS_PER_WIDE_INT) - a.low |= (unsigned HOST_WIDE_INT) 1 << bitpos; + a.low |= HOST_WIDE_INT_1U << bitpos; else - a.high |= (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT); + a.high |= HOST_WIDE_INT_1 << (bitpos - HOST_BITS_PER_WIDE_INT); return a; } diff --git a/gcc/dse.c b/gcc/dse.c index 68d06bb444f..b300fb77c1f 100644 --- a/gcc/dse.c +++ b/gcc/dse.c @@ -1201,7 +1201,7 @@ set_position_unneeded (store_info *s_info, int pos) } else s_info->positions_needed.small_bitmask - &= ~(((unsigned HOST_WIDE_INT) 1) << pos); + &= ~(HOST_WIDE_INT_1U << pos); } /* Mark the whole store S_INFO as unneeded. */ @@ -1744,7 +1744,7 @@ get_stored_val (store_info *store_info, machine_mode read_mode, { unsigned HOST_WIDE_INT c = INTVAL (store_info->rhs) - & (((HOST_WIDE_INT) 1 << BITS_PER_UNIT) - 1); + & ((HOST_WIDE_INT_1 << BITS_PER_UNIT) - 1); int shift = BITS_PER_UNIT; while (shift < HOST_BITS_PER_WIDE_INT) { diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index 4c0332f86e9..9da2b3bf4d4 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -12976,7 +12976,7 @@ clz_loc_descriptor (rtx rtl, machine_mode mode, if (GET_CODE (rtl) != CLZ) msb = const1_rtx; else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) - msb = GEN_INT ((unsigned HOST_WIDE_INT) 1 + msb = GEN_INT (HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1)); else msb = immed_wide_int_const diff --git a/gcc/expmed.c b/gcc/expmed.c index bd29e42aae0..0b0abbcf283 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -85,7 +85,7 @@ mask_rtx (machine_mode mode, int bitpos, int bitsize, bool complement) /* Test whether a value is zero of a power of two. 
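The expmed.c predicate defined just below relies on x & (x - 1) clearing the lowest set bit, so the AND is zero exactly when x has at most one bit set, that is, when x is zero or a power of two. Restated with plain unsigned long long in place of the HOST_WIDE_INT_1U constant:

    #include <stdio.h>

    /* x & (x - 1) clears the lowest set bit.  */
    #define EXACT_POWER_OF_2_OR_ZERO_P(x) \
      (((x) & ((x) - 1ULL)) == 0)

    int
    main (void)
    {
      printf ("%d %d %d\n",
              EXACT_POWER_OF_2_OR_ZERO_P (0ULL),    /* 1 */
              EXACT_POWER_OF_2_OR_ZERO_P (64ULL),   /* 1 */
              EXACT_POWER_OF_2_OR_ZERO_P (96ULL));  /* 0 */
      return 0;
    }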
*/ #define EXACT_POWER_OF_2_OR_ZERO_P(x) \ - (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0) + (((x) & ((x) - HOST_WIDE_INT_1U)) == 0) struct init_expmed_rtl { @@ -235,7 +235,7 @@ init_expmed (void) memset (&all, 0, sizeof all); for (m = 1; m < MAX_BITS_PER_WORD; m++) { - all.pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m); + all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m); all.cint[m] = GEN_INT (m); } @@ -1197,14 +1197,14 @@ store_fixed_bit_field_1 (rtx op0, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT v = UINTVAL (value); if (bitsize < HOST_BITS_PER_WIDE_INT) - v &= ((unsigned HOST_WIDE_INT) 1 << bitsize) - 1; + v &= (HOST_WIDE_INT_1U << bitsize) - 1; if (v == 0) all_zero = 1; else if ((bitsize < HOST_BITS_PER_WIDE_INT - && v == ((unsigned HOST_WIDE_INT) 1 << bitsize) - 1) + && v == (HOST_WIDE_INT_1U << bitsize) - 1) || (bitsize == HOST_BITS_PER_WIDE_INT - && v == (unsigned HOST_WIDE_INT) -1)) + && v == HOST_WIDE_INT_M1U)) all_one = 1; value = lshift_value (mode, v, bitnum); @@ -1349,7 +1349,7 @@ store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, if (CONST_INT_P (value)) part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value)) >> (bitsize - bitsdone - thissize)) - & (((HOST_WIDE_INT) 1 << thissize) - 1)); + & ((HOST_WIDE_INT_1 << thissize) - 1)); /* Likewise, but the source is little-endian. */ else if (reverse) part = extract_fixed_bit_field (word_mode, value, thissize, @@ -1372,7 +1372,7 @@ store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, if (CONST_INT_P (value)) part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsdone) - & (((HOST_WIDE_INT) 1 << thissize) - 1)); + & ((HOST_WIDE_INT_1 << thissize) - 1)); /* Likewise, but the source is big-endian. */ else if (reverse) part = extract_fixed_bit_field (word_mode, value, thissize, @@ -2805,7 +2805,7 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, { unsigned HOST_WIDE_INT d; - d = ((unsigned HOST_WIDE_INT) 1 << m) + 1; + d = (HOST_WIDE_INT_1U << m) + 1; if (t % d == 0 && t > d && m < maxm && (!cache_hit || cache_alg == alg_add_factor)) { @@ -2835,7 +2835,7 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, break; } - d = ((unsigned HOST_WIDE_INT) 1 << m) - 1; + d = (HOST_WIDE_INT_1U << m) - 1; if (t % d == 0 && t > d && m < maxm && (!cache_hit || cache_alg == alg_sub_factor)) { @@ -3104,14 +3104,14 @@ expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val, tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0); accum = force_operand (gen_rtx_PLUS (mode, accum, tem), add_target ? add_target : accum_target); - val_so_far += (HOST_WIDE_INT) 1 << log; + val_so_far += HOST_WIDE_INT_1 << log; break; case alg_sub_t_m2: tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0); accum = force_operand (gen_rtx_MINUS (mode, accum, tem), add_target ? add_target : accum_target); - val_so_far -= (HOST_WIDE_INT) 1 << log; + val_so_far -= HOST_WIDE_INT_1 << log; break; case alg_add_t2_m: @@ -3485,7 +3485,7 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, *lgup_ptr = lgup; if (n < HOST_BITS_PER_WIDE_INT) { - unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1; + unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1; *multiplier_ptr = mhigh.to_uhwi () & mask; return mhigh.to_uhwi () >= mask; } @@ -3514,7 +3514,7 @@ invert_mod2n (unsigned HOST_WIDE_INT x, int n) mask = (n == HOST_BITS_PER_WIDE_INT ? 
~(unsigned HOST_WIDE_INT) 0 - : ((unsigned HOST_WIDE_INT) 1 << n) - 1); + : (HOST_WIDE_INT_1U << n) - 1); while (nbit < n) { @@ -3776,7 +3776,7 @@ expand_smod_pow2 (machine_mode mode, rtx op0, HOST_WIDE_INT d) mode, 0, -1); if (signmask) { - HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1; + HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1; signmask = force_reg (mode, signmask); shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd); @@ -4180,7 +4180,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, if (rem_flag) { unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1; + = (HOST_WIDE_INT_1U << pre_shift) - 1; remainder = expand_binop (compute_mode, and_optab, op0, gen_int_mode (mask, compute_mode), @@ -4194,7 +4194,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, } else if (size <= HOST_BITS_PER_WIDE_INT) { - if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1))) + if (d >= (HOST_WIDE_INT_1U << (size - 1))) { /* Most significant bit of divisor is set; emit an scc insn. */ @@ -4315,7 +4315,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, quotient = expand_unop (compute_mode, neg_optab, op0, tquotient, 0); else if (HOST_BITS_PER_WIDE_INT >= size - && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1)) + && abs_d == HOST_WIDE_INT_1U << (size - 1)) { /* This case is not handled correctly below. */ quotient = emit_store_flag (tquotient, EQ, op0, op1, @@ -4365,7 +4365,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, { insn = get_last_insn (); if (insn != last - && abs_d < ((unsigned HOST_WIDE_INT) 1 + && abs_d < (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) set_dst_reg_note (insn, REG_EQUAL, gen_rtx_DIV (compute_mode, op0, @@ -4382,7 +4382,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, { choose_multiplier (abs_d, size, size - 1, &ml, &post_shift, &lgup); - if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1)) + if (ml < HOST_WIDE_INT_1U << (size - 1)) { rtx t1, t2, t3; @@ -4488,7 +4488,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, if (rem_flag) { unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1; + = (HOST_WIDE_INT_1U << pre_shift) - 1; remainder = expand_binop (compute_mode, and_optab, op0, gen_int_mode (mask, compute_mode), diff --git a/gcc/expr.c b/gcc/expr.c index 4073a989d96..1cb233caf79 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -4764,7 +4764,7 @@ optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize, binop = code == BIT_IOR_EXPR ? 
ior_optab : xor_optab; if (bitpos + bitsize != str_bitsize) { - rtx mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << bitsize) - 1, + rtx mask = gen_int_mode ((HOST_WIDE_INT_1U << bitsize) - 1, str_mode); value = expand_and (str_mode, value, mask, NULL_RTX); } @@ -7586,7 +7586,7 @@ highest_pow2_factor (const_tree exp) int trailing_zeros = tree_ctz (exp); if (trailing_zeros >= HOST_BITS_PER_WIDE_INT) return BIGGEST_ALIGNMENT; - ret = (unsigned HOST_WIDE_INT) 1 << trailing_zeros; + ret = HOST_WIDE_INT_1U << trailing_zeros; if (ret > BIGGEST_ALIGNMENT) return BIGGEST_ALIGNMENT; return ret; @@ -10324,7 +10324,7 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode, if (TYPE_UNSIGNED (TREE_TYPE (field))) { - op1 = gen_int_mode (((HOST_WIDE_INT) 1 << bitsize) - 1, + op1 = gen_int_mode ((HOST_WIDE_INT_1 << bitsize) - 1, imode); op0 = expand_and (imode, op0, op1, target); } diff --git a/gcc/fold-const.c b/gcc/fold-const.c index 40a8de1c93d..c5d9a79ed28 100644 --- a/gcc/fold-const.c +++ b/gcc/fold-const.c @@ -2058,7 +2058,7 @@ fold_convert_const_fixed_from_int (tree type, const_tree arg1) di.low = TREE_INT_CST_ELT (arg1, 0); if (TREE_INT_CST_NUNITS (arg1) == 1) - di.high = (HOST_WIDE_INT) di.low < 0 ? (HOST_WIDE_INT) -1 : 0; + di.high = (HOST_WIDE_INT) di.low < 0 ? HOST_WIDE_INT_M1 : 0; else di.high = TREE_INT_CST_ELT (arg1, 1); diff --git a/gcc/function.c b/gcc/function.c index abee3640494..269ed8d4543 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -233,7 +233,7 @@ frame_offset_overflow (HOST_WIDE_INT offset, tree func) { unsigned HOST_WIDE_INT size = FRAME_GROWS_DOWNWARD ? -offset : offset; - if (size > ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (Pmode) - 1)) + if (size > (HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (Pmode) - 1)) /* Leave room for the fixed part of the frame. */ - 64 * UNITS_PER_WORD) { diff --git a/gcc/fwprop.c b/gcc/fwprop.c index 88cfefbe1ef..30d5739f0e3 100644 --- a/gcc/fwprop.c +++ b/gcc/fwprop.c @@ -354,7 +354,7 @@ canonicalize_address (rtx x) { HOST_WIDE_INT shift = INTVAL (XEXP (x, 1)); PUT_CODE (x, MULT); - XEXP (x, 1) = gen_int_mode ((HOST_WIDE_INT) 1 << shift, + XEXP (x, 1) = gen_int_mode (HOST_WIDE_INT_1 << shift, GET_MODE (x)); } diff --git a/gcc/genmodes.c b/gcc/genmodes.c index 788031b7fff..59faae98244 100644 --- a/gcc/genmodes.c +++ b/gcc/genmodes.c @@ -1410,7 +1410,7 @@ emit_mode_mask (void) #define MODE_MASK(m) \\\n\ ((m) >= HOST_BITS_PER_WIDE_INT) \\\n\ ? 
~(unsigned HOST_WIDE_INT) 0 \\\n\ - : ((unsigned HOST_WIDE_INT) 1 << (m)) - 1\n"); + : (HOST_WIDE_INT_1U << (m)) - 1\n"); for_all_modes (c, m) if (m->precision != (unsigned int)-1) diff --git a/gcc/hwint.c b/gcc/hwint.c index 74c1235158b..b936c52e5c1 100644 --- a/gcc/hwint.c +++ b/gcc/hwint.c @@ -41,20 +41,20 @@ floor_log2 (unsigned HOST_WIDE_INT x) return -1; if (HOST_BITS_PER_WIDE_INT > 64) - if (x >= (unsigned HOST_WIDE_INT) 1 << (t + 64)) + if (x >= HOST_WIDE_INT_1U << (t + 64)) t += 64; if (HOST_BITS_PER_WIDE_INT > 32) - if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 32)) + if (x >= HOST_WIDE_INT_1U << (t + 32)) t += 32; - if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 16)) + if (x >= HOST_WIDE_INT_1U << (t + 16)) t += 16; - if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 8)) + if (x >= HOST_WIDE_INT_1U << (t + 8)) t += 8; - if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 4)) + if (x >= HOST_WIDE_INT_1U << (t + 4)) t += 4; - if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 2)) + if (x >= HOST_WIDE_INT_1U << (t + 2)) t += 2; - if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 1)) + if (x >= HOST_WIDE_INT_1U << (t + 1)) t += 1; return t; diff --git a/gcc/hwint.h b/gcc/hwint.h index 14740ccc939..c816ed40f7b 100644 --- a/gcc/hwint.h +++ b/gcc/hwint.h @@ -226,7 +226,7 @@ exact_log2 (unsigned HOST_WIDE_INT x) #endif /* GCC_VERSION >= 3004 */ #define HOST_WIDE_INT_MIN (HOST_WIDE_INT) \ - ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)) + (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)) #define HOST_WIDE_INT_MAX (~(HOST_WIDE_INT_MIN)) extern HOST_WIDE_INT abs_hwi (HOST_WIDE_INT); @@ -276,7 +276,7 @@ zext_hwi (unsigned HOST_WIDE_INT src, unsigned int prec) else { gcc_checking_assert (prec < HOST_BITS_PER_WIDE_INT); - return src & (((unsigned HOST_WIDE_INT) 1 << prec) - 1); + return src & ((HOST_WIDE_INT_1U << prec) - 1); } } diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c index f7f120e04b1..a92ab6dd105 100644 --- a/gcc/ifcvt.c +++ b/gcc/ifcvt.c @@ -2890,7 +2890,7 @@ noce_try_bitop (struct noce_if_info *if_info) if (! rtx_equal_p (x, XEXP (a, 0)) || !CONST_INT_P (XEXP (a, 1)) || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode)) - != (unsigned HOST_WIDE_INT) 1 << bitnum) + != HOST_WIDE_INT_1U << bitnum) return FALSE; /* if ((x & C) == 0) x |= C; is transformed to x |= C. */ @@ -2900,13 +2900,13 @@ noce_try_bitop (struct noce_if_info *if_info) else if (code == NE) { /* if ((x & C) == 0) x ^= C; is transformed to x |= C. */ - result = gen_int_mode ((HOST_WIDE_INT) 1 << bitnum, mode); + result = gen_int_mode (HOST_WIDE_INT_1 << bitnum, mode); result = simplify_gen_binary (IOR, mode, x, result); } else { /* if ((x & C) != 0) x ^= C; is transformed to x &= ~C. */ - result = gen_int_mode (~((HOST_WIDE_INT) 1 << bitnum), mode); + result = gen_int_mode (~(HOST_WIDE_INT_1 << bitnum), mode); result = simplify_gen_binary (AND, mode, x, result); } } @@ -2916,7 +2916,7 @@ noce_try_bitop (struct noce_if_info *if_info) if (! rtx_equal_p (x, XEXP (a, 0)) || !CONST_INT_P (XEXP (a, 1)) || (INTVAL (XEXP (a, 1)) & GET_MODE_MASK (mode)) - != (~((HOST_WIDE_INT) 1 << bitnum) & GET_MODE_MASK (mode))) + != (~(HOST_WIDE_INT_1 << bitnum) & GET_MODE_MASK (mode))) return FALSE; /* if ((x & C) == 0) x &= ~C; is transformed to nothing. 
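The hwint.c hunk above is a branch-chain binary search for the highest set bit: each test probes a window half the size of the previous one and accumulates the bit index. Restated for a fixed 64-bit type; the real function also handles hosts where HOST_BITS_PER_WIDE_INT exceeds 64:

    #include <stdio.h>
    #include <stdint.h>

    /* Index of the highest set bit, or -1 for zero, like floor_log2.  */
    static int
    floor_log2_u64 (uint64_t x)
    {
      int t = 0;
      if (x == 0)
        return -1;
      if (x >= UINT64_C (1) << (t + 32)) t += 32;
      if (x >= UINT64_C (1) << (t + 16)) t += 16;
      if (x >= UINT64_C (1) << (t + 8))  t += 8;
      if (x >= UINT64_C (1) << (t + 4))  t += 4;
      if (x >= UINT64_C (1) << (t + 2))  t += 2;
      if (x >= UINT64_C (1) << (t + 1))  t += 1;
      return t;
    }

    int
    main (void)
    {
      printf ("%d %d %d\n",
              floor_log2_u64 (1),                  /* 0 */
              floor_log2_u64 (4096),               /* 12 */
              floor_log2_u64 (~UINT64_C (0)));     /* 63 */
      return 0;
    }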
*/ diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c index 3eb9b11d5e1..c31151630ec 100644 --- a/gcc/loop-doloop.c +++ b/gcc/loop-doloop.c @@ -688,7 +688,7 @@ doloop_optimize (struct loop *loop) word_mode_size = GET_MODE_PRECISION (word_mode); word_mode_max - = ((unsigned HOST_WIDE_INT) 1 << (word_mode_size - 1) << 1) - 1; + = (HOST_WIDE_INT_1U << (word_mode_size - 1) << 1) - 1; if (! doloop_seq && mode != word_mode /* Before trying mode different from the one in that # of iterations is diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c index dcbe9326a5b..d550a841d06 100644 --- a/gcc/loop-invariant.c +++ b/gcc/loop-invariant.c @@ -777,7 +777,7 @@ canonicalize_address_mult (rtx x) { HOST_WIDE_INT shift = INTVAL (XEXP (sub, 1)); PUT_CODE (sub, MULT); - XEXP (sub, 1) = gen_int_mode ((HOST_WIDE_INT) 1 << shift, + XEXP (sub, 1) = gen_int_mode (HOST_WIDE_INT_1 << shift, GET_MODE (sub)); iter.skip_subrtxes (); } diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c index 57fb8c1fa7f..68cfd0ef9c8 100644 --- a/gcc/loop-iv.c +++ b/gcc/loop-iv.c @@ -1613,7 +1613,7 @@ implies_p (rtx a, rtx b) && CONST_INT_P (XEXP (opb0, 1)) /* Avoid overflows. */ && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1)) - != ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))) + != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) && rtx_equal_p (XEXP (opb0, 0), op0)) return INTVAL (op1) == -INTVAL (XEXP (opb0, 1)); if (GET_CODE (b) == GEU @@ -1622,7 +1622,7 @@ implies_p (rtx a, rtx b) && CONST_INT_P (XEXP (opb0, 1)) /* Avoid overflows. */ && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1)) - != ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))) + != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) && rtx_equal_p (XEXP (opb0, 0), op0)) return INTVAL (op1) == -INTVAL (XEXP (opb0, 1)); } diff --git a/gcc/match.pd b/gcc/match.pd index b24bfb47de0..836f7d8f704 100644 --- a/gcc/match.pd +++ b/gcc/match.pd @@ -1461,7 +1461,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) unsigned int prec; if (shift == LSHIFT_EXPR) - zerobits = ((((unsigned HOST_WIDE_INT) 1) << shiftc) - 1); + zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1); else if (shift == RSHIFT_EXPR && (TYPE_PRECISION (shift_type) == GET_MODE_PRECISION (TYPE_MODE (shift_type)))) @@ -1518,7 +1518,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) mode's mask. 
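The word_mode_max line in the loop-doloop.c hunk above computes 2^N - 1 for an N-bit word even when N equals the host width: a single shift by N would be undefined, so it shifts by N - 1 and then by 1 more, letting the N == 64 case wrap to zero (unsigned wraparound is defined) before the subtraction yields all ones. As a sketch, with mode_max a hypothetical name:

    #include <stdio.h>
    #include <stdint.h>

    /* Largest unsigned value representable in NBITS bits, 1 <= NBITS <= 64,
       without ever shifting by the full type width.  */
    static uint64_t
    mode_max (int nbits)
    {
      return (UINT64_C (1) << (nbits - 1) << 1) - 1;
    }

    int
    main (void)
    {
      printf ("%llx %llx\n",
              (unsigned long long) mode_max (16),   /* ffff */
              (unsigned long long) mode_max (64));  /* ffffffffffffffff */
      return 0;
    }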
*/ for (prec = BITS_PER_UNIT; prec < HOST_BITS_PER_WIDE_INT; prec <<= 1) - if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1) + if (newmask == (HOST_WIDE_INT_1U << prec) - 1) break; } (if (prec < HOST_BITS_PER_WIDE_INT diff --git a/gcc/optabs.c b/gcc/optabs.c index 2bd81db5166..51e10e21769 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -4903,7 +4903,7 @@ expand_fix (rtx to, rtx from, int unsignedp) expand_fix (to, target, 0); target = expand_binop (GET_MODE (to), xor_optab, to, gen_int_mode - ((HOST_WIDE_INT) 1 << (bitsize - 1), + (HOST_WIDE_INT_1 << (bitsize - 1), GET_MODE (to)), to, 1, OPTAB_LIB_WIDEN); diff --git a/gcc/real.c b/gcc/real.c index fbebbf0ac62..b009ed607ed 100644 --- a/gcc/real.c +++ b/gcc/real.c @@ -1395,7 +1395,7 @@ real_to_integer (const REAL_VALUE_TYPE *r) case rvc_inf: case rvc_nan: overflow: - i = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1); + i = HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1); if (!r->sign) i--; return i; @@ -4903,7 +4903,7 @@ real_powi (REAL_VALUE_TYPE *r, format_helper fmt, neg = false; t = *x; - bit = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1); + bit = HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1); for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++) { if (init) diff --git a/gcc/reload.c b/gcc/reload.c index 06426d92332..1945133baf6 100644 --- a/gcc/reload.c +++ b/gcc/reload.c @@ -6750,7 +6750,7 @@ find_equiv_reg (rtx goal, rtx_insn *insn, enum reg_class rclass, int other, if (NONJUMP_INSN_P (p) /* If we don't want spill regs ... */ && (! (reload_reg_p != 0 - && reload_reg_p != (short *) (HOST_WIDE_INT) 1) + && reload_reg_p != (short *) HOST_WIDE_INT_1) /* ... then ignore insns introduced by reload; they aren't useful and can cause results in reload_as_needed to be different from what they were when calculating the need for @@ -6883,7 +6883,7 @@ find_equiv_reg (rtx goal, rtx_insn *insn, enum reg_class rclass, int other, (Now that insns introduced by reload are ignored above, this case shouldn't happen, but I'm not positive.) */ - if (reload_reg_p != 0 && reload_reg_p != (short *) (HOST_WIDE_INT) 1) + if (reload_reg_p != 0 && reload_reg_p != (short *) HOST_WIDE_INT_1) { int i; for (i = 0; i < valuenregs; ++i) diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 8e4762c8681..94453295063 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -4381,7 +4381,7 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x, /* If X is negative in MODE, sign-extend the value. 
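The rtlanal.c code that follows sign-extends a mode-width constant by ORing in HOST_WIDE_INT_M1U << mode_width once the mode's sign bit is known to be set: every bit above the mode becomes one, which is exactly two's-complement sign extension. Stand-alone, with sign_extend_bits a hypothetical name:

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend the WIDTH-bit value VAL to 64 bits, WIDTH < 64.  */
    static uint64_t
    sign_extend_bits (uint64_t val, int width)
    {
      if (val & (UINT64_C (1) << (width - 1)))
        return val | (UINT64_MAX << width);   /* the M1U << width trick */
      return val;
    }

    int
    main (void)
    {
      printf ("%llx\n", (unsigned long long) sign_extend_bits (0x80, 8));
      /* ffffffffffffff80, i.e. (int8_t) 0x80 widened to 64 bits */
      return 0;
    }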
*/ if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0 && mode_width < BITS_PER_WORD - && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) + && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0) return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width); @@ -4513,9 +4513,9 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x, int low0 = floor_log2 (nz0 & -nz0); int low1 = floor_log2 (nz1 & -nz1); unsigned HOST_WIDE_INT op0_maybe_minusp - = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index); + = nz0 & (HOST_WIDE_INT_1U << sign_index); unsigned HOST_WIDE_INT op1_maybe_minusp - = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index); + = nz1 & (HOST_WIDE_INT_1U << sign_index); unsigned int result_width = mode_width; int result_low = 0; @@ -4561,17 +4561,17 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x, } if (result_width < mode_width) - nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1; + nonzero &= (HOST_WIDE_INT_1U << result_width) - 1; if (result_low > 0) - nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1); + nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1); } break; case ZERO_EXTRACT: if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) - nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1; + nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1; break; case SUBREG: @@ -4652,8 +4652,8 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x, /* If the sign bit may have been nonzero before the shift, we need to mark all the places it could have been copied to by the shift as possibly nonzero. */ - if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count))) - inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1) + if (inner & (HOST_WIDE_INT_1U << (width - 1 - count))) + inner |= ((HOST_WIDE_INT_1U << count) - 1) << (width - count); } else if (code == ASHIFT) @@ -4677,7 +4677,7 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x, that value, plus the number of bits in the mode minus one. */ if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero)) nonzero - |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1; + |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1; else nonzero = -1; break; @@ -4687,14 +4687,14 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x, that value, plus the number of bits in the mode minus one. */ if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero)) nonzero - |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1; + |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1; else nonzero = -1; break; case CLRSB: /* This is at most the number of bits in the mode minus 1. */ - nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1; + nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1; break; case PARITY: @@ -4908,7 +4908,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, Then see how many zero bits we have. */ nonzero = UINTVAL (x) & GET_MODE_MASK (mode); if (bitwidth <= HOST_BITS_PER_WIDE_INT - && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) + && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0) nonzero = (~nonzero) & GET_MODE_MASK (mode); return (nonzero == 0 ? 
bitwidth : bitwidth - floor_log2 (nonzero) - 1); @@ -5008,7 +5008,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, return bitwidth; if (num0 > 1 - && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero)) + && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero)) num0--; return num0; @@ -5030,7 +5030,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, && bitwidth <= HOST_BITS_PER_WIDE_INT && CONST_INT_P (XEXP (x, 1)) && (UINTVAL (XEXP (x, 1)) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0) return num1; /* Similarly for IOR when setting high-order bits. */ @@ -5039,7 +5039,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, && bitwidth <= HOST_BITS_PER_WIDE_INT && CONST_INT_P (XEXP (x, 1)) && (UINTVAL (XEXP (x, 1)) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0) return num1; return MIN (num0, num1); @@ -5054,7 +5054,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, && bitwidth <= HOST_BITS_PER_WIDE_INT) { nonzero = nonzero_bits (XEXP (x, 0), mode); - if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0) + if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0) return (nonzero == 1 || nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1); } @@ -5082,9 +5082,9 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, if (result > 0 && (bitwidth > HOST_BITS_PER_WIDE_INT || (((nonzero_bits (XEXP (x, 0), mode) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0) && ((nonzero_bits (XEXP (x, 1), mode) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)))) result--; @@ -5097,7 +5097,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, if (bitwidth > HOST_BITS_PER_WIDE_INT) return 1; else if ((nonzero_bits (XEXP (x, 0), mode) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0) return 1; else return cached_num_sign_bit_copies (XEXP (x, 0), mode, @@ -5110,7 +5110,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, if (bitwidth > HOST_BITS_PER_WIDE_INT) return 1; else if ((nonzero_bits (XEXP (x, 1), mode) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0) return 1; else return cached_num_sign_bit_copies (XEXP (x, 1), mode, @@ -5125,7 +5125,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, if (result > 1 && (bitwidth > HOST_BITS_PER_WIDE_INT || (nonzero_bits (XEXP (x, 1), mode) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)) result--; return result; @@ -5136,7 +5136,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, if (result > 1 && (bitwidth > HOST_BITS_PER_WIDE_INT || (nonzero_bits (XEXP (x, 1), mode) - & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)) + & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)) result--; return result; @@ -5180,7 +5180,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, Then see how many zero bits we have. 
*/ nonzero = STORE_FLAG_VALUE; if (bitwidth <= HOST_BITS_PER_WIDE_INT - && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) + && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0) nonzero = (~nonzero) & GET_MODE_MASK (mode); return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1); @@ -5199,7 +5199,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x, return 1; nonzero = nonzero_bits (x, mode); - return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) + return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1)) ? 1 : bitwidth - floor_log2 (nonzero) - 1; } @@ -5511,7 +5511,7 @@ canonicalize_condition (rtx_insn *insn, rtx cond, int reverse, BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */ case GE: if ((const_val & max_val) - != ((unsigned HOST_WIDE_INT) 1 + != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (GET_MODE (op0)) - 1))) code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0)); break; @@ -6290,7 +6290,7 @@ get_index_scale (const struct address_info *info) if (GET_CODE (index) == ASHIFT && CONST_INT_P (XEXP (index, 1)) && info->index_term == &XEXP (index, 0)) - return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1)); + return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1)); if (info->index == info->index_term) return 1; diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index f5c530a4503..4354b5bcd33 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -40,7 +40,7 @@ along with GCC; see the file COPYING3. If not see occasionally need to sign extend from low to high as if low were a signed wide int. */ #define HWI_SIGN_EXTEND(low) \ - ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0)) + ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : ((HOST_WIDE_INT) 0)) static rtx neg_const_int (machine_mode, const_rtx); static bool plus_minus_operand_p (const_rtx); @@ -111,8 +111,8 @@ mode_signbit_p (machine_mode mode, const_rtx x) return false; if (width < HOST_BITS_PER_WIDE_INT) - val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; - return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); + val &= (HOST_WIDE_INT_1U << width) - 1; + return val == (HOST_WIDE_INT_1U << (width - 1)); } /* Test whether VAL is equal to the most significant bit of mode MODE @@ -132,7 +132,7 @@ val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val) return false; val &= GET_MODE_MASK (mode); - return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); + return val == (HOST_WIDE_INT_1U << (width - 1)); } /* Test whether the most significant bit of mode MODE is set in VAL. 
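The simplify-rtx.c predicates patched above all reduce to the same test: mask the value down to the mode's width, then compare with, or AND against, the lone bit 1 << (width - 1). A minimal restatement of the val_signbit_p shape, with fixed-width types:

    #include <stdio.h>
    #include <stdint.h>

    /* Does VAL, read as a WIDTH-bit value, equal the sign bit alone?
       0 < WIDTH < 64.  */
    static int
    is_signbit (uint64_t val, int width)
    {
      val &= (UINT64_C (1) << width) - 1;      /* GET_MODE_MASK analogue */
      return val == UINT64_C (1) << (width - 1);
    }

    int
    main (void)
    {
      printf ("%d %d %d\n",
              is_signbit (0x80, 8),      /* 1 */
              is_signbit (0x8000, 16),   /* 1 */
              is_signbit (0x40, 8));     /* 0 */
      return 0;
    }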
@@ -149,7 +149,7 @@ val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val) if (width == 0 || width > HOST_BITS_PER_WIDE_INT) return false; - val &= (unsigned HOST_WIDE_INT) 1 << (width - 1); + val &= HOST_WIDE_INT_1U << (width - 1); return val != 0; } @@ -167,7 +167,7 @@ val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val) if (width == 0 || width > HOST_BITS_PER_WIDE_INT) return false; - val &= (unsigned HOST_WIDE_INT) 1 << (width - 1); + val &= HOST_WIDE_INT_1U << (width - 1); return val == 0; } @@ -5188,7 +5188,7 @@ simplify_const_relational_operation (enum rtx_code code, int sign_bitnum = GET_MODE_PRECISION (mode) - 1; int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum && (UINTVAL (inner_const) - & ((unsigned HOST_WIDE_INT) 1 + & (HOST_WIDE_INT_1U << sign_bitnum))); switch (code) @@ -5376,12 +5376,12 @@ simplify_ternary_operation (enum rtx_code code, machine_mode mode, if (HOST_BITS_PER_WIDE_INT != op1val) { /* First zero-extend. */ - val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1; + val &= (HOST_WIDE_INT_1U << op1val) - 1; /* If desired, propagate sign bit. */ if (code == SIGN_EXTRACT - && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1))) + && (val & (HOST_WIDE_INT_1U << (op1val - 1))) != 0) - val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1); + val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1); } return gen_int_mode (val, mode); @@ -5518,7 +5518,7 @@ simplify_ternary_operation (enum rtx_code code, machine_mode mode, if (n_elts == HOST_BITS_PER_WIDE_INT) mask = -1; else - mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1; + mask = (HOST_WIDE_INT_1U << n_elts) - 1; if (!(sel & mask) && !side_effects_p (op0)) return op1; @@ -5534,7 +5534,7 @@ simplify_ternary_operation (enum rtx_code code, machine_mode mode, unsigned int i; for (i = 0; i < n_elts; i++) - RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i)) + RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i)) ? 
CONST_VECTOR_ELT (trueop0, i) : CONST_VECTOR_ELT (trueop1, i)); return gen_rtx_CONST_VECTOR (mode, v); diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c index d66d769e3fc..4ada959a45a 100644 --- a/gcc/stor-layout.c +++ b/gcc/stor-layout.c @@ -2899,13 +2899,13 @@ get_mode_bounds (machine_mode mode, int sign, } else if (sign) { - min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1)); - max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1; + min_val = -(HOST_WIDE_INT_1U << (size - 1)); + max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1; } else { min_val = 0; - max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1; + max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1; } *mmin = gen_int_mode (min_val, target_mode); diff --git a/gcc/toplev.c b/gcc/toplev.c index da80097eb5b..b0bb3ecd309 100644 --- a/gcc/toplev.c +++ b/gcc/toplev.c @@ -561,14 +561,14 @@ compile_file (void) #if defined ASM_OUTPUT_ALIGNED_DECL_COMMON ASM_OUTPUT_ALIGNED_DECL_COMMON (asm_out_file, NULL_TREE, "__gnu_lto_v1", - (unsigned HOST_WIDE_INT) 1, 8); + HOST_WIDE_INT_1U, 8); #elif defined ASM_OUTPUT_ALIGNED_COMMON ASM_OUTPUT_ALIGNED_COMMON (asm_out_file, "__gnu_lto_v1", - (unsigned HOST_WIDE_INT) 1, 8); + HOST_WIDE_INT_1U, 8); #else ASM_OUTPUT_COMMON (asm_out_file, "__gnu_lto_v1", - (unsigned HOST_WIDE_INT) 1, - (unsigned HOST_WIDE_INT) 1); + HOST_WIDE_INT_1U, + HOST_WIDE_INT_1U); #endif } @@ -578,14 +578,14 @@ compile_file (void) { #if defined ASM_OUTPUT_ALIGNED_DECL_COMMON ASM_OUTPUT_ALIGNED_DECL_COMMON (asm_out_file, NULL_TREE, "__gnu_lto_slim", - (unsigned HOST_WIDE_INT) 1, 8); + HOST_WIDE_INT_1U, 8); #elif defined ASM_OUTPUT_ALIGNED_COMMON ASM_OUTPUT_ALIGNED_COMMON (asm_out_file, "__gnu_lto_slim", - (unsigned HOST_WIDE_INT) 1, 8); + HOST_WIDE_INT_1U, 8); #else ASM_OUTPUT_COMMON (asm_out_file, "__gnu_lto_slim", - (unsigned HOST_WIDE_INT) 1, - (unsigned HOST_WIDE_INT) 1); + HOST_WIDE_INT_1U, + HOST_WIDE_INT_1U); #endif } diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c index 20cf9ef0279..ed6bac990f4 100644 --- a/gcc/tree-ssa-loop-ivopts.c +++ b/gcc/tree-ssa-loop-ivopts.c @@ -2488,14 +2488,14 @@ compute_max_addr_offset (struct iv_use *use) for (i = width; i > 0; i--) { - off = ((unsigned HOST_WIDE_INT) 1 << i) - 1; + off = (HOST_WIDE_INT_1U << i) - 1; XEXP (addr, 1) = gen_int_mode (off, addr_mode); if (memory_address_addr_space_p (mem_mode, addr, as)) break; /* For some strict-alignment targets, the offset must be naturally aligned. Try an aligned offset if mem_mode is not QImode. */ - off = ((unsigned HOST_WIDE_INT) 1 << i); + off = (HOST_WIDE_INT_1U << i); if (off > GET_MODE_SIZE (mem_mode) && mem_mode != QImode) { off -= GET_MODE_SIZE (mem_mode); @@ -4002,7 +4002,7 @@ get_address_cost (bool symbol_present, bool var_present, for (i = width; i >= 0; i--) { - off = -((unsigned HOST_WIDE_INT) 1 << i); + off = -(HOST_WIDE_INT_1U << i); XEXP (addr, 1) = gen_int_mode (off, address_mode); if (memory_address_addr_space_p (mem_mode, addr, as)) break; @@ -4011,14 +4011,14 @@ get_address_cost (bool symbol_present, bool var_present, for (i = width; i >= 0; i--) { - off = ((unsigned HOST_WIDE_INT) 1 << i) - 1; + off = (HOST_WIDE_INT_1U << i) - 1; XEXP (addr, 1) = gen_int_mode (off, address_mode); if (memory_address_addr_space_p (mem_mode, addr, as)) break; /* For some strict-alignment targets, the offset must be naturally aligned. Try an aligned offset if mem_mode is not QImode. */ off = mem_mode != QImode - ? ((unsigned HOST_WIDE_INT) 1 << i) + ? 
(HOST_WIDE_INT_1U << i) - GET_MODE_SIZE (mem_mode) : 0; if (off > 0) diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c index ce9127035ed..5c4798ac2bf 100644 --- a/gcc/tree-vect-generic.c +++ b/gcc/tree-vect-generic.c @@ -474,7 +474,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, unsigned HOST_WIDE_INT mh; unsigned HOST_WIDE_INT d = TREE_INT_CST_LOW (cst) & mask; - if (d >= ((unsigned HOST_WIDE_INT) 1 << (prec - 1))) + if (d >= (HOST_WIDE_INT_1U << (prec - 1))) /* FIXME: Can transform this into op0 >= op1 ? 1 : 0. */ return NULL_TREE; @@ -558,7 +558,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, /* n rem d = n rem -d */ if (code == TRUNC_MOD_EXPR && d < 0) d = abs_d; - else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (prec - 1)) + else if (abs_d == HOST_WIDE_INT_1U << (prec - 1)) { /* This case is not handled correctly below. */ mode = -2; @@ -572,7 +572,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, choose_multiplier (abs_d, prec, prec - 1, &ml, &post_shift, &dummy_int); - if (ml >= (unsigned HOST_WIDE_INT) 1 << (prec - 1)) + if (ml >= HOST_WIDE_INT_1U << (prec - 1)) { this_mode = 4 + (d < 0); ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1); @@ -640,7 +640,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, cond = build2 (LT_EXPR, mask_type, op0, zero); for (i = 0; i < nunits; i++) vec[i] = build_int_cst (TREE_TYPE (type), - ((unsigned HOST_WIDE_INT) 1 + (HOST_WIDE_INT_1U << shifts[i]) - 1); cst = build_vector (type, vec); addend = make_ssa_name (type); @@ -678,7 +678,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, tree mask; for (i = 0; i < nunits; i++) vec[i] = build_int_cst (TREE_TYPE (type), - ((unsigned HOST_WIDE_INT) 1 + (HOST_WIDE_INT_1U << shifts[i]) - 1); mask = build_vector (type, vec); op = optab_for_tree_code (BIT_AND_EXPR, type, optab_default); diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c index 3be1b89d584..d78f92d2efb 100644 --- a/gcc/tree-vect-patterns.c +++ b/gcc/tree-vect-patterns.c @@ -2724,7 +2724,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts, & GET_MODE_MASK (TYPE_MODE (itype))); tree t1, t2, t3, t4; - if (d >= ((unsigned HOST_WIDE_INT) 1 << (prec - 1))) + if (d >= (HOST_WIDE_INT_1U << (prec - 1))) /* FIXME: Can transform this into oprnd0 >= oprnd1 ? 1 : 0. */ return NULL; @@ -2853,12 +2853,12 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts, oprnd1 = build_int_cst (itype, abs_d); } else if (HOST_BITS_PER_WIDE_INT >= prec - && abs_d == (unsigned HOST_WIDE_INT) 1 << (prec - 1)) + && abs_d == HOST_WIDE_INT_1U << (prec - 1)) /* This case is not handled correctly below. 
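The (HOST_WIDE_INT_1U << shifts[i]) - 1 vectors built in the tree-vect-generic.c hunk above are the classic bias for signed division by 2^k: an arithmetic right shift alone rounds toward minus infinity, so negative dividends first get 2^k - 1 added to make the shift round toward zero, matching C's truncating division. A scalar version of the same trick; sdiv_pow2 is a hypothetical name, and note that >> on negative values is implementation-defined in ISO C, though arithmetic on the hosts GCC supports:

    #include <stdio.h>
    #include <stdint.h>

    /* x / 2^k with C (truncating) semantics, via shift plus bias.  */
    static int64_t
    sdiv_pow2 (int64_t x, int k)
    {
      int64_t bias = x < 0 ? (INT64_C (1) << k) - 1 : 0;
      return (x + bias) >> k;
    }

    int
    main (void)
    {
      printf ("%lld %lld\n",
              (long long) sdiv_pow2 (-7, 1),   /* -3, matches -7 / 2 */
              (long long) sdiv_pow2 (7, 1));   /*  3 */
      return 0;
    }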
*/ return NULL; choose_multiplier (abs_d, prec, prec - 1, &ml, &post_shift, &dummy_int); - if (ml >= (unsigned HOST_WIDE_INT) 1 << (prec - 1)) + if (ml >= HOST_WIDE_INT_1U << (prec - 1)) { add = true; ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1); diff --git a/gcc/tree.c b/gcc/tree.c index ac3e3e93f5f..c08ac25b31e 100644 --- a/gcc/tree.c +++ b/gcc/tree.c @@ -1422,7 +1422,7 @@ wide_int_to_tree (tree type, const wide_int_ref &pcst) { if (pcst.elt (l - 1) == 0) gcc_checking_assert (pcst.elt (l - 2) < 0); - if (pcst.elt (l - 1) == (HOST_WIDE_INT) -1) + if (pcst.elt (l - 1) == HOST_WIDE_INT_M1) gcc_checking_assert (pcst.elt (l - 2) >= 0); } diff --git a/gcc/tree.h b/gcc/tree.h index 3dcc5600d8e..e2ffabfe173 100644 --- a/gcc/tree.h +++ b/gcc/tree.h @@ -1965,7 +1965,7 @@ extern machine_mode element_mode (const_tree t); /* For a VECTOR_TYPE, this is the number of sub-parts of the vector. */ #define TYPE_VECTOR_SUBPARTS(VECTOR_TYPE) \ - (((unsigned HOST_WIDE_INT) 1) \ + (HOST_WIDE_INT_1U \ << VECTOR_TYPE_CHECK (VECTOR_TYPE)->type_common.precision) /* Set precision to n when we have 2^n sub-parts of the vector. */ diff --git a/gcc/ubsan.c b/gcc/ubsan.c index 322fe30720a..8ee57d5f1e1 100644 --- a/gcc/ubsan.c +++ b/gcc/ubsan.c @@ -1827,7 +1827,7 @@ instrument_object_size (gimple_stmt_iterator *gsi, bool is_lhs) base_addr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (base)), base); unsigned HOST_WIDE_INT size = compute_builtin_object_size (base_addr, 0); - if (size != (unsigned HOST_WIDE_INT) -1) + if (size != HOST_WIDE_INT_M1U) sizet = build_int_cst (sizetype, size); else if (optimize) { diff --git a/gcc/varasm.c b/gcc/varasm.c index 6a8fb81e41f..4ef6115455c 100644 --- a/gcc/varasm.c +++ b/gcc/varasm.c @@ -7497,7 +7497,7 @@ elf_record_gcc_switches (print_switch_type type, const char * name) { case SWITCH_TYPE_PASSED: ASM_OUTPUT_ASCII (asm_out_file, name, strlen (name)); - ASM_OUTPUT_SKIP (asm_out_file, (unsigned HOST_WIDE_INT) 1); + ASM_OUTPUT_SKIP (asm_out_file, HOST_WIDE_INT_1U); break; case SWITCH_TYPE_DESCRIPTIVE: diff --git a/gcc/wide-int-print.cc b/gcc/wide-int-print.cc index 1da70344335..70f2fb48def 100644 --- a/gcc/wide-int-print.cc +++ b/gcc/wide-int-print.cc @@ -120,7 +120,7 @@ print_hex (const wide_int_ref &wi, char *buf) we do not print a '-' with hex. */ buf += sprintf (buf, "0x"); for (j = BLOCKS_NEEDED (wi.get_precision ()); j > i; j--) - buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, (HOST_WIDE_INT) -1); + buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, HOST_WIDE_INT_M1); } else diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc index 30d355fdb89..1a209bb9b85 100644 --- a/gcc/wide-int.cc +++ b/gcc/wide-int.cc @@ -60,7 +60,7 @@ static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {}; /* Quantities to deal with values that hold half of a wide int. Used in multiply and divide. */ -#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1) +#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1) #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT) #define BLOCKS_NEEDED(PREC) \ @@ -73,7 +73,7 @@ static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {}; static unsigned HOST_WIDE_INT safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i) { - return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0; + return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0; } /* Convert the integer in VAL to canonical form, returning its new length. 
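In the wide-int.cc hunk above, safe_uhwi encodes wide-int's representation rule: a value is LEN explicit blocks, and any block read past the end replicates the sign of the top block, HOST_WIDE_INT_M1 (all ones) for negative values and zero otherwise. Restated with fixed-width types:

    #include <stdio.h>
    #include <stdint.h>

    /* Block I of a wide value stored as LEN blocks in VAL; blocks past
       the end are implicit sign extension.  */
    static uint64_t
    safe_uhwi (const int64_t *val, unsigned len, unsigned i)
    {
      return i < len ? (uint64_t) val[i]
                     : val[len - 1] < 0 ? UINT64_MAX : 0;
    }

    int
    main (void)
    {
      int64_t neg[] = { 5, -1 };   /* a negative two-block value */
      printf ("%llx\n", (unsigned long long) safe_uhwi (neg, 2, 7));
      /* ffffffffffffffff: the implicit sign blocks beyond len */
      return 0;
    }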
@@ -698,7 +698,7 @@ wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, unsigned int len = block + 1; for (unsigned int i = 0; i < len; i++) val[i] = safe_uhwi (xval, xlen, i); - val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit; + val[block] |= HOST_WIDE_INT_1U << subbit; /* If the bit we just set is at the msb of the block, make sure that any higher bits are zeros. */ @@ -710,7 +710,7 @@ wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, { for (unsigned int i = 0; i < xlen; i++) val[i] = xval[i]; - val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit; + val[block] |= HOST_WIDE_INT_1U << subbit; return canonize (val, xlen, precision); } } @@ -779,7 +779,7 @@ wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate, unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1); if (shift != 0) { - HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1; + HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1; val[i++] = negate ? ~last : last; } else @@ -812,12 +812,12 @@ wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width, unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1); if (shift) { - HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1; + HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1; shift += width; if (shift < HOST_BITS_PER_WIDE_INT) { /* case 000111000 */ - block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1; + block = (HOST_WIDE_INT_1U << shift) - block - 1; val[i++] = negate ? ~block : block; return i; } @@ -834,7 +834,7 @@ wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width, if (shift != 0) { /* 000011111 */ - HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1; + HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1; val[i++] = negate ? ~block : block; } else if (end < prec) diff --git a/gcc/wide-int.h b/gcc/wide-int.h index 382d5f31fee..f172faecf87 100644 --- a/gcc/wide-int.h +++ b/gcc/wide-int.h @@ -2116,7 +2116,7 @@ wi::set_bit (const T &x, unsigned int bit) WIDE_INT_REF_FOR (T) xi (x, precision); if (precision <= HOST_BITS_PER_WIDE_INT) { - val[0] = xi.ulow () | ((unsigned HOST_WIDE_INT) 1 << bit); + val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit); result.set_len (1); } else |