-rw-r--r--  gcc/alias.c                          |   9
-rw-r--r--  gcc/builtins.c                       |  27
-rw-r--r--  gcc/c-family/c-common.c              |  18
-rw-r--r--  gcc/combine.c                        |   4
-rw-r--r--  gcc/config/arm/arm.c                 |   3
-rw-r--r--  gcc/config/avr/avr-log.c             |   2
-rw-r--r--  gcc/config/avr/avr.c                 |  13
-rw-r--r--  gcc/config/darwin.c                  |   7
-rw-r--r--  gcc/config/rs6000/darwin.h           |   1
-rw-r--r--  gcc/config/rs6000/rs6000.c           |   2
-rw-r--r--  gcc/cp/decl.c                        |   3
-rw-r--r--  gcc/cp/tree.c                        |   1
-rw-r--r--  gcc/dbxout.c                         |  11
-rw-r--r--  gcc/dwarf2out.c                      |   7
-rw-r--r--  gcc/expmed.c                         |  13
-rw-r--r--  gcc/fixed-value.c                    |   8
-rw-r--r--  gcc/fold-const.c                     | 130
-rw-r--r--  gcc/fortran/trans-intrinsic.c        |  16
-rw-r--r--  gcc/gimple-ssa-strength-reduction.c  |  24
-rw-r--r--  gcc/gimplify.c                       |   2
-rw-r--r--  gcc/graphite-clast-to-gimple.c       |   3
-rw-r--r--  gcc/graphite-sese-to-poly.c          |   3
-rw-r--r--  gcc/java/boehm.c                     |   4
-rw-r--r--  gcc/java/jcf-parse.c                 |   3
-rw-r--r--  gcc/optabs.c                         |  30
-rw-r--r--  gcc/postreload.c                     |   6
-rw-r--r--  gcc/predict.c                        |   5
-rw-r--r--  gcc/print-tree.c                     |   4
-rw-r--r--  gcc/rtl.h                            |  15
-rw-r--r--  gcc/simplify-rtx.c                   |  71
-rw-r--r--  gcc/tree-affine.c                    |  42
-rw-r--r--  gcc/tree-affine.h                    |   6
-rw-r--r--  gcc/tree-cfg.c                       |   4
-rw-r--r--  gcc/tree-chrec.c                     |   9
-rw-r--r--  gcc/tree-object-size.c               |   2
-rw-r--r--  gcc/tree-predcom.c                   |   3
-rw-r--r--  gcc/tree-ssa-address.c               |   7
-rw-r--r--  gcc/tree-ssa-alias.c                 |   3
-rw-r--r--  gcc/tree-ssa-ccp.c                   |   5
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c           |   2
-rw-r--r--  gcc/tree-ssa-loop-niter.c            |   3
-rw-r--r--  gcc/tree-vrp.c                       |  85
-rw-r--r--  gcc/tree.c                           |  14
-rw-r--r--  gcc/tree.h                           |   4
-rw-r--r--  gcc/wide-int.h                       |  15
45 files changed, 274 insertions(+), 375 deletions(-)
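Nearly every hunk below applies one of three wide-int idioms: declare wide_int/offset_int/widest_int results at their first use instead of at the top of the function, pass read-only wide-int values as `const &` parameters, and hand INTEGER_CST trees or (rtx, mode) pairs directly to the wi:: routines instead of copying them into a wide_int first. A minimal sketch of the declaration idiom (illustrative only; assumes the GCC-internal wide-int headers and an INTEGER_CST `arg` in scope):

    /* Before: an uninitialized temporary declared far from its use.
         wide_int val;
         ...
         val = wi::bit_not (arg);
       After: declared at the point of computation.  */
    wide_int val = wi::bit_not (arg);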
diff --git a/gcc/alias.c b/gcc/alias.c
index dce95a25dc3..7af29b3b664 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -2345,17 +2345,16 @@ adjust_offset_for_component_ref (tree x, bool *known_p,
{
tree xoffset = component_ref_field_offset (x);
tree field = TREE_OPERAND (x, 1);
- offset_int woffset;
if (TREE_CODE (xoffset) != INTEGER_CST)
{
*known_p = false;
return;
}
- woffset = (wi::to_offset (xoffset)
- + wi::udiv_trunc (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
- BITS_PER_UNIT));
-
+ offset_int woffset
+ = (wi::to_offset (xoffset)
+ + wi::udiv_trunc (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
+ BITS_PER_UNIT));
if (!wi::fits_uhwi_p (woffset))
{
*known_p = false;
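The alias.c hunk above folds the byte-offset computation into the declaration. Out of context, the same pattern looks like this (a sketch, not part of the patch; `field` is a FIELD_DECL and `xoffset` its INTEGER_CST byte offset, both assumed in scope, and `record_offset` is a hypothetical consumer):

    offset_int woffset
      = (wi::to_offset (xoffset)
         + wi::udiv_trunc (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
                           BITS_PER_UNIT));
    if (wi::fits_uhwi_p (woffset))
      /* Safe to narrow: the offset fits an unsigned HOST_WIDE_INT.  */
      record_offset (woffset.to_uhwi ());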
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 5dfc2a701f0..77670f10fb1 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -699,7 +699,6 @@ c_getstr (tree src)
static rtx
c_readstr (const char *str, enum machine_mode mode)
{
- wide_int c;
HOST_WIDE_INT ch;
unsigned int i, j;
HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
@@ -727,7 +726,7 @@ c_readstr (const char *str, enum machine_mode mode)
tmp[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
}
- c = wide_int::from_array (tmp, len, GET_MODE_PRECISION (mode));
+ wide_int c = wide_int::from_array (tmp, len, GET_MODE_PRECISION (mode));
return immed_wide_int_const (c, mode);
}
@@ -7961,7 +7960,6 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg)
{
tree itype = TREE_TYPE (TREE_TYPE (fndecl));
tree ftype = TREE_TYPE (arg);
- wide_int val;
REAL_VALUE_TYPE r;
bool fail = false;
@@ -7989,8 +7987,7 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg)
gcc_unreachable ();
}
- val = real_to_integer (&r, &fail,
- TYPE_PRECISION (itype));
+ wide_int val = real_to_integer (&r, &fail, TYPE_PRECISION (itype));
if (!fail)
return wide_int_to_tree (itype, val);
}
@@ -8025,33 +8022,32 @@ fold_builtin_bitop (tree fndecl, tree arg)
/* Optimize for constant argument. */
if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg))
{
- wide_int warg = arg;
int result;
switch (DECL_FUNCTION_CODE (fndecl))
{
CASE_INT_FN (BUILT_IN_FFS):
- result = wi::ffs (warg);
+ result = wi::ffs (arg);
break;
CASE_INT_FN (BUILT_IN_CLZ):
- result = wi::clz (warg);
+ result = wi::clz (arg);
break;
CASE_INT_FN (BUILT_IN_CTZ):
- result = wi::ctz (warg);
+ result = wi::ctz (arg);
break;
CASE_INT_FN (BUILT_IN_CLRSB):
- result = wi::clrsb (warg);
+ result = wi::clrsb (arg);
break;
CASE_INT_FN (BUILT_IN_POPCOUNT):
- result = wi::popcount (warg);
+ result = wi::popcount (arg);
break;
CASE_INT_FN (BUILT_IN_PARITY):
- result = wi::parity (warg);
+ result = wi::parity (arg);
break;
default:
@@ -8679,11 +8675,10 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
else if (TREE_CODE (src_base) == MEM_REF
&& TREE_CODE (dest_base) == MEM_REF)
{
- offset_int off;
if (! operand_equal_p (TREE_OPERAND (src_base, 0),
TREE_OPERAND (dest_base, 0), 0))
return NULL_TREE;
- off = mem_ref_offset (src_base) + src_offset;
+ offset_int off = mem_ref_offset (src_base) + src_offset;
if (!wi::fits_shwi_p (off))
return NULL_TREE;
src_offset = off.to_shwi ();
@@ -12622,7 +12617,6 @@ fold_builtin_object_size (tree ptr, tree ost)
if (TREE_CODE (ptr) == ADDR_EXPR)
{
-
wide_int wbytes
= wi::uhwi (compute_builtin_object_size (ptr, object_size_type),
precision);
@@ -12634,9 +12628,8 @@ fold_builtin_object_size (tree ptr, tree ost)
/* If object size is not known yet, delay folding until
later. Maybe subsequent passes will help determining
it. */
- wide_int wbytes;
bytes = compute_builtin_object_size (ptr, object_size_type);
- wbytes = wi::uhwi (bytes, precision);
+ wide_int wbytes = wi::uhwi (bytes, precision);
if (bytes != (unsigned HOST_WIDE_INT) (object_size_type < 2 ? -1 : 0)
&& wi::fits_to_tree_p (wbytes, size_type_node))
return wide_int_to_tree (size_type_node, wbytes);
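In builtins.c the `wide_int warg = arg;` copy disappears: the wi:: bit-scan routines decompose the INTEGER_CST tree in place. A sketch of the direct-operand style (assumes `arg` is an INTEGER_CST without TREE_OVERFLOW):

    int lowest = wi::ffs (arg);       /* 1-based index of lowest set bit, 0 if none */
    int ones   = wi::popcount (arg);  /* number of set bits */
    int par    = wi::parity (arg);    /* popcount mod 2 */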
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index fe70d54c8ab..6f6eeea4732 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -2454,7 +2454,7 @@ shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise)
arg0 = c_common_get_narrower (op0, &unsigned0);
arg1 = c_common_get_narrower (op1, &unsigned1);
-
+
/* UNS is 1 if the operation to be done is an unsigned one. */
uns = TYPE_UNSIGNED (result_type);
@@ -3487,7 +3487,7 @@ c_common_type_for_mode (enum machine_mode mode, int unsignedp)
if (mode == DImode)
return unsignedp ? unsigned_intDI_type_node : intDI_type_node;
-
+
#if HOST_BITS_PER_WIDE_INT >= 64
if (mode == TYPE_MODE (intTI_type_node))
return unsignedp ? unsigned_intTI_type_node : intTI_type_node;
@@ -4003,7 +4003,7 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr,
/* If one of the operands must be floated, we cannot optimize. */
real1 = TREE_CODE (TREE_TYPE (primop0)) == REAL_TYPE;
real2 = TREE_CODE (TREE_TYPE (primop1)) == REAL_TYPE;
-
+
/* If first arg is constant, swap the args (changing operation
so value is preserved), for canonicalization. Don't do this if
the second arg is 0. */
@@ -4518,7 +4518,7 @@ c_common_truthvalue_conversion (location_t location, tree expr)
case ERROR_MARK:
return expr;
-
+
case INTEGER_CST:
return integer_zerop (expr) ? truthvalue_false_node
: truthvalue_true_node;
@@ -4776,7 +4776,7 @@ static GTY((param_is (union tree_node))) htab_t type_hash_table;
/* Return the typed-based alias set for T, which may be an expression
or a type. Return -1 if we don't do anything special. */
-
+
alias_set_type
c_common_get_alias_set (tree t)
{
@@ -5642,7 +5642,7 @@ c_common_nodes_and_builtins (void)
(build_decl (UNKNOWN_LOCATION,
TYPE_DECL, get_identifier (pname),
ptype));
-
+
}
}
@@ -7108,7 +7108,7 @@ handle_mode_attribute (tree *node, tree name, tree args,
tree ident = TREE_VALUE (args);
*no_add_attrs = true;
-
+
if (TREE_CODE (ident) != IDENTIFIER_NODE)
warning (OPT_Wattributes, "%qE attribute ignored", name);
else
@@ -10449,7 +10449,7 @@ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
bool fn(T* mem, T* expected, T desired, weak, success, failure)
into
bool fn ((In *)mem, (In *)expected, (In) *desired, weak, succ, fail) */
-
+
p0 = (*params)[0];
p1 = (*params)[1];
p2 = (*params)[2];
@@ -11248,7 +11248,7 @@ do_warn_double_promotion (tree result_type, tree type1, tree type2,
early on, later parts of the compiler can always do the reverse
translation and get back the corresponding typedef name. For
example, given:
-
+
typedef struct S MY_TYPE;
MY_TYPE object;
diff --git a/gcc/combine.c b/gcc/combine.c
index 05a27a907ec..b21ffce6247 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -2671,11 +2671,11 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
if (offset >= 0)
{
- wide_int o;
rtx inner = SET_SRC (PATTERN (i3));
rtx outer = SET_SRC (temp);
- o = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp))),
+ wide_int o
+ = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp))),
std::make_pair (inner, GET_MODE (dest)),
offset, width);
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index db0d7fa23f4..cb579dd6254 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -11280,7 +11280,6 @@ vfp3_const_double_index (rtx x)
unsigned HOST_WIDE_INT mask;
int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
bool fail;
- wide_int w;
if (!TARGET_VFP3 || !CONST_DOUBLE_P (x))
return -1;
@@ -11300,7 +11299,7 @@ vfp3_const_double_index (rtx x)
WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1
bits for the mantissa, this may fail (low bits would be lost). */
real_ldexp (&m, &r, point_pos - exponent);
- w = real_to_integer (m, &fail, HOST_BITS_PER_WIDE_INT * 2);
+ wide_int w = real_to_integer (m, &fail, HOST_BITS_PER_WIDE_INT * 2);
mantissa = w.elt (0);
mant_hi = w.elt (1);
diff --git a/gcc/config/avr/avr-log.c b/gcc/config/avr/avr-log.c
index 18215679b71..8a631bb1fef 100644
--- a/gcc/config/avr/avr-log.c
+++ b/gcc/config/avr/avr-log.c
@@ -155,7 +155,7 @@ avr_wide_int_pop_digit (wide_int *cst, unsigned base)
/* Dump VAL as hex value to FILE. */
static void
-avr_dump_wide_int_hex (FILE *file, wide_int val)
+avr_dump_wide_int_hex (FILE *file, const wide_int &val)
{
unsigned digit[4];
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index a3b3c90c8b8..6a2750994f5 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -11368,7 +11368,7 @@ avr_expand_delay_cycles (rtx operands0)
/* Return VAL * BASE + DIGIT. BASE = 0 is shortcut for BASE = 2^{32} */
static wide_int
-avr_wide_int_push_digit (wide_int val, int base,
+avr_wide_int_push_digit (const wide_int &val, int base,
unsigned HOST_WIDE_INT digit)
{
val = 0 == base
@@ -11382,7 +11382,7 @@ avr_wide_int_push_digit (wide_int val, int base,
/* Compute the image of x under f, i.e. perform x --> f(x) */
static int
-avr_map (wide_int f, int x)
+avr_map (const wide_int &f, int x)
{
return 0xf & f.lrshift (4*x).to_uhwi ();
}
@@ -11409,7 +11409,7 @@ enum
};
static unsigned
-avr_map_metric (wide_int a, int mode)
+avr_map_metric (const wide_int &a, int mode)
{
unsigned i, metric = 0;
@@ -11507,7 +11507,7 @@ static const avr_map_op_t avr_map_op[] =
If result.cost < 0 then such a decomposition does not exist. */
static avr_map_op_t
-avr_map_decompose (wide_int f, const avr_map_op_t *g, bool val_const_p)
+avr_map_decompose (const wide_int &f, const avr_map_op_t *g, bool val_const_p)
{
int i;
bool val_used_p = 0 != avr_map_metric (f, MAP_MASK_PREIMAGE_F);
@@ -11584,7 +11584,7 @@ avr_map_decompose (wide_int f, const avr_map_op_t *g, bool val_const_p)
is different to its source position. */
static void
-avr_move_bits (rtx *xop, wide_int map, bool fixp_p, int *plen)
+avr_move_bits (rtx *xop, const wide_int &map, bool fixp_p, int *plen)
{
int bit_dest, b;
@@ -12228,7 +12228,6 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
tree tval = arg[2];
tree tmap;
tree map_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
- wide_int map;
bool changed = false;
unsigned i;
avr_map_op_t best_g;
@@ -12241,7 +12240,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
break;
}
- map = wide_int::from_tree (arg[0]);
+ wide_int map = wide_int::from_tree (arg[0]);
tmap = wide_int_to_tree (map_type, map);
if (TREE_CODE (tval) != INTEGER_CST
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index 5372f4a03f8..8d7c3cede89 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -1286,14 +1286,13 @@ darwin_mergeable_constant_section (tree exp,
if (TREE_CODE (size) == INTEGER_CST)
{
- wide_int wsize = size;
- if (wsize == 4)
+ if (wi::eq_p (size, 4))
return darwin_sections[literal4_section];
- else if (wsize == 8)
+ else if (wi::eq_p (size, 8))
return darwin_sections[literal8_section];
else if (HAVE_GAS_LITERAL16
&& TARGET_64BIT
- && wsize == 16)
+ && wi::eq_p (size, 16))
return darwin_sections[literal16_section];
}
}
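darwin.c shows the comparison form of the same idiom: wi::eq_p takes the tree and a host integer directly, so no wide_int temporary is needed just to use operator==. Sketch (`size` is any INTEGER_CST; `pick_section` is a hypothetical stand-in for the section lookup above):

    /* Before: wide_int wsize = size;  if (wsize == 4) ...  */
    if (wi::eq_p (size, 4))
      pick_section (4);
    else if (wi::eq_p (size, 8))
      pick_section (8);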
diff --git a/gcc/config/rs6000/darwin.h b/gcc/config/rs6000/darwin.h
index 5a248a5b085..d5919c4c71d 100644
--- a/gcc/config/rs6000/darwin.h
+++ b/gcc/config/rs6000/darwin.h
@@ -421,4 +421,3 @@ do { \
/* So far, there is no rs6000_fold_builtin, if one is introduced, then
this will need to be modified similar to the x86 case. */
#define TARGET_FOLD_BUILTIN SUBTARGET_FOLD_BUILTIN
-
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 674ff6027ca..c3a91891f11 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -6103,7 +6103,7 @@ offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
if (dsize > 32768)
return false;
-
+
return dalign / BITS_PER_UNIT >= dsize;
}
}
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 36211de735e..84271fef4dd 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -12757,8 +12757,7 @@ finish_enum_value_list (tree enumtype)
/* If -fstrict-enums, still constrain TYPE_MIN/MAX_VALUE. */
if (flag_strict_enums)
- set_min_and_max_values_for_integral_type (enumtype, precision,
- sgn);
+ set_min_and_max_values_for_integral_type (enumtype, precision, sgn);
}
else
underlying_type = ENUM_UNDERLYING_TYPE (enumtype);
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index ccac781c3a8..273131fccae 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -34,7 +34,6 @@ along with GCC; see the file COPYING3. If not see
#include "hash-table.h"
#include "wide-int.h"
-
static tree bot_manip (tree *, int *, void *);
static tree bot_replace (tree *, int *, void *);
static int list_hash_eq (const void *, const void *);
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index 65149814cc8..e87e6eebd2f 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -690,8 +690,7 @@ stabstr_U (unsigned HOST_WIDE_INT num)
static void
stabstr_O (tree cst)
{
- wide_int wcst = cst;
- int prec = wcst.get_precision ();
+ int prec = TYPE_PRECISION (TREE_TYPE (cst));
int res_pres = prec % 3;
int i;
unsigned int digit;
@@ -701,7 +700,7 @@ stabstr_O (tree cst)
/* If the value is zero, the base indicator will serve as the value
all by itself. */
- if (wcst == 0)
+ if (wi::eq_p (cst, 0))
return;
/* GDB wants constants with no extra leading "1" bits, so
@@ -709,19 +708,19 @@ stabstr_O (tree cst)
present. */
if (res_pres == 1)
{
- digit = wi::extract_uhwi (wcst, prec - 1, 1);
+ digit = wi::extract_uhwi (cst, prec - 1, 1);
stabstr_C ('0' + digit);
}
else if (res_pres == 2)
{
- digit = wi::extract_uhwi (wcst, prec - 2, 2);
+ digit = wi::extract_uhwi (cst, prec - 2, 2);
stabstr_C ('0' + digit);
}
prec -= res_pres;
for (i = prec - 3; i >= 0; i = i - 3)
{
- digit = wi::extract_uhwi (wcst, i, 3);
+ digit = wi::extract_uhwi (cst, i, 3);
stabstr_C ('0' + digit);
}
}
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index cf7de1ad246..54fc6f7320d 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -347,7 +347,6 @@ dump_struct_debug (tree type, enum debug_info_usage usage,
#endif
-
/* Get the number of host wide ints needed to represent the precision
of the number. */
@@ -13364,8 +13363,7 @@ loc_descriptor (rtx rtl, enum machine_mode mode,
for (i = 0, p = array; i < length; i++, p += elt_size)
{
rtx elt = CONST_VECTOR_ELT (rtl, i);
- wide_int val = std::make_pair (elt, imode);
- insert_wide_int (val, p);
+ insert_wide_int (std::make_pair (elt, imode), p);
}
break;
@@ -15110,8 +15108,7 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
for (i = 0, p = array; i < length; i++, p += elt_size)
{
rtx elt = CONST_VECTOR_ELT (rtl, i);
- wide_int val = std::make_pair (elt, imode);
- insert_wide_int (val, p);
+ insert_wide_int (std::make_pair (elt, imode), p);
}
break;
diff --git a/gcc/expmed.c b/gcc/expmed.c
index bb02d0d6d6a..90aa45e09e1 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -3061,8 +3061,7 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
#endif
{
int p = GET_MODE_PRECISION (mode);
- wide_int val = std::make_pair (scalar_op1, mode);
- int shift = wi::exact_log2 (val);
+ int shift = wi::exact_log2 (std::make_pair (scalar_op1, mode));
/* Perfect power of 2. */
is_neg = false;
if (shift > 0)
@@ -3080,7 +3079,7 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
/* Any positive number that fits in a word. */
coeff = CONST_WIDE_INT_ELT (scalar_op1, 0);
}
- else if (wi::sign_mask (val) == 0)
+ else if (wi::sign_mask (std::make_pair (scalar_op1, mode)) == 0)
{
/* Any positive number that fits in a word. */
coeff = CONST_WIDE_INT_ELT (scalar_op1, 0);
@@ -3261,7 +3260,6 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
unsigned HOST_WIDE_INT *multiplier_ptr,
int *post_shift_ptr, int *lgup_ptr)
{
- wide_int mhigh, mlow;
int lgup, post_shift;
int pow, pow2;
@@ -3275,11 +3273,11 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
/* mlow = 2^(N + lgup)/d */
wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
- mlow = wi::udiv_trunc (val, d);
+ wide_int mlow = wi::udiv_trunc (val, d);
/* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
- mhigh = wi::udiv_trunc (val, d);
+ wide_int mhigh = wi::udiv_trunc (val, d);
/* If precision == N, then mlow, mhigh exceed 2^N
(but they do not exceed 2^(N+1)). */
@@ -3578,7 +3576,6 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
rtx result, temp, shift, label;
int logd;
- wide_int mask;
int prec = GET_MODE_PRECISION (mode);
logd = floor_log2 (d);
@@ -3640,7 +3637,7 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
modulus. By including the signbit in the operation, many targets
can avoid an explicit compare operation in the following comparison
against zero. */
- mask = wi::mask (logd, false, GET_MODE_PRECISION (mode));
+ wide_int mask = wi::mask (logd, false, GET_MODE_PRECISION (mode));
mask = wi::set_bit (mask, prec - 1);
temp = expand_binop (mode, and_optab, op0,
diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c
index cc238bd479e..5341029f8aa 100644
--- a/gcc/fixed-value.c
+++ b/gcc/fixed-value.c
@@ -115,7 +115,6 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode)
unsigned int fbit;
enum fixed_value_range_code temp;
bool fail;
- wide_int w;
f->mode = mode;
fbit = GET_MODE_FBIT (mode);
@@ -130,7 +129,8 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode)
"large fixed-point constant implicitly truncated to fixed-point type");
real_2expN (&base_value, fbit, mode);
real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value);
- w = real_to_integer (&fixed_value, &fail, GET_MODE_PRECISION (mode));
+ wide_int w = real_to_integer (&fixed_value, &fail,
+ GET_MODE_PRECISION (mode));
f->data.low = w.elt (0);
f->data.high = w.elt (1);
@@ -1049,14 +1049,14 @@ fixed_convert_from_real (FIXED_VALUE_TYPE *f, enum machine_mode mode,
unsigned int fbit = GET_MODE_FBIT (mode);
enum fixed_value_range_code temp;
bool fail;
- wide_int w;
real_value = *a;
f->mode = mode;
real_2expN (&base_value, fbit, mode);
real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value);
- w = real_to_integer (&fixed_value, &fail, GET_MODE_PRECISION (mode));
+ wide_int w = real_to_integer (&fixed_value, &fail,
+ GET_MODE_PRECISION (mode));
f->data.low = w.elt (0);
f->data.high = w.elt (1);
temp = check_real_for_fixed_mode (&real_value, mode);
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 7cfc77d1e90..8953467a50f 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -969,28 +969,27 @@ static tree
int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
int overflowable)
{
- wide_int op1, arg2, res;
+ wide_int res;
tree t;
tree type = TREE_TYPE (arg1);
signop sign = TYPE_SIGN (type);
bool overflow = false;
- op1 = arg1;
- arg2 = wide_int::from (parg2, TYPE_PRECISION (type),
- TYPE_SIGN (TREE_TYPE (parg2)));
+ wide_int arg2 = wide_int::from (parg2, TYPE_PRECISION (type),
+ TYPE_SIGN (TREE_TYPE (parg2)));
switch (code)
{
case BIT_IOR_EXPR:
- res = op1 | arg2;
+ res = wi::bit_or (arg1, arg2);
break;
case BIT_XOR_EXPR:
- res = op1 ^ arg2;
+ res = wi::bit_xor (arg1, arg2);
break;
case BIT_AND_EXPR:
- res = op1 & arg2;
+ res = wi::bit_and (arg1, arg2);
break;
case RSHIFT_EXPR:
@@ -1008,9 +1007,10 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- res = wi::rshift (op1, arg2, sign, GET_MODE_BITSIZE (TYPE_MODE (type)));
+ res = wi::rshift (arg1, arg2, sign,
+ GET_MODE_BITSIZE (TYPE_MODE (type)));
else
- res = wi::lshift (op1, arg2, GET_MODE_BITSIZE (TYPE_MODE (type)));
+ res = wi::lshift (arg1, arg2, GET_MODE_BITSIZE (TYPE_MODE (type)));
break;
case RROTATE_EXPR:
@@ -1025,82 +1025,82 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
}
if (code == RROTATE_EXPR)
- res = wi::rrotate (op1, arg2);
+ res = wi::rrotate (arg1, arg2);
else
- res = wi::lrotate (op1, arg2);
+ res = wi::lrotate (arg1, arg2);
break;
case PLUS_EXPR:
- res = wi::add (op1, arg2, sign, &overflow);
+ res = wi::add (arg1, arg2, sign, &overflow);
break;
case MINUS_EXPR:
- res = wi::sub (op1, arg2, sign, &overflow);
+ res = wi::sub (arg1, arg2, sign, &overflow);
break;
case MULT_EXPR:
- res = wi::mul (op1, arg2, sign, &overflow);
+ res = wi::mul (arg1, arg2, sign, &overflow);
break;
case MULT_HIGHPART_EXPR:
- res = wi::mul_high (op1, arg2, sign);
+ res = wi::mul_high (arg1, arg2, sign);
break;
case TRUNC_DIV_EXPR:
case EXACT_DIV_EXPR:
- res = wi::div_trunc (op1, arg2, sign, &overflow);
+ res = wi::div_trunc (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case FLOOR_DIV_EXPR:
- res = wi::div_floor (op1, arg2, sign, &overflow);
+ res = wi::div_floor (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case CEIL_DIV_EXPR:
- res = wi::div_ceil (op1, arg2, sign, &overflow);
+ res = wi::div_ceil (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case ROUND_DIV_EXPR:
- res = wi::div_round (op1, arg2, sign, &overflow);
+ res = wi::div_round (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case TRUNC_MOD_EXPR:
- res = wi::mod_trunc (op1, arg2, sign, &overflow);
+ res = wi::mod_trunc (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case FLOOR_MOD_EXPR:
- res = wi::mod_floor (op1, arg2, sign, &overflow);
+ res = wi::mod_floor (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case CEIL_MOD_EXPR:
- res = wi::mod_ceil (op1, arg2, sign, &overflow);
+ res = wi::mod_ceil (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case ROUND_MOD_EXPR:
- res = wi::mod_round (op1, arg2, sign, &overflow);
+ res = wi::mod_round (arg1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case MIN_EXPR:
- res = wi::min (op1, arg2, sign);
+ res = wi::min (arg1, arg2, sign);
break;
case MAX_EXPR:
- res = wi::max (op1, arg2, sign);
+ res = wi::max (arg1, arg2, sign);
break;
default:
@@ -5991,10 +5991,9 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
assuming no overflow. */
if (tcode == code)
{
- wide_int mul;
bool overflow_p;
signop sign = TYPE_SIGN (ctype);
- mul = wi::mul_full (op1, c, sign);
+ wide_int mul = wi::mul_full (op1, c, sign);
overflow_p = TREE_OVERFLOW (c) | TREE_OVERFLOW (op1);
if (!wi::fits_to_tree_p (mul, ctype)
&& ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED))
@@ -6400,14 +6399,13 @@ fold_div_compare (location_t loc,
tree prod, tmp, hi, lo;
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
- wide_int val;
signop sign = TYPE_SIGN (TREE_TYPE (arg0));
bool neg_overflow = false;
bool overflow;
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, arg01, arg1); */
- val = wi::mul (arg01, arg1, sign, &overflow);
+ wide_int val = wi::mul (arg01, arg1, sign, &overflow);
prod = force_fit_type (TREE_TYPE (arg00), val, -1, overflow);
neg_overflow = false;
@@ -7511,13 +7509,12 @@ static tree
native_interpret_int (tree type, const unsigned char *ptr, int len)
{
int total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
- wide_int result;
if (total_bytes > len
|| total_bytes * BITS_PER_UNIT > HOST_BITS_PER_DOUBLE_INT)
return NULL_TREE;
- result = wi::from_buffer (ptr, total_bytes);
+ wide_int result = wi::from_buffer (ptr, total_bytes);
return wide_int_to_tree (type, result);
}
@@ -8866,14 +8863,13 @@ maybe_canonicalize_comparison (location_t loc, enum tree_code code, tree type,
static bool
pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
{
- wide_int wi_offset, total;
-
if (!POINTER_TYPE_P (TREE_TYPE (base)))
return true;
if (bitpos < 0)
return true;
+ wide_int wi_offset;
int precision = TYPE_PRECISION (TREE_TYPE (base));
if (offset == NULL_TREE)
wi_offset = wi::zero (precision);
@@ -8884,7 +8880,7 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
bool overflow;
wide_int units = wi::shwi (bitpos / BITS_PER_UNIT, precision);
- total = wi::add (wi_offset, units, UNSIGNED, &overflow);
+ wide_int total = wi::add (wi_offset, units, UNSIGNED, &overflow);
if (overflow)
return true;
@@ -9905,12 +9901,7 @@ mask_with_tz (tree type, wide_int x, wide_int y)
{
int tz = wi::ctz (y);
if (tz > 0)
- {
- wide_int mask;
-
- mask = wi::mask (tz, true, TYPE_PRECISION (type));
- return mask & x;
- }
+ return wi::mask (tz, true, TYPE_PRECISION (type)) & x;
return x;
}
@@ -11234,18 +11225,18 @@ fold_binary_loc (location_t loc,
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- wide_int c1, c2, c3, msk;
int width = TYPE_PRECISION (type), w;
bool try_simplify = true;
- c1 = TREE_OPERAND (arg0, 1);
- c2 = arg1;
+ wide_int c1 = TREE_OPERAND (arg0, 1);
+ wide_int c2 = arg1;
/* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */
if ((c1 & c2) == c1)
return omit_one_operand_loc (loc, type, arg1,
TREE_OPERAND (arg0, 0));
- msk = wi::mask (width, false, TYPE_PRECISION (TREE_TYPE (arg1)));
+ wide_int msk = wi::mask (width, false,
+ TYPE_PRECISION (TREE_TYPE (arg1)));
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
if (msk.and_not (c1 | c2) == 0)
@@ -11257,7 +11248,7 @@ fold_binary_loc (location_t loc,
mode which allows further optimizations. */
c1 &= msk;
c2 &= msk;
- c3 = c1.and_not (c2);
+ wide_int c3 = c1.and_not (c2);
for (w = BITS_PER_UNIT; w <= width; w <<= 1)
{
wide_int mask = wi::mask (width - w, false,
@@ -11702,7 +11693,7 @@ fold_binary_loc (location_t loc,
tree pmop[2];
int which = 0;
wide_int cst0;
-
+
/* Now we know that arg0 is (C + D) or (C - D) or
-C and arg1 (M) is == (1LL << cst) - 1.
Store C into PMOP[0] and D into PMOP[1]. */
@@ -11713,10 +11704,10 @@ fold_binary_loc (location_t loc,
pmop[1] = TREE_OPERAND (arg0, 1);
which = 1;
}
-
+
if ((wi::max_value (TREE_TYPE (arg0)) & cst1) != cst1)
which = -1;
-
+
for (; which >= 0; which--)
switch (TREE_CODE (pmop[which]))
{
@@ -11751,7 +11742,7 @@ fold_binary_loc (location_t loc,
default:
break;
}
-
+
/* Only build anything new if we optimized one or both arguments
above. */
if (pmop[0] != TREE_OPERAND (arg0, 0)
@@ -11769,7 +11760,7 @@ fold_binary_loc (location_t loc,
if (pmop[1] != NULL)
pmop[1] = fold_convert_loc (loc, utype, pmop[1]);
}
-
+
if (TREE_CODE (arg0) == NEGATE_EXPR)
tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[0]);
else if (TREE_CODE (arg0) == PLUS_EXPR)
@@ -11804,10 +11795,9 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
&& TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
{
- wide_int mask;
prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
- mask = wide_int::from (arg1, prec, UNSIGNED);
+ wide_int mask = wide_int::from (arg1, prec, UNSIGNED);
if (mask == -1)
return
fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
@@ -13479,9 +13469,8 @@ fold_binary_loc (location_t loc,
wide_int max = wi::max_value (arg1_type);
wide_int signed_max = wi::max_value (prec, SIGNED);
wide_int min = wi::min_value (arg1_type);
- wide_int wi_arg1 = arg1;
- if (wi_arg1 == max)
+ if (wi::eq_p (arg1, max))
switch (code)
{
case GT_EXPR:
@@ -13502,7 +13491,7 @@ fold_binary_loc (location_t loc,
default:
break;
}
- else if (wi_arg1 == (max - 1))
+ else if (wi::eq_p (arg1, max - 1))
switch (code)
{
case GT_EXPR:
@@ -13522,7 +13511,7 @@ fold_binary_loc (location_t loc,
default:
break;
}
- else if (wi_arg1 == min)
+ else if (wi::eq_p (arg1, min))
switch (code)
{
case LT_EXPR:
@@ -13540,7 +13529,7 @@ fold_binary_loc (location_t loc,
default:
break;
}
- else if (wi_arg1 == (min + 1))
+ else if (wi::eq_p (arg1, min + 1))
switch (code)
{
case GE_EXPR:
@@ -13561,7 +13550,7 @@ fold_binary_loc (location_t loc,
break;
}
- else if (wi_arg1 == signed_max
+ else if (wi::eq_p (arg1, signed_max)
&& TYPE_UNSIGNED (arg1_type)
/* KENNY QUESTIONS THE CHECKING OF THE BITSIZE
HERE. HE FEELS THAT THE PRECISION SHOULD BE
@@ -14097,8 +14086,6 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
&& TYPE_PRECISION (TREE_TYPE (tem))
< TYPE_PRECISION (type))
{
- wide_int mask;
- wide_int wi_arg1 = arg1;
int inner_width, outer_width;
tree tem_type;
@@ -14107,16 +14094,16 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
if (outer_width > TYPE_PRECISION (type))
outer_width = TYPE_PRECISION (type);
- mask = wi::shifted_mask
+ wide_int mask = wi::shifted_mask
(inner_width, outer_width - inner_width, false,
TYPE_PRECISION (TREE_TYPE (arg1)));
- if (wi_arg1 == mask)
+ if (mask == arg1)
{
tem_type = signed_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
}
- else if ((wi_arg1 & mask) == 0)
+ else if ((mask & arg1) == 0)
{
tem_type = unsigned_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
@@ -14381,14 +14368,13 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
for (i = 0; i < nelts; i++)
{
tree val = VECTOR_CST_ELT (arg2, i);
- wide_int t;
if (TREE_CODE (val) != INTEGER_CST)
return NULL_TREE;
/* Make sure that the perm value is in an acceptable
range. */
- t = val;
+ wide_int t = val;
if (wi::gtu_p (t, nelts_cnt))
{
need_mask_canon = true;
@@ -16214,9 +16200,8 @@ fold_negate_const (tree arg0, tree type)
{
case INTEGER_CST:
{
- wide_int val = arg0;
bool overflow;
- val = wi::neg (val, &overflow);
+ wide_int val = wi::neg (arg0, &overflow);
t = force_fit_type (type, val, 1,
(overflow | TREE_OVERFLOW (arg0))
&& !TYPE_UNSIGNED (type));
@@ -16261,11 +16246,9 @@ fold_abs_const (tree arg0, tree type)
{
case INTEGER_CST:
{
- wide_int val = arg0;
-
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
- if (!wi::neg_p (val, TYPE_SIGN (type)))
+ if (!wi::neg_p (arg0, TYPE_SIGN (type)))
t = arg0;
/* If the value is negative, then the absolute value is
@@ -16273,7 +16256,7 @@ fold_abs_const (tree arg0, tree type)
else
{
bool overflow;
- val = wi::neg (val, &overflow);
+ wide_int val = wi::neg (arg0, &overflow);
t = force_fit_type (type, val, -1,
overflow | TREE_OVERFLOW (arg0));
}
@@ -16300,12 +16283,9 @@ fold_abs_const (tree arg0, tree type)
static tree
fold_not_const (const_tree arg0, tree type)
{
- wide_int val;
-
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- val = wi::bit_not (arg0);
- return force_fit_type (type, val, 0, TREE_OVERFLOW (arg0));
+ return force_fit_type (type, wi::bit_not (arg0), 0, TREE_OVERFLOW (arg0));
}
/* Given CODE, a relational operator, the target type, TYPE and two
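fold-const.c is the largest consumer of the new style: int_const_binop_1 extends the second operand once with wide_int::from, then dispatches to named wi:: routines whose trailing argument reports overflow, letting division and modulus bail out before a bogus constant is built. A condensed sketch of that shape (assumes trees `arg1`/`parg2` of compatible integral types, as in the function above):

    tree type = TREE_TYPE (arg1);
    signop sign = TYPE_SIGN (type);
    bool overflow = false;
    wide_int arg2 = wide_int::from (parg2, TYPE_PRECISION (type),
                                    TYPE_SIGN (TREE_TYPE (parg2)));
    wide_int res = wi::div_trunc (arg1, arg2, sign, &overflow);
    if (overflow)
      return NULL_TREE;             /* refuse to fold on overflow */
    return wide_int_to_tree (type, res);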
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index 37c8a1cec36..a7366858d1e 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -984,10 +984,8 @@ trans_this_image (gfc_se * se, gfc_expr *expr)
if (INTEGER_CST_P (dim_arg))
{
- wide_int wdim_arg = dim_arg;
-
- if (wi::ltu_p (wdim_arg, 1)
- || wi::gtu_p (wdim_arg,
+ if (wi::ltu_p (dim_arg, 1)
+ || wi::gtu_p (dim_arg,
GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
@@ -1345,10 +1343,9 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
if (INTEGER_CST_P (bound))
{
- wide_int wbound = bound;
if (((!as || as->type != AS_ASSUMED_RANK)
- && wi::geu_p (wbound, GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
- || wi::gtu_p (wbound, GFC_MAX_DIMENSIONS))
+ && wi::geu_p (bound, GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
+ || wi::gtu_p (bound, GFC_MAX_DIMENSIONS))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", upper ? "UBOUND" : "LBOUND",
&expr->where);
@@ -1543,9 +1540,8 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr)
if (INTEGER_CST_P (bound))
{
- wide_int wbound = bound;
- if (wi::ltu_p (wbound, 1)
- || wi::gtu_p (wbound, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
+ if (wi::ltu_p (bound, 1)
+ || wi::gtu_p (bound, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index d9a8ed22bd3..4d9c1f8d68d 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -910,7 +910,6 @@ slsr_process_ref (gimple gs)
HOST_WIDE_INT bitsize, bitpos;
enum machine_mode mode;
int unsignedp, volatilep;
- widest_int index;
slsr_cand_t c;
if (gimple_vdef (gs))
@@ -926,7 +925,7 @@ slsr_process_ref (gimple gs)
base = get_inner_reference (ref_expr, &bitsize, &bitpos, &offset, &mode,
&unsignedp, &volatilep, false);
- index = bitpos;
+ widest_int index = bitpos;
if (!restructure_reference (&base, &offset, &index, &type))
return;
@@ -1258,7 +1257,8 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
about BASE_IN into the new candidate. Return the new candidate. */
static slsr_cand_t
-create_add_imm_cand (gimple gs, tree base_in, widest_int index_in, bool speed)
+create_add_imm_cand (gimple gs, tree base_in, const widest_int &index_in,
+ bool speed)
{
enum cand_kind kind = CAND_ADD;
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
@@ -1358,10 +1358,8 @@ slsr_process_add (gimple gs, tree rhs1, tree rhs2, bool speed)
}
else
{
- widest_int index;
-
/* Record an interpretation for the add-immediate. */
- index = wi::to_widest (rhs2);
+ widest_int index = wi::to_widest (rhs2);
if (subtract_p)
index = -index;
@@ -1918,12 +1916,10 @@ cand_already_replaced (slsr_cand_t c)
replace_conditional_candidate. */
static void
-replace_mult_candidate (slsr_cand_t c, tree basis_name,
- const widest_int &bump_in)
+replace_mult_candidate (slsr_cand_t c, tree basis_name, widest_int bump)
{
tree target_type = TREE_TYPE (gimple_assign_lhs (c->cand_stmt));
enum tree_code cand_code = gimple_assign_rhs_code (c->cand_stmt);
- widest_int bump = bump_in;
/* It is highly unlikely, but possible, that the resulting
bump doesn't fit in a HWI. Abandon the replacement
@@ -2023,13 +2019,12 @@ static void
replace_unconditional_candidate (slsr_cand_t c)
{
slsr_cand_t basis;
- widest_int bump;
if (cand_already_replaced (c))
return;
basis = lookup_cand (c->basis);
- bump = cand_increment (c) * wi::to_widest (c->stride);
+ widest_int bump = cand_increment (c) * wi::to_widest (c->stride);
replace_mult_candidate (c, gimple_assign_lhs (basis->cand_stmt), bump);
}
@@ -2235,7 +2230,6 @@ replace_conditional_candidate (slsr_cand_t c)
tree basis_name, name;
slsr_cand_t basis;
location_t loc;
- widest_int bump;
/* Look up the LHS SSA name from C's basis. This will be the
RHS1 of the adds we will introduce to create new phi arguments. */
@@ -2248,7 +2242,7 @@ replace_conditional_candidate (slsr_cand_t c)
name = create_phi_basis (c, lookup_cand (c->def_phi)->cand_stmt,
basis_name, loc, KNOWN_STRIDE);
/* Replace C with an add of the new basis phi and a constant. */
- bump = c->index * wi::to_widest (c->stride);
+ widest_int bump = c->index * wi::to_widest (c->stride);
replace_mult_candidate (c, name, bump);
}
@@ -2380,12 +2374,10 @@ count_candidates (slsr_cand_t c)
candidates with the same increment, also record T_0 for subsequent use. */
static void
-record_increment (slsr_cand_t c, const widest_int &increment_in,
- bool is_phi_adjust)
+record_increment (slsr_cand_t c, widest_int increment, bool is_phi_adjust)
{
bool found = false;
unsigned i;
- widest_int increment = increment_in;
/* Treat increments that differ only in sign as identical so as to
share initializers, unless we are generating pointer arithmetic. */
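gimple-ssa-strength-reduction.c settles the parameter-passing convention: values that are only read become `const widest_int &`, while record_increment and replace_mult_candidate, which modify their operand, now take the widest_int by value so the copy lives in the signature rather than in a shadow local. A sketch of the two shapes (self-contained toy functions, not from the patch):

    /* Read-only operand: const reference, no copy.  */
    static widest_int
    scaled_bump (const widest_int &increment, const widest_int &stride)
    {
      return increment * stride;
    }

    /* Callee mutates its operand: take it by value.  */
    static widest_int
    canonical_increment (widest_int increment)
    {
      if (wi::neg_p (increment))
        increment = -increment;   /* treat +d and -d as the same increment */
      return increment;
    }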
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 4de126ebcd7..21861d244ea 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -1728,7 +1728,7 @@ preprocess_case_label_vec_for_gimple (vec<tree> labels,
low = CASE_HIGH (labels[i - 1]);
if (!low)
low = CASE_LOW (labels[i - 1]);
- if ((wide_int (low) + 1) != high)
+ if (wi::add (low, 1) != high)
break;
}
if (i == len)
diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c
index 5de11ba24d7..883011c8532 100644
--- a/gcc/graphite-clast-to-gimple.c
+++ b/gcc/graphite-clast-to-gimple.c
@@ -63,11 +63,10 @@ gmp_cst_to_tree (tree type, mpz_t val)
{
tree t = type ? type : integer_type_node;
mpz_t tmp;
- wide_int wi;
mpz_init (tmp);
mpz_set (tmp, val);
- wi = wi::from_mpz (t, tmp, true);
+ wide_int wi = wi::from_mpz (t, tmp, true);
mpz_clear (tmp);
return wide_int_to_tree (t, wi);
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index c82929b8b94..b929511c728 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -53,8 +53,7 @@ along with GCC; see the file COPYING3. If not see
static inline void
tree_int_to_gmp (tree t, mpz_t res)
{
- wide_int wi = t;
- wi::to_mpz (wi, res, TYPE_SIGN (TREE_TYPE (t)));
+ wi::to_mpz (t, res, TYPE_SIGN (TREE_TYPE (t)));
}
/* Returns the index of the PHI argument defined in the outermost
diff --git a/gcc/java/boehm.c b/gcc/java/boehm.c
index 158c8ebd274..14d228a7e2f 100644
--- a/gcc/java/boehm.c
+++ b/gcc/java/boehm.c
@@ -137,7 +137,6 @@ get_boehm_type_descriptor (tree type)
int last_set_index = 0;
HOST_WIDE_INT last_view_index = -1;
int pointer_after_end = 0;
- wide_int mask;
tree field, value, value_type;
/* If the GC wasn't requested, just use a null pointer. */
@@ -145,8 +144,7 @@ get_boehm_type_descriptor (tree type)
return null_pointer_node;
value_type = java_type_for_mode (ptr_mode, 1);
-
- mask = wi::zero (TYPE_PRECISION (value_type));
+ wide_int mask = wi::zero (TYPE_PRECISION (value_type));
/* If we have a type of unknown size, use a proc. */
if (int_size_in_bytes (type) == -1)
diff --git a/gcc/java/jcf-parse.c b/gcc/java/jcf-parse.c
index d9025ced492..750a17faec6 100644
--- a/gcc/java/jcf-parse.c
+++ b/gcc/java/jcf-parse.c
@@ -1040,10 +1040,9 @@ get_constant (JCF *jcf, int index)
case CONSTANT_Long:
{
unsigned HOST_WIDE_INT num;
- wide_int val;
num = JPOOL_UINT (jcf, index);
- val = wi::lshift (wide_int::from (num, 64, SIGNED), 32);
+ wide_int val = wi::lshift (wide_int::from (num, 64, SIGNED), 32);
num = JPOOL_UINT (jcf, index + 1);
val |= num;
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 96d10cb069d..cd5d685bff1 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -2891,7 +2891,6 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
const struct real_format *fmt;
int bitpos, word, nwords, i;
enum machine_mode imode;
- wide_int mask;
rtx temp, insns;
/* The format has to have a simple sign bit. */
@@ -2927,7 +2926,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
if (code == ABS)
mask = ~mask;
@@ -3568,8 +3567,6 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
}
else
{
- wide_int mask;
-
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
imode = int_mode_for_mode (mode);
@@ -3590,7 +3587,7 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
op1 = operand_subword_force (op1, word, mode);
}
- mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
sign = expand_binop (imode, and_optab, op1,
immed_wide_int_const (mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
@@ -3636,7 +3633,6 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
int bitpos, bool op0_is_abs)
{
enum machine_mode imode;
- wide_int mask, nmask;
int word, nwords, i;
rtx temp, insns;
@@ -3660,7 +3656,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
if (target == 0
|| target == op0
@@ -3680,13 +3676,10 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
if (i == word)
{
if (!op0_is_abs)
- {
- nmask = ~mask;
- op0_piece
- = expand_binop (imode, and_optab, op0_piece,
- immed_wide_int_const (nmask, imode),
- NULL_RTX, 1, OPTAB_LIB_WIDEN);
- }
+ op0_piece
+ = expand_binop (imode, and_optab, op0_piece,
+ immed_wide_int_const (-mask, imode),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
op1 = expand_binop (imode, and_optab,
operand_subword_force (op1, i, mode),
immed_wide_int_const (mask, imode),
@@ -3714,12 +3707,9 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
op0 = gen_lowpart (imode, op0);
if (!op0_is_abs)
- {
- nmask = ~mask;
- op0 = expand_binop (imode, and_optab, op0,
- immed_wide_int_const (nmask, imode),
- NULL_RTX, 1, OPTAB_LIB_WIDEN);
- }
+ op0 = expand_binop (imode, and_optab, op0,
+ immed_wide_int_const (-mask, imode),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (imode, ior_optab, op0, op1,
gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
target = lowpart_subreg_maybe_copy (mode, temp, imode);
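optabs.c builds each sign-bit mask at its point of use with wi::set_bit_in_zero, and the separate `nmask` temporary for the complement is gone. Sketch of the mask construction (`imode`, `bitpos`, and `code` as in expand_absneg_bit above):

    wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
    if (code == ABS)
      mask = ~mask;               /* ABS clears the sign bit instead of setting it */
    rtx mask_rtx = immed_wide_int_const (mask, imode);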
diff --git a/gcc/postreload.c b/gcc/postreload.c
index cdfe7b7ddfa..b9d97174caf 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -304,12 +304,14 @@ reload_cse_simplify_set (rtx set, rtx insn)
{
case ZERO_EXTEND:
result = wide_int (std::make_pair (this_rtx, GET_MODE (src)));
- if (GET_MODE_PRECISION (GET_MODE (src)) > GET_MODE_PRECISION (word_mode))
+ if (GET_MODE_PRECISION (GET_MODE (src))
+ > GET_MODE_PRECISION (word_mode))
result = wi::zext (result, GET_MODE_PRECISION (word_mode));
break;
case SIGN_EXTEND:
result = wide_int (std::make_pair (this_rtx, GET_MODE (src)));
- if (GET_MODE_PRECISION (GET_MODE (src)) > GET_MODE_PRECISION (word_mode))
+ if (GET_MODE_PRECISION (GET_MODE (src))
+ > GET_MODE_PRECISION (word_mode))
result = wi::sext (result, GET_MODE_PRECISION (word_mode));
break;
default:
diff --git a/gcc/predict.c b/gcc/predict.c
index 81c6a18d079..d17b9f21a40 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -1298,7 +1298,7 @@ predict_iv_comparison (struct loop *loop, basic_block bb,
{
int probability;
bool overflow, overall_overflow = false;
- widest_int compare_count, tem, loop_count;
+ widest_int compare_count, tem;
widest_int loop_bound = wi::to_widest (loop_bound_var);
widest_int compare_bound = wi::to_widest (compare_var);
@@ -1308,7 +1308,8 @@ predict_iv_comparison (struct loop *loop, basic_block bb,
/* (loop_bound - base) / compare_step */
tem = wi::sub (loop_bound, base, SIGNED, &overflow);
overall_overflow |= overflow;
- loop_count = wi::div_trunc (tem, compare_step, SIGNED, &overflow);
+ widest_int loop_count = wi::div_trunc (tem, compare_step, SIGNED,
+ &overflow);
overall_overflow |= overflow;
if (!wi::neg_p (compare_step)
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index c464166f408..e38f59abcb5 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -122,7 +122,7 @@ print_node_brief (FILE *file, const char *prefix, const_tree node, int indent)
fprintf (file, " overflow");
fprintf (file, " ");
- print_dec (wide_int (node), file, TYPE_SIGN (TREE_TYPE (node)));
+ print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
}
if (TREE_CODE (node) == REAL_CST)
{
@@ -733,7 +733,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
fprintf (file, " overflow");
fprintf (file, " ");
- print_dec (wide_int (node), file, TYPE_SIGN (TREE_TYPE (node)));
+ print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
break;
case REAL_CST:
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 649564792fe..bfd1867e6e1 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -450,8 +450,8 @@ struct GTY((variable_size)) rtvec_def {
case CONST_INT: \
case CONST_DOUBLE
-/* Match CONST_*s for which pointer equality corresponds to value
-equality. */
+/* Match CONST_*s for which pointer equality corresponds to value
+ equality. */
#define CASE_CONST_UNIQUE \
case CONST_INT: \
case CONST_DOUBLE: \
@@ -465,9 +465,6 @@ equality. */
case CONST_VECTOR
#endif
-
-
-
/* Predicate yielding nonzero iff X is an rtx for a constant integer. */
#define CONST_INT_P(X) (GET_CODE (X) == CONST_INT)
@@ -1400,9 +1397,8 @@ struct address_info {
};
/* This is used to bundle an rtx and a mode together so that the pair
- can be used as the second operand of a wide int expression. If we
- ever put modes into rtx integer constants, this should go away and
- then just pass an rtx in. */
+ can be used with the wi:: routines. If we ever put modes into rtx
+ integer constants, this should go away and then just pass an rtx in. */
typedef std::pair <rtx, enum machine_mode> rtx_mode_t;
namespace wi
@@ -1435,7 +1431,8 @@ wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *,
{
case CONST_INT:
if (precision < HOST_BITS_PER_WIDE_INT)
- gcc_checking_assert (INTVAL (x.first) == sext_hwi (INTVAL (x.first), precision));
+ gcc_checking_assert (INTVAL (x.first)
+ == sext_hwi (INTVAL (x.first), precision));
return wi::storage_ref (&INTVAL (x.first), 1, precision);
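rtl.h's rtx_mode_t exists because a CONST_INT/CONST_WIDE_INT rtx does not carry its mode; pairing the two lets wi::int_traits decompose the constant in place. The simplify-rtx.c hunks below lean on this everywhere. Sketch (op0/op1 are CONST_SCALAR_INT_P rtxes interpreted in `mode`):

    rtx_mode_t p0 = std::make_pair (op0, mode);
    rtx_mode_t p1 = std::make_pair (op1, mode);
    /* The wi:: routines read the pairs directly; no wide_int copies.  */
    wide_int sum = wi::add (p0, p1);
    bool same = wi::eq_p (p0, p1);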
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index eda11e6085e..36f0a7d70b9 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1649,7 +1649,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
{
wide_int result;
enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
- wide_int op0 = std::make_pair (op, imode);
+ rtx_mode_t op0 = std::make_pair (op, imode);
#if TARGET_SUPPORTS_WIDE_INT == 0
/* This assert keeps the simplification from producing a result
@@ -1664,11 +1664,11 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
switch (code)
{
case NOT:
- result = ~op0;
+ result = wi::bit_not (op0);
break;
case NEG:
- result = -op0;
+ result = wi::neg (op0);
break;
case ABS:
@@ -1700,7 +1700,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
break;
case BSWAP:
- result = op0.bswap ();
+ result = wide_int (op0).bswap ();
break;
case TRUNCATE:
@@ -2013,12 +2013,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (SCALAR_INT_MODE_P (mode))
{
- wide_int coeff0;
- wide_int coeff1;
rtx lhs = op0, rhs = op1;
- coeff0 = wi::one (GET_MODE_PRECISION (mode));
- coeff1 = wi::one (GET_MODE_PRECISION (mode));
+ wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
+ wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
if (GET_CODE (lhs) == NEG)
{
@@ -2190,12 +2188,10 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (SCALAR_INT_MODE_P (mode))
{
- wide_int coeff0;
- wide_int negcoeff1;
rtx lhs = op0, rhs = op1;
- coeff0 = wi::one (GET_MODE_PRECISION (mode));
- negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
+ wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
+ wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
if (GET_CODE (lhs) == NEG)
{
@@ -3705,9 +3701,9 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
&& CONST_SCALAR_INT_P (op1))
{
wide_int result;
- wide_int wop0 = std::make_pair (op0, mode);
bool overflow;
unsigned int bitsize = GET_MODE_BITSIZE (mode);
+ rtx_mode_t pop0 = std::make_pair (op0, mode);
rtx_mode_t pop1 = std::make_pair (op1, mode);
#if TARGET_SUPPORTS_WIDE_INT == 0
@@ -3722,67 +3718,67 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
switch (code)
{
case MINUS:
- result = wop0 - pop1;
+ result = wi::sub (pop0, pop1);
break;
case PLUS:
- result = wop0 + pop1;
+ result = wi::add (pop0, pop1);
break;
case MULT:
- result = wop0 * pop1;
+ result = wi::mul (pop0, pop1);
break;
case DIV:
- result = wi::div_trunc (wop0, pop1, SIGNED, &overflow);
+ result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
case MOD:
- result = wi::mod_trunc (wop0, pop1, SIGNED, &overflow);
+ result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
case UDIV:
- result = wi::div_trunc (wop0, pop1, UNSIGNED, &overflow);
+ result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
case UMOD:
- result = wi::mod_trunc (wop0, pop1, UNSIGNED, &overflow);
+ result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
case AND:
- result = wop0 & pop1;
+ result = wi::bit_and (pop0, pop1);
break;
case IOR:
- result = wop0 | pop1;
+ result = wi::bit_or (pop0, pop1);
break;
case XOR:
- result = wop0 ^ pop1;
+ result = wi::bit_xor (pop0, pop1);
break;
case SMIN:
- result = wi::smin (wop0, pop1);
+ result = wi::smin (pop0, pop1);
break;
case SMAX:
- result = wi::smax (wop0, pop1);
+ result = wi::smax (pop0, pop1);
break;
case UMIN:
- result = wi::umin (wop0, pop1);
+ result = wi::umin (pop0, pop1);
break;
case UMAX:
- result = wi::umax (wop0, pop1);
+ result = wi::umax (pop0, pop1);
break;
case LSHIFTRT:
@@ -3801,23 +3797,23 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
switch (code)
{
case LSHIFTRT:
- result = wi::lrshift (wop0, wop1, bitsize);
+ result = wi::lrshift (pop0, wop1, bitsize);
break;
case ASHIFTRT:
- result = wi::arshift (wop0, wop1, bitsize);
+ result = wi::arshift (pop0, wop1, bitsize);
break;
case ASHIFT:
- result = wi::lshift (wop0, wop1, bitsize);
+ result = wi::lshift (pop0, wop1, bitsize);
break;
case ROTATE:
- result = wi::lrotate (wop0, wop1);
+ result = wi::lrotate (pop0, wop1);
break;
case ROTATERT:
- result = wi::rrotate (wop0, wop1);
+ result = wi::rrotate (pop0, wop1);
break;
default:
@@ -4645,16 +4641,15 @@ simplify_const_relational_operation (enum rtx_code code,
largest int representable on the target is as good as
infinite. */
enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
- wide_int wo0;
+ rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
- wo0 = std::make_pair (trueop0, cmode);
- if (wo0 == ptrueop1)
+ if (wi::eq_p (ptrueop0, ptrueop1))
return comparison_result (code, CMP_EQ);
else
{
- int cr = wi::lts_p (wo0, ptrueop1) ? CMP_LT : CMP_GT;
- cr |= wi::ltu_p (wo0, ptrueop1) ? CMP_LTU : CMP_GTU;
+ int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
+ cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
return comparison_result (code, cr);
}
}
@@ -5203,7 +5198,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
case CONST_WIDE_INT:
{
- wide_int val = std::make_pair (el, innermode);
+ rtx_mode_t val = std::make_pair (el, innermode);
unsigned char extend = wi::sign_mask (val);
for (i = 0; i < elem_bitsize; i += value_bit)
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index 9321dcc0928..a8a039f9809 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -33,10 +33,11 @@ along with GCC; see the file COPYING3. If not see
/* Extends CST as appropriate for the affine combinations COMB. */
widest_int
-wide_int_ext_for_comb (widest_int cst, aff_tree *comb)
+wide_int_ext_for_comb (const widest_int &cst, aff_tree *comb)
{
return wi::sext (cst, TYPE_PRECISION (comb->type));
}
+
/* Initializes affine combination COMB so that its value is zero in TYPE. */
static void
@@ -75,11 +76,11 @@ aff_combination_elt (aff_tree *comb, tree type, tree elt)
/* Scales COMB by SCALE. */
void
-aff_combination_scale (aff_tree *comb, widest_int scale)
+aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
unsigned i, j;
- scale = wide_int_ext_for_comb (scale, comb);
+ widest_int scale = wide_int_ext_for_comb (scale_in, comb);
if (scale == 1)
return;
@@ -92,9 +93,8 @@ aff_combination_scale (aff_tree *comb, widest_int scale)
comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb);
for (i = 0, j = 0; i < comb->n; i++)
{
- widest_int new_coef;
-
- new_coef = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
+ widest_int new_coef
+ = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
/* A coefficient may become zero due to overflow. Remove the zero
elements. */
if (new_coef == 0)
@@ -126,21 +126,20 @@ aff_combination_scale (aff_tree *comb, widest_int scale)
/* Adds ELT * SCALE to COMB. */
void
-aff_combination_add_elt (aff_tree *comb, tree elt, widest_int scale)
+aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
unsigned i;
tree type;
- scale = wide_int_ext_for_comb (scale, comb);
+ widest_int scale = wide_int_ext_for_comb (scale_in, comb);
if (scale == 0)
return;
for (i = 0; i < comb->n; i++)
if (operand_equal_p (comb->elts[i].val, elt, 0))
{
- widest_int new_coef;
-
- new_coef = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
+ widest_int new_coef
+ = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
if (new_coef != 0)
{
comb->elts[i].coef = new_coef;
@@ -233,10 +232,9 @@ aff_combination_convert (aff_tree *comb, tree type)
comb->offset = wide_int_ext_for_comb (comb->offset, comb);
for (i = j = 0; i < comb->n; i++)
{
- widest_int new_coef = comb->elts[i].coef;
- if (new_coef == 0)
+ if (comb->elts[i].coef == 0)
continue;
- comb->elts[j].coef = new_coef;
+ comb->elts[j].coef = comb->elts[i].coef;
comb->elts[j].val = fold_convert (type, comb->elts[i].val);
j++;
}
@@ -368,7 +366,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
combination COMB. */
static tree
-add_elt_to_tree (tree expr, tree type, tree elt, widest_int scale,
+add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in,
aff_tree *comb ATTRIBUTE_UNUSED)
{
enum tree_code code;
@@ -376,7 +374,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, widest_int scale,
if (POINTER_TYPE_P (type))
type1 = sizetype;
- scale = wide_int_ext_for_comb (scale, comb);
+ widest_int scale = wide_int_ext_for_comb (scale_in, comb);
if (scale == -1
&& POINTER_TYPE_P (TREE_TYPE (elt)))
@@ -760,7 +758,7 @@ free_affine_expand_cache (struct pointer_map_t **cache)
is set to true. */
static bool
-wide_int_constant_multiple_p (widest_int val, widest_int div,
+wide_int_constant_multiple_p (const widest_int &val, const widest_int &div,
bool *mult_set, widest_int *mult)
{
widest_int rem, cst;
@@ -911,24 +909,20 @@ bool
aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
const widest_int &size2)
{
- widest_int d, bound;
-
/* Unless the difference is a constant, we fail. */
if (diff->n != 0)
return false;
- d = diff->offset;
- if (wi::neg_p (d))
+ if (wi::neg_p (diff->offset))
{
/* The second object is before the first one, we succeed if the last
element of the second object is before the start of the first one. */
- bound = d + size2 - 1;
- return wi::neg_p (bound);
+ return wi::neg_p (diff->offset + size2 - 1);
}
else
{
/* We succeed if the second object starts after the first one ends. */
- return wi::les_p (size1, d);
+ return wi::les_p (size1, diff->offset);
}
}
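tree-affine.c adopts a `_in` suffix where the old code reassigned its parameter: the public signature takes `const widest_int &scale_in` (see tree-affine.h below) and the body makes one explicit extended copy. An abridged restatement of the function above, to show the shape:

    void
    aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
    {
      /* One explicit, sign-extended copy; the caller's value is untouched.  */
      widest_int scale = wide_int_ext_for_comb (scale_in, comb);
      if (scale == 1)
        return;
      /* ... scale comb->offset and each comb->elts[i].coef by SCALE ...  */
    }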
diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h
index ad2307beda4..941d45aeb41 100644
--- a/gcc/tree-affine.h
+++ b/gcc/tree-affine.h
@@ -60,13 +60,13 @@ typedef struct affine_tree_combination
tree rest;
} aff_tree;
-widest_int wide_int_ext_for_comb (widest_int, aff_tree *);
+widest_int wide_int_ext_for_comb (const widest_int &, aff_tree *);
void aff_combination_const (aff_tree *, tree, const widest_int &);
void aff_combination_elt (aff_tree *, tree, tree);
-void aff_combination_scale (aff_tree *, widest_int);
+void aff_combination_scale (aff_tree *, const widest_int &);
void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *);
void aff_combination_add (aff_tree *, aff_tree *);
-void aff_combination_add_elt (aff_tree *, tree, widest_int);
+void aff_combination_add_elt (aff_tree *, tree, const widest_int &);
void aff_combination_remove_elt (aff_tree *, unsigned);
void aff_combination_convert (aff_tree *, tree);
void tree_to_aff_combination (tree, tree, aff_tree *);
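These prototype changes all follow one convention: a wide-int argument the callee only reads is passed as const widest_int & to avoid copying a multi-word value on every call, and a callee that must modify the value makes one explicit local copy, as aff_combination_add_elt does with scale_in/scale above. A hedged sketch of the pattern (names are illustrative):

/* Read-only use: pass by const reference, no copy.  */
void consume (const widest_int &val);

/* Needs a modified value: copy exactly once, into a local.  */
void
scale_and_consume (const widest_int &scale_in)
{
  widest_int scale = scale_in + 1;  /* the single explicit copy */
  consume (scale);
}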
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index a08c998b961..94be3610c91 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -1306,12 +1306,12 @@ group_case_labels_stmt (gimple stmt)
{
tree merge_case = gimple_switch_label (stmt, i);
basic_block merge_bb = label_to_block (CASE_LABEL (merge_case));
- wide_int bhp1 = wide_int (base_high) + 1;
+ wide_int bhp1 = wi::add (base_high, 1);
/* Merge the cases if they jump to the same place,
and their ranges are consecutive. */
if (merge_bb == base_bb
- && wide_int (CASE_LOW (merge_case)) == bhp1)
+ && wi::eq_p (CASE_LOW (merge_case), bhp1))
{
base_high = CASE_HIGH (merge_case) ?
CASE_HIGH (merge_case) : CASE_LOW (merge_case);
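The merge condition is simply that the next case starts one past the current range's high bound and branches to the same block, so case 1: case 2: case 3: with a common target collapses into the range 1..3. The same idea over plain integer ranges (a sketch, not the GIMPLE representation):

struct case_range { long long low, high; int target; };

/* Merge consecutive ranges with equal targets, in place, assuming
   the array is sorted by LOW; returns the new count.  */
static size_t
merge_ranges (case_range *c, size_t n)
{
  if (n == 0)
    return 0;
  size_t j = 0;
  for (size_t i = 1; i < n; i++)
    if (c[i].target == c[j].target && c[i].low == c[j].high + 1)
      c[j].high = c[i].high;   /* extend the current range */
    else
      c[++j] = c[i];           /* start a new range */
  return j + 1;
}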
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index b31566d8182..1d02da669fd 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -477,7 +477,6 @@ chrec_fold_multiply (tree type,
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
- wide_int num, denom, idx, di_res;
bool overflow;
unsigned int i;
tree res;
@@ -489,17 +488,17 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
return fold_convert (type, n);
/* Numerator = n. */
- num = n;
+ wide_int num = n;
/* Check that k <= n. */
if (wi::ltu_p (num, k))
return NULL_TREE;
/* Denominator = 2. */
- denom = wi::two (TYPE_PRECISION (TREE_TYPE (n)));
+ wide_int denom = wi::two (TYPE_PRECISION (TREE_TYPE (n)));
/* Index = Numerator-1. */
- idx = num - 1;
+ wide_int idx = num - 1;
/* Numerator = Numerator*Index = n*(n-1). */
num = wi::smul (num, idx, &overflow);
@@ -521,7 +520,7 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
}
/* Result = Numerator / Denominator. */
- di_res = wi::udiv_trunc (num, denom);
+ wide_int di_res = wi::udiv_trunc (num, denom);
res = wide_int_to_tree (type, di_res);
return int_fits_type_p (res, type) ? res : NULL_TREE;
}
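tree_fold_binomial builds C(n,k) as a running product: the k = 2 seed above sets num = n*(n-1) and denom = 2, and each further iteration multiplies num by the next descending index and denom by the next factor of k!, e.g. C(5,3) = (5*4*3)/(2*3) = 10. A plain-integer sketch of the same loop, without the overflow checks the wide-int version performs:

/* C(n,k) for k >= 2 (smaller k is handled before the loop in the
   real code); assumes the intermediates fit.  */
static unsigned long long
binomial (unsigned long long n, unsigned int k)
{
  unsigned long long num = n * (n - 1);   /* numerator: n*(n-1)*...  */
  unsigned long long denom = 2;           /* denominator: 2*3*...*k  */
  unsigned long long idx = n - 1;
  for (unsigned int i = 3; i <= k; i++)
    {
      idx--;
      num *= idx;
      denom *= i;
    }
  return num / denom;   /* exact: k! divides a product of k consecutive
                           integers */
}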
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index 9c0ee852520..ca21e92256e 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -191,7 +191,7 @@ addr_object_size (struct object_size_info *osi, const_tree ptr,
}
if (sz != unknown[object_size_type])
{
- offset_int dsz = offset_int (sz) - mem_ref_offset (pt_var);
+ offset_int dsz = wi::sub (sz, mem_ref_offset (pt_var));
if (wi::neg_p (dsz))
sz = 0;
else if (wi::fits_uhwi_p (dsz))
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 17f85a62f95..ace59d3b298 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -919,10 +919,9 @@ static void
add_ref_to_chain (chain_p chain, dref ref)
{
dref root = get_chain_root (chain);
- widest_int dist;
gcc_assert (wi::les_p (root->offset, ref->offset));
- dist = ref->offset - root->offset;
+ widest_int dist = ref->offset - root->offset;
if (wi::leu_p (MAX_DISTANCE, dist))
{
free (ref);
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index cdf8a5a8b57..0d587896896 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -552,13 +552,12 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
addr_space_t as = TYPE_ADDR_SPACE (type);
enum machine_mode address_mode = targetm.addr_space.address_mode (as);
HOST_WIDE_INT coef;
- offset_int best_mult, amult, amult_neg;
unsigned best_mult_cost = 0, acost;
tree mult_elt = NULL_TREE, elt;
unsigned i, j;
enum tree_code op_code;
- best_mult = 0;
+ offset_int best_mult = 0;
for (i = 0; i < addr->n; i++)
{
if (!wi::fits_shwi_p (addr->elts[i].coef))
@@ -584,8 +583,8 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
/* Collect elements multiplied by best_mult. */
for (i = j = 0; i < addr->n; i++)
{
- amult = offset_int::from (addr->elts[i].coef, SIGNED);
- amult_neg = -wi::sext (amult, TYPE_PRECISION (addr->type));
+ offset_int amult = offset_int::from (addr->elts[i].coef, SIGNED);
+ offset_int amult_neg = -wi::sext (amult, TYPE_PRECISION (addr->type));
if (amult == best_mult)
op_code = PLUS_EXPR;
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index e8ae191983c..5e80e66479c 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -874,7 +874,6 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
tree ptrtype1, dbase2;
HOST_WIDE_INT offset1p = offset1, offset2p = offset2;
HOST_WIDE_INT doffset1, doffset2;
- offset_int moff;
gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
|| TREE_CODE (base1) == TARGET_MEM_REF)
@@ -884,7 +883,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
- moff = mem_ref_offset (base1);
+ offset_int moff = mem_ref_offset (base1);
moff = wi::lshift (moff, (BITS_PER_UNIT == 8
? 3 : exact_log2 (BITS_PER_UNIT)));
if (wi::neg_p (moff))
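The shift here just rescales a byte offset into bits: with BITS_PER_UNIT == 8, wi::lshift (moff, 3) is moff * 8, and exact_log2 generalizes the shift count for targets with other byte widths. The biasing idea in plain C (a sketch of the step, not the exact follow-on code):

long long moff_bits = moff_bytes * 8;   /* bytes -> bits */
/* A negative embedded offset is charged to the other reference,
   so both offset adjustments stay non-negative.  */
if (moff_bits < 0)
  offset2 += -moff_bits;
else
  offset1 += moff_bits;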
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 7c76a345a75..d139e4a27d2 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -1255,14 +1255,13 @@ bit_value_binop_1 (enum tree_code code, tree type,
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
{
- widest_int lo, hi;
/* Do the addition with unknown bits set to zero, to give carry-ins of
zero wherever possible. */
- lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
+ widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
lo = wi::ext (lo, width, sgn);
/* Do the addition with unknown bits set to one, to give carry-ins of
one wherever possible. */
- hi = (r1val | r1mask) + (r2val | r2mask);
+ widest_int hi = (r1val | r1mask) + (r2val | r2mask);
hi = wi::ext (hi, width, sgn);
/* Each bit in the result is known if (a) the corresponding bits in
both inputs are known, and (b) the carry-in to that bit position
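The lo/hi pair brackets every possible carry pattern: summing with all unknown bits as zero minimizes carries, summing with them as one maximizes them, so a result bit is known exactly where both inputs are known and the two sums agree. A standalone 32-bit sketch of the technique (val holds known bit values, mask has 1s where a bit is unknown):

/* Known-bits addition for (val, mask) pairs.  */
static void
known_bits_add (unsigned v1, unsigned m1, unsigned v2, unsigned m2,
                unsigned *val, unsigned *mask)
{
  unsigned lo = (v1 & ~m1) + (v2 & ~m2);   /* carry-ins minimized */
  unsigned hi = (v1 | m1) + (v2 | m2);     /* carry-ins maximized */
  /* Unknown wherever an input bit is unknown or the sums disagree.  */
  *mask = m1 | m2 | (lo ^ hi);
  *val = lo & ~*mask;
}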
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 8831dfd0f86..6846fcf6115 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -4649,8 +4649,6 @@ may_eliminate_iv (struct ivopts_data *data,
}
}
- static int cnt = 0;
- cnt++;
cand_value_at (loop, cand, use->stmt, desc->niter, &bnd);
*bound = aff_combination_to_tree (&bnd);
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index d0cf228c59a..0b37c91f68d 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -2625,7 +2625,6 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
{
tree niter_bound, extreme, delta;
tree type = TREE_TYPE (base), unsigned_type;
- widest_int max;
if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
return;
@@ -2666,7 +2665,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
/* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
would get out of the range. */
niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
- max = derive_constant_upper_bound (niter_bound);
+ widest_int max = derive_constant_upper_bound (niter_bound);
record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 0e577493632..65f012d06fa 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -2004,11 +2004,9 @@ zero_nonzero_bits_from_vr (const tree expr_type,
else if (tree_int_cst_sgn (vr->min) >= 0
|| tree_int_cst_sgn (vr->max) < 0)
{
- wide_int wmin = vr->min;
- wide_int wmax = vr->max;
- wide_int xor_mask = wmin ^ wmax;
- *may_be_nonzero = wmin | wmax;
- *must_be_nonzero = wmin & wmax;
+ wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
+ *may_be_nonzero = wi::bit_or (vr->min, vr->max);
+ *must_be_nonzero = wi::bit_and (vr->min, vr->max);
if (xor_mask != 0)
{
wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
@@ -2396,10 +2394,6 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
{
signop sgn = TYPE_SIGN (expr_type);
unsigned int prec = TYPE_PRECISION (expr_type);
- wide_int min0 = wide_int (vr0.min);
- wide_int max0 = wide_int (vr0.max);
- wide_int min1 = wide_int (vr1.min);
- wide_int max1 = wide_int (vr1.max);
wide_int type_min = wi::min_value (TYPE_PRECISION (expr_type), sgn);
wide_int type_max = wi::max_value (TYPE_PRECISION (expr_type), sgn);
wide_int wmin, wmax;
@@ -2408,24 +2402,24 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
if (code == PLUS_EXPR)
{
- wmin = min0 + min1;
- wmax = max0 + max1;
+ wmin = wi::add (vr0.min, vr1.min);
+ wmax = wi::add (vr0.max, vr1.max);
/* Check for overflow. */
- if (wi::cmp (min1, 0, sgn) != wi::cmp (wmin, min0, sgn))
- min_ovf = wi::cmp (min0, wmin, sgn);
- if (wi::cmp (max1, 0, sgn) != wi::cmp (wmax, max0, sgn))
- max_ovf = wi::cmp (max0, wmax, sgn);
+ if (wi::cmp (vr1.min, 0, sgn) != wi::cmp (wmin, vr0.min, sgn))
+ min_ovf = wi::cmp (vr0.min, wmin, sgn);
+ if (wi::cmp (vr1.max, 0, sgn) != wi::cmp (wmax, vr0.max, sgn))
+ max_ovf = wi::cmp (vr0.max, wmax, sgn);
}
else /* if (code == MINUS_EXPR) */
{
- wmin = min0 - max1;
- wmax = max0 - min1;
+ wmin = wi::sub (vr0.min, vr1.max);
+ wmax = wi::sub (vr0.max, vr1.min);
- if (wi::cmp (0, max1, sgn) != wi::cmp (wmin, min0, sgn))
- min_ovf = wi::cmp (min0, max1, sgn);
- if (wi::cmp (0, min1, sgn) != wi::cmp (wmax, max0, sgn))
- max_ovf = wi::cmp (max0, min1, sgn);
+ if (wi::cmp (0, vr1.max, sgn) != wi::cmp (wmin, vr0.min, sgn))
+ min_ovf = wi::cmp (vr0.min, vr1.max, sgn);
+ if (wi::cmp (0, vr1.min, sgn) != wi::cmp (wmax, vr0.max, sgn))
+ max_ovf = wi::cmp (vr0.max, vr1.min, sgn);
}
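The overflow test relies on the fact that, absent wrap-around, adding a positive quantity moves the sum up and a negative one down: when the sign of vr1.min (against 0) disagrees with the sign of wmin (against vr0.min), the addition wrapped, and comparing vr0.min with wmin recovers the wrap direction. An 8-bit sketch of the same detection:

/* Returns -1 / 0 / +1: wrapped downward / exact / wrapped upward.  */
static int
add_overflow_dir (signed char a, signed char b)
{
  signed char sum = (signed char) (a + b);   /* wraps mod 256 */
  int sign_b = (b > 0) - (b < 0);
  int moved = (sum > a) - (sum < a);
  if (sign_b == moved)
    return 0;                    /* sum moved in the expected direction */
  return (a > sum) - (a < sum);  /* mismatch: report wrap direction */
}

For example add_overflow_dir (100, 100) yields +1, since 200 wraps to -56.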
/* For non-wrapping arithmetic look at possibly smaller
@@ -2638,18 +2632,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& range_int_cst_p (&vr1)
&& TYPE_OVERFLOW_WRAPS (expr_type))
{
- wide_int min0, max0, min1, max1;
- wide_int prod0, prod1, prod2, prod3;
wide_int sizem1 = wi::mask (prec, false, prec2);
wide_int size = sizem1 + 1;
/* Extend the values using the sign of the result to PREC2.
     From here on out, everything is just signed math no matter
what the input types were. */
- min0 = wide_int::from (vr0.min, prec2, sign);
- max0 = wide_int::from (vr0.max, prec2, sign);
- min1 = wide_int::from (vr1.min, prec2, sign);
- max1 = wide_int::from (vr1.max, prec2, sign);
+ wide_int min0 = wide_int::from (vr0.min, prec2, sign);
+ wide_int max0 = wide_int::from (vr0.max, prec2, sign);
+ wide_int min1 = wide_int::from (vr1.min, prec2, sign);
+ wide_int max1 = wide_int::from (vr1.max, prec2, sign);
/* Canonicalize the intervals. */
if (sign == UNSIGNED)
@@ -2667,10 +2659,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
}
}
- prod0 = min0 * min1;
- prod1 = min0 * max1;
- prod2 = max0 * min1;
- prod3 = max0 * max1;
+ wide_int prod0 = min0 * min1;
+ wide_int prod1 = min0 * max1;
+ wide_int prod2 = max0 * min1;
+ wide_int prod3 = max0 * max1;
/* Sort the 4 products so that min is in prod0 and max is in
prod3. */
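Because the operands were extended to prec2 bits, none of the four cross products can itself wrap; e.g. for [2,3] * [4,5] they are 8, 10, 12, 15, giving the range [8,15], which is then usable only if its span is smaller than the type's modulus. The product step over plain integers:

#include <algorithm>

/* Range of x*y for x in [min0,max0], y in [min1,max1], computed
   at double width so the products cannot overflow.  */
static void
mul_range (long long min0, long long max0,
           long long min1, long long max1,
           __int128 *lo, __int128 *hi)
{
  __int128 p0 = (__int128) min0 * min1;
  __int128 p1 = (__int128) min0 * max1;
  __int128 p2 = (__int128) max0 * min1;
  __int128 p3 = (__int128) max0 * max1;
  *lo = std::min ({p0, p1, p2, p3});
  *hi = std::max ({p0, p1, p2, p3});
}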
@@ -2783,7 +2775,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
int prec = TYPE_PRECISION (expr_type);
int overflow_pos = prec;
int bound_shift;
- wide_int bound, complement, low_bound, high_bound;
+ wide_int low_bound, high_bound;
bool uns = TYPE_UNSIGNED (expr_type);
bool in_bounds = false;
@@ -2796,8 +2788,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
zero, which means vr1 is a singleton range of zero, which
means it should be handled by the previous LSHIFT_EXPR
if-clause. */
- bound = wi::set_bit_in_zero (bound_shift, prec);
- complement = ~(bound - 1);
+ wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
+ wide_int complement = ~(bound - 1);
if (uns)
{
@@ -2964,18 +2956,19 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
wide_int may_be_nonzero0, may_be_nonzero1;
wide_int must_be_nonzero0, must_be_nonzero1;
- int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, &may_be_nonzero0,
+ int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
+ &may_be_nonzero0,
&must_be_nonzero0);
- int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, &may_be_nonzero1,
+ int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
+ &may_be_nonzero1,
&must_be_nonzero1);
type = VR_RANGE;
if (code == BIT_AND_EXPR)
{
- wide_int wmax;
min = wide_int_to_tree (expr_type,
must_be_nonzero0 & must_be_nonzero1);
- wmax = may_be_nonzero0 & may_be_nonzero1;
+ wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
/* If both input ranges contain only negative values we can
truncate the result range maximum to the minimum of the
input range maxima. */
@@ -2997,10 +2990,9 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
}
else if (code == BIT_IOR_EXPR)
{
- wide_int wmin;
max = wide_int_to_tree (expr_type,
may_be_nonzero0 | may_be_nonzero1);
- wmin = must_be_nonzero0 | must_be_nonzero1;
+ wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
/* If the input ranges contain only positive values we can
truncate the minimum of the result range to the maximum
of the input range minima. */
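All of these BIT_* bounds come from the two masks computed by zero_nonzero_bits_from_vr: must_be_nonzero holds the bits set in every value of the range, may_be_nonzero those set in at least one. For x in [4,7], must = 4&7 = 4 and may = 4|7 = 7. A sketch of the derivation for an unsigned range, mirroring the floor_log2/mask step shown earlier:

/* Bits set in every value (must) and in some value (may) of [lo, hi].  */
static void
range_bits (unsigned lo, unsigned hi, unsigned *must, unsigned *may)
{
  *may = lo | hi;
  *must = lo & hi;
  unsigned xor_mask = lo ^ hi;
  if (xor_mask)
    {
      /* Below the highest differing bit, a bit may take either value
         somewhere in the range: widen "may", narrow "must".  */
      unsigned mask = (1u << (31 - __builtin_clz (xor_mask))) - 1;
      *may |= mask;
      *must &= ~mask;
    }
}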
@@ -3022,11 +3014,11 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
}
else if (code == BIT_XOR_EXPR)
{
- wide_int result_zero_bits, result_one_bits;
- result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
- | ~(may_be_nonzero0 | may_be_nonzero1);
- result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
- | must_be_nonzero1.and_not (may_be_nonzero0);
+ wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
+ | ~(may_be_nonzero0 | may_be_nonzero1));
+ wide_int result_one_bits
+ = (must_be_nonzero0.and_not (may_be_nonzero1)
+ | must_be_nonzero1.and_not (may_be_nonzero0));
max = wide_int_to_tree (expr_type, ~result_zero_bits);
min = wide_int_to_tree (expr_type, result_one_bits);
/* If the range has all positive or all negative values the
@@ -3837,11 +3829,10 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop,
if (max_loop_iterations (loop, &nit))
{
value_range_t maxvr = VR_INITIALIZER;
- widest_int wtmp;
signop sgn = TYPE_SIGN (TREE_TYPE (step));
bool overflow;
- wtmp = wi::mul (wi::to_widest (step), nit, sgn, &overflow);
+ widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn, &overflow);
/* If the multiplication overflowed we can't do a meaningful
adjustment. Likewise if the result doesn't fit in the type
of the induction variable. For a signed type we have to
diff --git a/gcc/tree.c b/gcc/tree.c
index e25161a4f80..9344f7f0429 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -6941,12 +6941,11 @@ tree_int_cst_sign_bit (const_tree t)
int
tree_int_cst_sgn (const_tree t)
{
- wide_int w = t;
- if (w == 0)
+ if (wi::eq_p (t, 0))
return 0;
else if (TYPE_UNSIGNED (TREE_TYPE (t)))
return 1;
- else if (wi::neg_p (w))
+ else if (wi::neg_p (t))
return -1;
else
return 1;
@@ -8621,11 +8620,8 @@ int_fits_type_p (const_tree c, const_tree type)
{
tree type_low_bound, type_high_bound;
bool ok_for_low_bound, ok_for_high_bound;
- wide_int wc, wd;
signop sgn_c = TYPE_SIGN (TREE_TYPE (c));
- wc = c;
-
retry:
type_low_bound = TYPE_MIN_VALUE (type);
type_high_bound = TYPE_MAX_VALUE (type);
@@ -8667,7 +8663,7 @@ retry:
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
     never fit in unsigned types. */
- if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (wc))
+ if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (c))
return false;
/* Second, narrower types always fit in wider ones. */
@@ -8675,7 +8671,7 @@ retry:
return true;
/* Third, unsigned integers with top bit set never fit signed types. */
- if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED && wi::neg_p (wc))
+ if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED && wi::neg_p (c))
return false;
/* If we haven't been able to decide at this point, there is nothing more we
@@ -8690,7 +8686,7 @@ retry:
}
/* Or to fits_to_tree_p, if nothing else. */
- return wi::fits_to_tree_p (wc, type);
+ return wi::fits_to_tree_p (c, type);
}
/* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant
diff --git a/gcc/tree.h b/gcc/tree.h
index 909afd63f2b..43f383fff80 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -5356,9 +5356,9 @@ bool
wi::fits_to_tree_p (const T &x, const_tree type)
{
if (TYPE_SIGN (type) == UNSIGNED)
- return x == zext (x, TYPE_PRECISION (type));
+ return eq_p (x, zext (x, TYPE_PRECISION (type)));
else
- return x == sext (x, TYPE_PRECISION (type));
+ return eq_p (x, sext (x, TYPE_PRECISION (type)));
}
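The test is just "extension at the type's precision is the identity": 255 fits an unsigned 8-bit type because zero-extending its low 8 bits reproduces 255, while 300 does not (its low 8 bits give 44). The unsigned half over plain integers:

/* Mirrors the x == zext (x, prec) test above, for prec <= 63.  */
static bool
fits_unsigned_p (unsigned long long x, unsigned int prec)
{
  unsigned long long zext = x & ((1ULL << prec) - 1);
  return x == zext;
}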
/* Produce the smallest number that is represented in TYPE. The precision
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index b3f6eb97760..33719af7f91 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -1282,28 +1282,28 @@ wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
return hwi_with_prec (val, precision, UNSIGNED);
}
-/* Return a wide int of -1 with precision PREC. */
+/* Return a wide int of -1 with precision PRECISION. */
inline wi::hwi_with_prec
wi::minus_one (unsigned int precision)
{
return wi::shwi (-1, precision);
}
-/* Return a wide int of 0 with precision PREC. */
+/* Return a wide int of 0 with precision PRECISION. */
inline wi::hwi_with_prec
wi::zero (unsigned int precision)
{
return wi::shwi (0, precision);
}
-/* Return a wide int of 1 with precision PREC. */
+/* Return a wide int of 1 with precision PRECISION. */
inline wi::hwi_with_prec
wi::one (unsigned int precision)
{
return wi::shwi (1, precision);
}
-/* Return a wide int of 2 with precision PREC. */
+/* Return a wide int of 2 with precision PRECISION. */
inline wi::hwi_with_prec
wi::two (unsigned int precision)
{
@@ -1337,6 +1337,7 @@ wi::int_traits <wi::hwi_with_prec>::
decompose (HOST_WIDE_INT *scratch, unsigned int precision,
const wi::hwi_with_prec &x)
{
+ gcc_checking_assert (precision == x.precision);
scratch[0] = x.val;
if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
return wi::storage_ref (scratch, 1, precision);
@@ -1440,8 +1441,7 @@ wi::copy (T1 &x, const T2 &y)
x.set_len (len, y.is_sign_extended);
}
-/* Return true if X fits in a HOST_WIDE_INT with no loss of
- precision. */
+/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
template <typename T>
inline bool
wi::fits_shwi_p (const T &x)
@@ -1476,8 +1476,7 @@ wi::neg_p (const T &x, signop sgn)
return xi.sign_mask () < 0;
}
-/* Return -1 if the top bit of X is set and 0 if the top bit is
- clear. */
+/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
template <typename T>
inline HOST_WIDE_INT
wi::sign_mask (const T &x)