Diffstat (limited to 'gcc/tree-ssa-loop-niter.c')
-rw-r--r--  gcc/tree-ssa-loop-niter.c  260
1 file changed, 124 insertions, 136 deletions
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 7628363cc62..a48ad10424e 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -55,6 +55,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
#include "stringpool.h"
#include "tree-ssanames.h"
+#include "wide-int-print.h"
#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
@@ -85,7 +86,6 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
tree type = TREE_TYPE (expr);
tree op0, op1;
- double_int off;
bool negate = false;
*var = expr;
@@ -107,17 +107,14 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
*var = op0;
/* Always sign extend the offset. */
- off = tree_to_double_int (op1);
- off = off.sext (TYPE_PRECISION (type));
- mpz_set_double_int (offset, off, false);
+ wi::to_mpz (op1, offset, SIGNED);
if (negate)
mpz_neg (offset, offset);
break;
case INTEGER_CST:
*var = build_int_cst_type (type, 0);
- off = tree_to_double_int (expr);
- mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
+ wi::to_mpz (expr, offset, TYPE_SIGN (type));
break;
default:
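Where the old code paired tree_to_double_int with mpz_set_double_int, the new wi::to_mpz does both steps at once and takes the signedness explicitly. A minimal sketch of the two variants used in this hunk, assuming GCC's wide-int.h environment and an INTEGER_CST node op1:

    /* Sketch only: copy an INTEGER_CST into a GMP integer.  */
    mpz_t offset;
    mpz_init (offset);
    /* SIGNED forces sign extension regardless of the tree's type.  */
    wi::to_mpz (op1, offset, SIGNED);
    /* TYPE_SIGN honours the signedness of the constant's own type.  */
    wi::to_mpz (op1, offset, TYPE_SIGN (TREE_TYPE (op1)));
    mpz_clear (offset);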
@@ -132,7 +129,7 @@ static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
mpz_t min, mpz_t max)
{
- double_int minv, maxv;
+ wide_int minv, maxv;
enum value_range_type rtype = VR_VARYING;
/* If the expression is a constant, we know its value exactly. */
@@ -149,6 +146,7 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
{
edge e = loop_preheader_edge (loop);
+ signop sgn = TYPE_SIGN (type);
gimple_stmt_iterator gsi;
/* Either for VAR itself... */
@@ -158,7 +156,7 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
- double_int minc, maxc;
+ wide_int minc, maxc;
if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
&& (get_range_info (gimple_phi_result (phi), &minc, &maxc)
== VR_RANGE))
@@ -171,13 +169,13 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
}
else
{
- minv = minv.max (minc, TYPE_UNSIGNED (type));
- maxv = maxv.min (maxc, TYPE_UNSIGNED (type));
+ minv = wi::max (minv, minc, sgn);
+ maxv = wi::min (maxv, maxc, sgn);
/* If the PHI result ranges are inconsistent with
the VAR range, give up on looking at the PHI
results. This can happen if VR_UNDEFINED is
involved. */
- if (minv.cmp (maxv, TYPE_UNSIGNED (type)) > 0)
+ if (wi::gt_p (minv, maxv, sgn))
{
rtype = get_range_info (var, &minv, &maxv);
break;
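For reference, wi::max, wi::min and wi::gt_p take a signop argument where the double_int methods took a TYPE_UNSIGNED boolean. A sketch of the range intersection above, under the hunk's own assumptions (minv/maxv hold the accumulated range, minc/maxc one PHI result's range):

    signop sgn = TYPE_SIGN (type);
    minv = wi::max (minv, minc, sgn);   /* raise the lower bound */
    maxv = wi::min (maxv, maxc, sgn);   /* lower the upper bound */
    if (wi::gt_p (minv, maxv, sgn))
      {
        /* Empty intersection: fall back to VAR's own range.  */
      }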
@@ -188,11 +186,11 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
if (rtype == VR_RANGE)
{
mpz_t minm, maxm;
- gcc_assert (minv.cmp (maxv, TYPE_UNSIGNED (type)) <= 0);
+ gcc_assert (wi::le_p (minv, maxv, sgn));
mpz_init (minm);
mpz_init (maxm);
- mpz_set_double_int (minm, minv, TYPE_UNSIGNED (type));
- mpz_set_double_int (maxm, maxv, TYPE_UNSIGNED (type));
+ wi::to_mpz (minv, minm, sgn);
+ wi::to_mpz (maxv, maxm, sgn);
mpz_add (minm, minm, off);
mpz_add (maxm, maxm, off);
/* If the computation may not wrap or off is zero, then this
@@ -262,7 +260,7 @@ bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
}
mpz_init (m);
- mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
mpz_add_ui (m, m, 1);
mpz_sub (bnds->up, x, y);
mpz_set (bnds->below, bnds->up);
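wi::minus_one (prec) is the all-ones value of the given precision, so converting it to GMP as UNSIGNED and adding 1 yields 2^prec, the modulus of a prec-bit type. The idiom in isolation, with prec standing for TYPE_PRECISION (type):

    mpz_t m;
    mpz_init (m);
    /* m = 2^prec - 1, the unsigned maximum of a prec-bit type.  */
    wi::to_mpz (wi::minus_one (prec), m, UNSIGNED);
    /* m = 2^prec, the size of the type's value range.  */
    mpz_add_ui (m, m, 1);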
@@ -541,15 +539,15 @@ end:
difference of two values in TYPE. */
static void
-bounds_add (bounds *bnds, double_int delta, tree type)
+bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
mpz_t mdelta, max;
mpz_init (mdelta);
- mpz_set_double_int (mdelta, delta, false);
+ wi::to_mpz (delta, mdelta, SIGNED);
mpz_init (max);
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
mpz_add (bnds->up, bnds->up, mdelta);
mpz_add (bnds->below, bnds->below, mdelta);
@@ -643,7 +641,7 @@ static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
bounds *bnds, bool exit_must_be_taken)
{
- double_int max;
+ widest_int max;
mpz_t d;
tree type = TREE_TYPE (c);
bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
@@ -652,10 +650,8 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
if (integer_onep (s)
|| (TREE_CODE (c) == INTEGER_CST
&& TREE_CODE (s) == INTEGER_CST
- && tree_to_double_int (c).mod (tree_to_double_int (s),
- TYPE_UNSIGNED (type),
- EXACT_DIV_EXPR).is_zero ())
- || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (c))
+ && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
+ || (TYPE_OVERFLOW_UNDEFINED (type)
&& multiple_of_p (type, c, s)))
{
/* If C is an exact multiple of S, then its value will be reached before
@@ -673,15 +669,14 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
the whole # of iterations analysis will fail). */
if (!no_overflow)
{
- max = double_int::mask (TYPE_PRECISION (type)
- - tree_to_uhwi (num_ending_zeros (s)));
- mpz_set_double_int (bnd, max, true);
+ max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
+ wi::to_mpz (max, bnd, UNSIGNED);
return;
}
/* Now we know that the induction variable does not overflow, so the loop
iterates at most (range of type / S) times. */
- mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
/* If the induction variable is guaranteed to reach the value of C before
overflow, ... */
@@ -690,13 +685,13 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
/* ... then we can strengthen this to C / S, and possibly we can use
the upper bound on C given by BNDS. */
if (TREE_CODE (c) == INTEGER_CST)
- mpz_set_double_int (bnd, tree_to_double_int (c), true);
+ wi::to_mpz (c, bnd, UNSIGNED);
else if (bnds_u_valid)
mpz_set (bnd, bnds->up);
}
mpz_init (d);
- mpz_set_double_int (d, tree_to_double_int (s), true);
+ wi::to_mpz (s, d, UNSIGNED);
mpz_fdiv_q (bnd, bnd, d);
mpz_clear (d);
}
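Two replacements above are worth noting: wi::ctz counts the trailing zero bits of the step in place of the old num_ending_zeros tree computation, and wi::mask <widest_int> (n, false) builds the low n-bit mask. A sketch of the may-wrap bound, with type and s as in the hunk:

    /* The IV takes at most 2^(prec - ctz (s)) distinct values before
       repeating, so that is the bound when it may overflow.  */
    widest_int max
      = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s),
                               false /* the mask, not its complement */);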
@@ -747,7 +742,8 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final,
mpz_init (max);
number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
exit_must_be_taken);
- niter->max = mpz_get_double_int (niter_type, max, false);
+ niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
+ TYPE_SIGN (niter_type));
mpz_clear (max);
/* First the trivial cases -- when the step is 1. */
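Converting back out of GMP is now two explicit steps: wi::from_mpz yields a wide_int at the precision of the given type, and widest_int::from widens it with an explicit sign. The roundtrip used for niter->max, sketched with max as an initialized mpz_t:

    /* false: saturate out-of-range values instead of wrapping.  */
    wide_int w = wi::from_mpz (niter_type, max, false);
    widest_int b = widest_int::from (w, TYPE_SIGN (niter_type));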
@@ -820,7 +816,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
tmod = fold_convert (type1, mod);
mpz_init (mmod);
- mpz_set_double_int (mmod, tree_to_double_int (mod), true);
+ wi::to_mpz (mod, mmod, UNSIGNED);
mpz_neg (mmod, mmod);
/* If the induction variable does not overflow and the exit is taken,
@@ -902,7 +898,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
niter->may_be_zero,
noloop);
- bounds_add (bnds, tree_to_double_int (mod), type);
+ bounds_add (bnds, wi::to_widest (mod), type);
*delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
ret = true;
@@ -992,7 +988,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
tree assumption = boolean_true_node, bound, diff;
tree mbz, mbzl, mbzr, type1;
bool rolls_p, no_overflow_p;
- double_int dstep;
+ widest_int dstep;
mpz_t mstep, max;
/* We are going to compute the number of iterations as
@@ -1018,22 +1014,22 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
/* First check whether the answer does not follow from the bounds we gathered
before. */
if (integer_nonzerop (iv0->step))
- dstep = tree_to_double_int (iv0->step);
+ dstep = wi::to_widest (iv0->step);
else
{
- dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type));
+ dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
dstep = -dstep;
}
mpz_init (mstep);
- mpz_set_double_int (mstep, dstep, true);
+ wi::to_mpz (dstep, mstep, UNSIGNED);
mpz_neg (mstep, mstep);
mpz_add_ui (mstep, mstep, 1);
rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
mpz_init (max);
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
mpz_add (max, max, mstep);
no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
/* For pointers, only values lying inside a single object
@@ -1160,7 +1156,8 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
iv1->base, iv0->base);
niter->niter = delta;
- niter->max = mpz_get_double_int (niter_type, bnds->up, false);
+ niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
+ TYPE_SIGN (niter_type));
return true;
}
@@ -1203,11 +1200,12 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
mpz_init (mstep);
mpz_init (tmp);
- mpz_set_double_int (mstep, tree_to_double_int (step), true);
+ wi::to_mpz (step, mstep, UNSIGNED);
mpz_add (tmp, bnds->up, mstep);
mpz_sub_ui (tmp, tmp, 1);
mpz_fdiv_q (tmp, tmp, mstep);
- niter->max = mpz_get_double_int (niter_type, tmp, false);
+ niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
+ TYPE_SIGN (niter_type));
mpz_clear (mstep);
mpz_clear (tmp);
@@ -1270,7 +1268,7 @@ number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
iv0->base = fold_build2 (MINUS_EXPR, type1,
iv0->base, build_int_cst (type1, 1));
- bounds_add (bnds, double_int_one, type1);
+ bounds_add (bnds, 1, type1);
return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
bnds);
@@ -1342,8 +1340,7 @@ number_of_iterations_cond (struct loop *loop,
niter->assumptions = boolean_true_node;
niter->may_be_zero = boolean_false_node;
niter->niter = NULL_TREE;
- niter->max = double_int_zero;
-
+ niter->max = 0;
niter->bound = NULL_TREE;
niter->cmp = ERROR_MARK;
@@ -1415,7 +1412,7 @@ number_of_iterations_cond (struct loop *loop,
if (tem && integer_zerop (tem))
{
niter->niter = build_int_cst (unsigned_type_for (type), 0);
- niter->max = double_int_zero;
+ niter->max = 0;
return true;
}
@@ -1491,7 +1488,7 @@ number_of_iterations_cond (struct loop *loop,
fprintf (dump_file, " # of iterations ");
print_generic_expr (dump_file, niter->niter, TDF_SLIM);
fprintf (dump_file, ", bounded by ");
- dump_double_int (dump_file, niter->max, true);
+ print_decu (niter->max, dump_file);
fprintf (dump_file, "\n");
}
else
@@ -2003,7 +2000,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
/* If NITER has simplified into a constant, update MAX. */
if (TREE_CODE (niter->niter) == INTEGER_CST)
- niter->max = tree_to_double_int (niter->niter);
+ niter->max = wi::to_widest (niter->niter);
if (integer_onep (niter->assumptions))
return true;
@@ -2115,7 +2112,7 @@ find_loop_niter (struct loop *loop, edge *exit)
bool
finite_loop_p (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
int flags;
if (flag_unsafe_loop_optimizations)
@@ -2430,13 +2427,13 @@ find_loop_niter_by_eval (struct loop *loop, edge *exit)
*/
-static double_int derive_constant_upper_bound_ops (tree, tree,
+static widest_int derive_constant_upper_bound_ops (tree, tree,
enum tree_code, tree);
/* Returns a constant upper bound on the value of the right-hand side of
an assignment statement STMT. */
-static double_int
+static widest_int
derive_constant_upper_bound_assign (gimple stmt)
{
enum tree_code code = gimple_assign_rhs_code (stmt);
@@ -2451,7 +2448,7 @@ derive_constant_upper_bound_assign (gimple stmt)
is considered to be unsigned. If its type is signed, its value must
be nonnegative. */
-static double_int
+static widest_int
derive_constant_upper_bound (tree val)
{
enum tree_code code;
@@ -2465,12 +2462,12 @@ derive_constant_upper_bound (tree val)
whose type is TYPE. The expression is considered to be unsigned. If
its type is signed, its value must be nonnegative. */
-static double_int
+static widest_int
derive_constant_upper_bound_ops (tree type, tree op0,
enum tree_code code, tree op1)
{
tree subtype, maxt;
- double_int bnd, max, mmax, cst;
+ widest_int bnd, max, mmax, cst;
gimple stmt;
if (INTEGRAL_TYPE_P (type))
@@ -2478,12 +2475,12 @@ derive_constant_upper_bound_ops (tree type, tree op0,
else
maxt = upper_bound_in_type (type, type);
- max = tree_to_double_int (maxt);
+ max = wi::to_widest (maxt);
switch (code)
{
case INTEGER_CST:
- return tree_to_double_int (op0);
+ return wi::to_widest (op0);
CASE_CONVERT:
subtype = TREE_TYPE (op0);
@@ -2505,7 +2502,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* If the bound does not fit in TYPE, max. value of TYPE could be
attained. */
- if (max.ult (bnd))
+ if (wi::ltu_p (max, bnd))
return max;
return bnd;
@@ -2520,25 +2517,24 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
choose the most logical way to treat this constant regardless
of the signedness of the type. */
- cst = tree_to_double_int (op1);
- cst = cst.sext (TYPE_PRECISION (type));
+ cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
if (code != MINUS_EXPR)
cst = -cst;
bnd = derive_constant_upper_bound (op0);
- if (cst.is_negative ())
+ if (wi::neg_p (cst))
{
cst = -cst;
/* Avoid CST == 0x80000... */
- if (cst.is_negative ())
+ if (wi::neg_p (cst))
return max;
/* OP0 + CST. We need to check that
BND <= MAX (type) - CST. */
mmax -= cst;
- if (bnd.ugt (mmax))
+ if (wi::ltu_p (mmax, bnd))
return max;
return bnd + cst;
@@ -2558,13 +2554,13 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* This should only happen if the type is unsigned; however, for
buggy programs that use overflowing signed arithmetic even with
-fno-wrapv, this condition may also be true for signed values. */
- if (bnd.ult (cst))
+ if (wi::ltu_p (bnd, cst))
return max;
if (TYPE_UNSIGNED (type))
{
tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
- double_int_to_tree (type, cst));
+ wide_int_to_tree (type, cst));
if (!tem || integer_nonzerop (tem))
return max;
}
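wide_int_to_tree is the drop-in replacement for double_int_to_tree and accepts a widest_int through the wide_int_ref machinery. The pattern above, in isolation:

    /* Materialize CST as an INTEGER_CST of TYPE and try to fold
       the comparison OP0 >= CST to a constant.  */
    tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
                            wide_int_to_tree (type, cst));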
@@ -2581,13 +2577,13 @@ derive_constant_upper_bound_ops (tree type, tree op0,
return max;
bnd = derive_constant_upper_bound (op0);
- return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR);
+ return wi::udiv_floor (bnd, wi::to_widest (op1));
case BIT_AND_EXPR:
if (TREE_CODE (op1) != INTEGER_CST
|| tree_int_cst_sign_bit (op1))
return max;
- return tree_to_double_int (op1);
+ return wi::to_widest (op1);
case SSA_NAME:
stmt = SSA_NAME_DEF_STMT (op0);
@@ -2605,7 +2601,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
static void
do_warn_aggressive_loop_optimizations (struct loop *loop,
- double_int i_bound, gimple stmt)
+ widest_int i_bound, gimple stmt)
{
/* Don't warn if the loop doesn't have known constant bound. */
if (!loop->nb_iterations
@@ -2618,7 +2614,7 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
|| loop->warned_aggressive_loop_optimizations
/* Only warn if undefined behavior gives us lower estimate than the
known constant bound. */
- || i_bound.ucmp (tree_to_double_int (loop->nb_iterations)) >= 0
+ || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
/* And undefined behavior happens unconditionally. */
|| !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
return;
@@ -2630,8 +2626,8 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
gimple estmt = last_stmt (e->src);
if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
"iteration %E invokes undefined behavior",
- double_int_to_tree (TREE_TYPE (loop->nb_iterations),
- i_bound)))
+ wide_int_to_tree (TREE_TYPE (loop->nb_iterations),
+ i_bound)))
inform (gimple_location (estmt), "containing loop");
loop->warned_aggressive_loop_optimizations = true;
}
@@ -2641,13 +2637,13 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
is taken at last when the STMT is executed BOUND + 1 times.
REALISTIC is true if BOUND is expected to be close to the real number
of iterations. UPPER is true if we are sure the loop iterates at most
- BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
+ BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
static void
-record_estimate (struct loop *loop, tree bound, double_int i_bound,
+record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
gimple at_stmt, bool is_exit, bool realistic, bool upper)
{
- double_int delta;
+ widest_int delta;
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -2657,7 +2653,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound,
upper ? "" : "probably ");
print_generic_expr (dump_file, bound, TDF_SLIM);
fprintf (dump_file, " (bounded by ");
- dump_double_int (dump_file, i_bound, true);
+ print_decu (i_bound, dump_file);
fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
}
@@ -2666,7 +2662,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound,
if (TREE_CODE (bound) != INTEGER_CST)
realistic = false;
else
- gcc_checking_assert (i_bound == tree_to_double_int (bound));
+ gcc_checking_assert (i_bound == wi::to_widest (bound));
if (!upper && !realistic)
return;
@@ -2697,18 +2693,18 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound,
otherwise it can be executed BOUND + 1 times. We will lower the estimate
later if such a statement must be executed on the last iteration */
if (is_exit)
- delta = double_int_zero;
+ delta = 0;
else
- delta = double_int_one;
- i_bound += delta;
+ delta = 1;
+ widest_int new_i_bound = i_bound + delta;
/* If an overflow occurred, ignore the result. */
- if (i_bound.ult (delta))
+ if (wi::ltu_p (new_i_bound, delta))
return;
if (upper && !is_exit)
- do_warn_aggressive_loop_optimizations (loop, i_bound, at_stmt);
- record_niter_bound (loop, i_bound, realistic, upper);
+ do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
+ record_niter_bound (loop, new_i_bound, realistic, upper);
}
/* Record the estimate on number of iterations of LOOP based on the fact that
@@ -2723,7 +2719,6 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
{
tree niter_bound, extreme, delta;
tree type = TREE_TYPE (base), unsigned_type;
- double_int max;
if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
return;
@@ -2764,7 +2759,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
/* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
would get out of the range. */
niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
- max = derive_constant_upper_bound (niter_bound);
+ widest_int max = derive_constant_upper_bound (niter_bound);
record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
@@ -3068,27 +3063,21 @@ infer_loop_bounds_from_undefined (struct loop *loop)
free (bbs);
}
-
-
-/* Compare double ints, callback for qsort. */
+/* Compare wide ints, callback for qsort. */
static int
-double_int_cmp (const void *p1, const void *p2)
+wide_int_cmp (const void *p1, const void *p2)
{
- const double_int *d1 = (const double_int *)p1;
- const double_int *d2 = (const double_int *)p2;
- if (*d1 == *d2)
- return 0;
- if (d1->ult (*d2))
- return -1;
- return 1;
+ const widest_int *d1 = (const widest_int *) p1;
+ const widest_int *d2 = (const widest_int *) p2;
+ return wi::cmpu (*d1, *d2);
}
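wi::cmpu already returns the negative/zero/positive result qsort expects, so the comparator body reduces to a single call. Its use, as it appears in a later hunk:

    /* Sort a vec<widest_int> into increasing unsigned order.  */
    qsort (bounds.address (), bounds.length (),
           sizeof (widest_int), wide_int_cmp);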
/* Return index of BOUND in BOUNDS array sorted in increasing order.
Lookup by binary search. */
static int
-bound_index (vec<double_int> bounds, double_int bound)
+bound_index (vec<widest_int> bounds, const widest_int &bound)
{
unsigned int end = bounds.length ();
unsigned int begin = 0;
@@ -3097,11 +3086,11 @@ bound_index (vec<double_int> bounds, double_int bound)
while (begin != end)
{
unsigned int middle = (begin + end) / 2;
- double_int index = bounds[middle];
+ widest_int index = bounds[middle];
if (index == bound)
return middle;
- else if (index.ult (bound))
+ else if (wi::ltu_p (index, bound))
begin = middle + 1;
else
end = middle;
@@ -3120,7 +3109,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
{
pointer_map_t *bb_bounds;
struct nb_iter_bound *elt;
- vec<double_int> bounds = vNULL;
+ vec<widest_int> bounds = vNULL;
vec<vec<basic_block> > queues = vNULL;
vec<basic_block> queue = vNULL;
ptrdiff_t queue_index;
@@ -3130,20 +3119,20 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
/* Discover what bounds may interest us. */
for (elt = loop->bounds; elt; elt = elt->next)
{
- double_int bound = elt->bound;
+ widest_int bound = elt->bound;
/* An exit terminates the loop at a given iteration, while a non-exit
produces an undefined effect on the next iteration. */
if (!elt->is_exit)
{
- bound += double_int_one;
+ bound += 1;
/* If an overflow occurred, ignore the result. */
- if (bound.is_zero ())
+ if (bound == 0)
continue;
}
if (!loop->any_upper_bound
- || bound.ult (loop->nb_iterations_upper_bound))
+ || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
bounds.safe_push (bound);
}
@@ -3156,7 +3145,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
/* Sort the bounds in increasing order. */
qsort (bounds.address (), bounds.length (),
- sizeof (double_int), double_int_cmp);
+ sizeof (widest_int), wide_int_cmp);
/* For every basic block record the lowest bound that is guaranteed to
terminate the loop. */
@@ -3164,17 +3153,17 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
bb_bounds = pointer_map_create ();
for (elt = loop->bounds; elt; elt = elt->next)
{
- double_int bound = elt->bound;
+ widest_int bound = elt->bound;
if (!elt->is_exit)
{
- bound += double_int_one;
+ bound += 1;
/* If an overflow occurred, ignore the result. */
- if (bound.is_zero ())
+ if (bound == 0)
continue;
}
if (!loop->any_upper_bound
- || bound.ult (loop->nb_iterations_upper_bound))
+ || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
{
ptrdiff_t index = bound_index (bounds, bound);
void **entry = pointer_map_contains (bb_bounds,
@@ -3274,7 +3263,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Found better loop bound ");
- dump_double_int (dump_file, bounds[latch_index], true);
+ print_decu (bounds[latch_index], dump_file);
fprintf (dump_file, "\n");
}
record_niter_bound (loop, bounds[latch_index], false, true);
@@ -3309,7 +3298,7 @@ maybe_lower_iteration_bound (struct loop *loop)
for (elt = loop->bounds; elt; elt = elt->next)
{
if (!elt->is_exit
- && elt->bound.ult (loop->nb_iterations_upper_bound))
+ && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
{
if (!not_executed_last_iteration)
not_executed_last_iteration = pointer_set_create ();
@@ -3383,7 +3372,7 @@ maybe_lower_iteration_bound (struct loop *loop)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Reducing loop iteration estimate by 1; "
"undefined statement must be executed at the last iteration.\n");
- record_niter_bound (loop, loop->nb_iterations_upper_bound - double_int_one,
+ record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
false, true);
}
BITMAP_FREE (visited);
@@ -3402,7 +3391,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
unsigned i;
struct tree_niter_desc niter_desc;
edge ex;
- double_int bound;
+ widest_int bound;
edge likely_exit;
/* Give up if we already have tried to compute an estimation. */
@@ -3449,7 +3438,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
if (loop->header->count != 0)
{
gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
- bound = gcov_type_to_double_int (nit);
+ bound = gcov_type_to_wide_int (nit);
record_niter_bound (loop, bound, true, false);
}
@@ -3460,8 +3449,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
&& TREE_CODE (loop->nb_iterations) == INTEGER_CST)
{
loop->any_upper_bound = true;
- loop->nb_iterations_upper_bound
- = tree_to_double_int (loop->nb_iterations);
+ loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
}
}
@@ -3471,7 +3459,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
the function returns false, otherwise returns true. */
bool
-estimated_loop_iterations (struct loop *loop, double_int *nit)
+estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
@@ -3488,13 +3476,13 @@ estimated_loop_iterations (struct loop *loop, double_int *nit)
HOST_WIDE_INT
estimated_loop_iterations_int (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!estimated_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
@@ -3507,7 +3495,7 @@ estimated_loop_iterations_int (struct loop *loop)
false, otherwise returns true. */
bool
-max_loop_iterations (struct loop *loop, double_int *nit)
+max_loop_iterations (struct loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
@@ -3524,13 +3512,13 @@ max_loop_iterations (struct loop *loop, double_int *nit)
HOST_WIDE_INT
max_loop_iterations_int (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!max_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
@@ -3561,18 +3549,18 @@ estimated_stmt_executions_int (struct loop *loop)
false, otherwise returns true. */
bool
-max_stmt_executions (struct loop *loop, double_int *nit)
+max_stmt_executions (struct loop *loop, widest_int *nit)
{
- double_int nit_minus_one;
+ widest_int nit_minus_one;
if (!max_loop_iterations (loop, nit))
return false;
nit_minus_one = *nit;
- *nit += double_int_one;
+ *nit += 1;
- return (*nit).ugt (nit_minus_one);
+ return wi::gtu_p (*nit, nit_minus_one);
}
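The closing test doubles as an overflow check: after the increment, *nit exceeds its saved value unless the widest_int addition wrapped. The idiom in isolation, with illustrative names:

    widest_int before = *nit;   /* pre-increment value */
    *nit += 1;
    /* True iff the increment did not wrap around.  */
    bool ok = wi::gtu_p (*nit, before);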
/* Sets NIT to the estimated number of executions of the latch of the
@@ -3580,18 +3568,18 @@ max_stmt_executions (struct loop *loop, double_int *nit)
false, otherwise returns true. */
bool
-estimated_stmt_executions (struct loop *loop, double_int *nit)
+estimated_stmt_executions (struct loop *loop, widest_int *nit)
{
- double_int nit_minus_one;
+ widest_int nit_minus_one;
if (!estimated_loop_iterations (loop, nit))
return false;
nit_minus_one = *nit;
- *nit += double_int_one;
+ *nit += 1;
- return (*nit).ugt (nit_minus_one);
+ return wi::gtu_p (*nit, nit_minus_one);
}
/* Records estimates on numbers of iterations of loops. */
@@ -3662,7 +3650,7 @@ n_of_executions_at_most (gimple stmt,
struct nb_iter_bound *niter_bound,
tree niter)
{
- double_int bound = niter_bound->bound;
+ widest_int bound = niter_bound->bound;
tree nit_type = TREE_TYPE (niter), e;
enum tree_code cmp;
@@ -3670,7 +3658,7 @@ n_of_executions_at_most (gimple stmt,
/* If the bound does not even fit into NIT_TYPE, it cannot tell us that
the number of iterations is small. */
- if (!double_int_fits_to_tree_p (nit_type, bound))
+ if (!wi::fits_to_tree_p (bound, nit_type))
return false;
/* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
@@ -3713,16 +3701,16 @@ n_of_executions_at_most (gimple stmt,
gsi_next (&bsi))
if (gimple_has_side_effects (gsi_stmt (bsi)))
return false;
- bound += double_int_one;
- if (bound.is_zero ()
- || !double_int_fits_to_tree_p (nit_type, bound))
+ bound += 1;
+ if (bound == 0
+ || !wi::fits_to_tree_p (bound, nit_type))
return false;
}
cmp = GT_EXPR;
}
e = fold_binary (cmp, boolean_type_node,
- niter, double_int_to_tree (nit_type, bound));
+ niter, wide_int_to_tree (nit_type, bound));
return e && integer_nonzerop (e);
}
@@ -3760,7 +3748,7 @@ scev_probably_wraps_p (tree base, tree step,
tree unsigned_type, valid_niter;
tree type = TREE_TYPE (step);
tree e;
- double_int niter;
+ widest_int niter;
struct nb_iter_bound *bound;
/* FIXME: We really need something like
@@ -3826,10 +3814,10 @@ scev_probably_wraps_p (tree base, tree step,
estimate_numbers_of_iterations_loop (loop);
if (max_loop_iterations (loop, &niter)
- && double_int_fits_to_tree_p (TREE_TYPE (valid_niter), niter)
+ && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
&& (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
- double_int_to_tree (TREE_TYPE (valid_niter),
- niter))) != NULL
+ wide_int_to_tree (TREE_TYPE (valid_niter),
+ niter))) != NULL
&& integer_nonzerop (e))
{
fold_undefer_and_ignore_overflow_warnings ();