author     rguenth <rguenth@138bc75d-0d04-0410-961f-82ee72b054a4>   2010-08-06 11:47:31 +0000
committer  rguenth <rguenth@138bc75d-0d04-0410-961f-82ee72b054a4>   2010-08-06 11:47:31 +0000
commit     b7e554696457939778e4be53d04b9eea3e5475ce (patch)
tree       261ce4e80b1aef1ffd602e6ca4a99546e9283728 /gcc/tree-ssa-ccp.c
parent     c137ced0212cc3753463bcc44fe9b5faaf448368 (diff)
download   gcc-b7e554696457939778e4be53d04b9eea3e5475ce.tar.gz
2010-08-06 Richard Guenther <rguenther@suse.de>
	* tree-ssa-ccp.c (struct prop_value_d): Add mask member.
	(dump_lattice_value): Dump it.
	(get_default_value): Adjust.
	(get_constant_value): Likewise.
	(set_value_varying): Likewise.
	(set_lattice_value): Make sure to not go up the lattice with
	bitwise constant values.
	(get_value_for_expr): Handle ADDR_EXPRs.
	(value_to_double_int): New function.
	(get_value_from_alignment): Likewise.
	(do_dbg_cnt): Adjust.
	(ccp_lattice_meet): Handle partially constant values.
	(bit_value_unop_1): New function.
	(bit_value_binop_1): Likewise.
	(bit_value_unop): Likewise.
	(bit_value_binop): Likewise.
	(evaluate_stmt): Track partially constant values if
	flag_tree_bit_ccp is set.
	(ccp_fold_stmt): Dump if we folded a predicate.
	(ccp_visit_stmt): Adjust.
	* common.opt (ftree-bit-ccp): New flag.
	* doc/invoke.texi (ftree-bit-ccp): Document.
	* opts.c (decode_options): Enable bit-CCP at -O1.

	* gcc.dg/tree-ssa/ssa-dce-3.c: XFAIL.
	* gcc.dg/tree-ssa/pr23744.c: Disable CCP.
	* gcc.dg/tree-ssa/pr25382.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-30.c: New testcase.
	* gcc.dg/tree-ssa/ssa-ccp-31.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-32.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-33.c: Likewise.
	* gcc.c-torture/execute/20100805-1.c: Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@162943 138bc75d-0d04-0410-961f-82ee72b054a4
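As a quick illustration of what the new -ftree-bit-ccp mode buys (a hypothetical example in the spirit of the new ssa-ccp-3x testcases, not their actual contents): the pass now tracks which individual bits of an SSA name are known, so a predicate can be folded even when its operand is only partially constant.

/* Hypothetical illustration: 'a' is not fully constant, but bit-CCP
   knows its low bit is 1, so the branch folds to the true arm.  */
int
foo (int b)
{
  int a = b | 1;
  if (a & 1)
    return 1;
  return 0;
}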
Diffstat (limited to 'gcc/tree-ssa-ccp.c')
-rw-r--r--  gcc/tree-ssa-ccp.c | 754
1 files changed, 699 insertions, 55 deletions
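Throughout the patch a partially known integer is represented as a (value, mask) pair obeying X & ~mask == value & ~mask, i.e. mask bits that are set mark unknown bits. A minimal standalone sketch of the bitwise-AND transfer function, mirroring the formula used in bit_value_binop_1 below but with uint64_t standing in for GCC's double_int (illustration only, not GCC code):

#include <stdint.h>

struct bitval { uint64_t val; uint64_t mask; };  /* mask bit set = unknown */

static struct bitval
bitval_and (struct bitval a, struct bitval b)
{
  struct bitval r;
  /* A result bit stays unknown only where some input bit is unknown
     and no input bit is known to be zero: (m1|m2) & (v1|m1) & (v2|m2).  */
  r.mask = (a.mask | b.mask) & (a.val | a.mask) & (b.val | b.mask);
  r.val = a.val & b.val;
  return r;
}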
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 7d5f1a99ba4..a0165843821 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -150,6 +150,10 @@ struct prop_value_d {
/* Propagated value. */
tree value;
+
+ /* Mask that applies to the propagated value during CCP. For
+ X with a CONSTANT lattice value X & ~mask == value & ~mask. */
+ double_int mask;
};
typedef struct prop_value_d prop_value_t;
@@ -183,7 +187,18 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
break;
case CONSTANT:
fprintf (outf, "%sCONSTANT ", prefix);
- print_generic_expr (outf, val.value, dump_flags);
+ if (TREE_CODE (val.value) != INTEGER_CST
+ || double_int_zero_p (val.mask))
+ print_generic_expr (outf, val.value, dump_flags);
+ else
+ {
+ double_int cval = double_int_and_not (tree_to_double_int (val.value),
+ val.mask);
+ fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ prefix, cval.high, cval.low);
+ fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
+ val.mask.high, val.mask.low);
+ }
break;
default:
gcc_unreachable ();
@@ -225,7 +240,7 @@ static prop_value_t
get_default_value (tree var)
{
tree sym = SSA_NAME_VAR (var);
- prop_value_t val = { UNINITIALIZED, NULL_TREE };
+ prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
gimple stmt;
stmt = SSA_NAME_DEF_STMT (var);
@@ -240,7 +255,10 @@ get_default_value (tree var)
&& TREE_CODE (sym) == VAR_DECL)
val.lattice_val = UNDEFINED;
else
- val.lattice_val = VARYING;
+ {
+ val.lattice_val = VARYING;
+ val.mask = double_int_minus_one;
+ }
}
else if (is_gimple_assign (stmt)
/* Value-returning GIMPLE_CALL statements assign to
@@ -266,6 +284,7 @@ get_default_value (tree var)
{
/* Otherwise, VAR will never take on a constant value. */
val.lattice_val = VARYING;
+ val.mask = double_int_minus_one;
}
return val;
@@ -297,7 +316,10 @@ static inline tree
get_constant_value (tree var)
{
prop_value_t *val = get_value (var);
- if (val && val->lattice_val == CONSTANT)
+ if (val
+ && val->lattice_val == CONSTANT
+ && (TREE_CODE (val->value) != INTEGER_CST
+ || double_int_zero_p (val->mask)))
return val->value;
return NULL_TREE;
}
@@ -311,6 +333,7 @@ set_value_varying (tree var)
val->lattice_val = VARYING;
val->value = NULL_TREE;
+ val->mask = double_int_minus_one;
}
/* For float types, modify the value of VAL to make ccp work correctly
@@ -360,6 +383,42 @@ canonicalize_float_value (prop_value_t *val)
}
}
+/* Return whether the lattice transition is valid. */
+
+static bool
+valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
+{
+ /* Lattice transitions must always be monotonically increasing in
+ value. */
+ if (old_val.lattice_val < new_val.lattice_val)
+ return true;
+
+ if (old_val.lattice_val != new_val.lattice_val)
+ return false;
+
+ if (!old_val.value && !new_val.value)
+ return true;
+
+ /* Now both lattice values are CONSTANT. */
+
+ /* Allow transitioning from &x to &x & ~3. */
+ if (TREE_CODE (old_val.value) != INTEGER_CST
+ && TREE_CODE (new_val.value) == INTEGER_CST)
+ return true;
+
+ /* Bit-lattices have to agree in the still valid bits. */
+ if (TREE_CODE (old_val.value) == INTEGER_CST
+ && TREE_CODE (new_val.value) == INTEGER_CST)
+ return double_int_equal_p
+ (double_int_and_not (tree_to_double_int (old_val.value),
+ new_val.mask),
+ double_int_and_not (tree_to_double_int (new_val.value),
+ new_val.mask));
+
+ /* Otherwise constant values have to agree. */
+ return operand_equal_p (old_val.value, new_val.value, 0);
+}
+
/* Set the value for variable VAR to NEW_VAL. Return true if the new
value is different from VAR's previous value. */
@@ -371,17 +430,34 @@ set_lattice_value (tree var, prop_value_t new_val)
canonicalize_float_value (&new_val);
- /* Lattice transitions must always be monotonically increasing in
- value. If *OLD_VAL and NEW_VAL are the same, return false to
- inform the caller that this was a non-transition. */
+ /* We have to be careful to not go up the bitwise lattice
+ represented by the mask.
+ ??? This doesn't seem to be the best place to enforce this. */
+ if (new_val.lattice_val == CONSTANT
+ && old_val->lattice_val == CONSTANT
+ && TREE_CODE (new_val.value) == INTEGER_CST
+ && TREE_CODE (old_val->value) == INTEGER_CST)
+ {
+ double_int diff;
+ diff = double_int_xor (tree_to_double_int (new_val.value),
+ tree_to_double_int (old_val->value));
+ new_val.mask = double_int_ior (new_val.mask,
+ double_int_ior (old_val->mask, diff));
+ }
- gcc_assert (old_val->lattice_val < new_val.lattice_val
- || (old_val->lattice_val == new_val.lattice_val
- && ((!old_val->value && !new_val.value)
- || operand_equal_p (old_val->value, new_val.value, 0))));
+ gcc_assert (valid_lattice_transition (*old_val, new_val));
- if (old_val->lattice_val != new_val.lattice_val)
+ /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
+ caller that this was a non-transition. */
+ if (old_val->lattice_val != new_val.lattice_val
+ || (new_val.lattice_val == CONSTANT
+ && TREE_CODE (new_val.value) == INTEGER_CST
+ && (TREE_CODE (old_val->value) != INTEGER_CST
+ || !double_int_equal_p (new_val.mask, old_val->mask))))
{
+ /* ??? We would like to delay creation of INTEGER_CSTs from
+ partially constants here. */
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
@@ -397,31 +473,142 @@ set_lattice_value (tree var, prop_value_t new_val)
return false;
}
-/* Return the value for the tree operand EXPR. */
+static prop_value_t get_value_for_expr (tree, bool);
+static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
+static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
+ tree, double_int, double_int,
+ tree, double_int, double_int);
+
+/* Return a double_int that can be used for bitwise simplifications
+ from VAL. */
+
+static double_int
+value_to_double_int (prop_value_t val)
+{
+ if (val.value
+ && TREE_CODE (val.value) == INTEGER_CST)
+ return tree_to_double_int (val.value);
+ else
+ return double_int_zero;
+}
+
+/* Return the value for the address expression EXPR based on alignment
+ information. */
static prop_value_t
-get_value_for_expr (tree expr)
+get_value_from_alignment (tree expr)
+{
+ prop_value_t val;
+ HOST_WIDE_INT bitsize, bitpos;
+ tree base, offset;
+ enum machine_mode mode;
+ int align;
+
+ gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
+
+ base = get_inner_reference (TREE_OPERAND (expr, 0),
+ &bitsize, &bitpos, &offset,
+ &mode, &align, &align, false);
+ if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF)
+ val = get_value_for_expr (TREE_OPERAND (base, 0), true);
+ else if (TREE_CODE (base) == MEM_REF)
+ val = bit_value_binop (PLUS_EXPR, TREE_TYPE (expr),
+ TREE_OPERAND (base, 0), TREE_OPERAND (base, 1));
+ else if (base
+ && ((align = get_object_alignment (base, BITS_PER_UNIT,
+ BIGGEST_ALIGNMENT))
+ > BITS_PER_UNIT))
+ {
+ val.lattice_val = CONSTANT;
+ /* We assume pointers are zero-extended. */
+ val.mask = double_int_and_not
+ (double_int_mask (TYPE_PRECISION (TREE_TYPE (expr))),
+ uhwi_to_double_int (align / BITS_PER_UNIT - 1));
+ val.value = build_int_cst (TREE_TYPE (expr), 0);
+ }
+ else
+ {
+ val.lattice_val = VARYING;
+ val.mask = double_int_minus_one;
+ val.value = NULL_TREE;
+ }
+ if (bitpos != 0)
+ {
+ double_int value, mask;
+ bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
+ TREE_TYPE (expr), value_to_double_int (val), val.mask,
+ TREE_TYPE (expr),
+ shwi_to_double_int (bitpos / BITS_PER_UNIT),
+ double_int_zero);
+ val.lattice_val = double_int_minus_one_p (mask) ? VARYING : CONSTANT;
+ val.mask = mask;
+ if (val.lattice_val == CONSTANT)
+ val.value = double_int_to_tree (TREE_TYPE (expr), value);
+ else
+ val.value = NULL_TREE;
+ }
+ /* ??? We should handle i * 4 and more complex expressions from
+ the offset, possibly by just expanding get_value_for_expr. */
+ if (offset != NULL_TREE)
+ {
+ double_int value, mask;
+ prop_value_t oval = get_value_for_expr (offset, true);
+ bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
+ TREE_TYPE (expr), value_to_double_int (val), val.mask,
+ TREE_TYPE (expr), value_to_double_int (oval),
+ oval.mask);
+ val.mask = mask;
+ if (double_int_minus_one_p (mask))
+ {
+ val.lattice_val = VARYING;
+ val.value = NULL_TREE;
+ }
+ else
+ {
+ val.lattice_val = CONSTANT;
+ val.value = double_int_to_tree (TREE_TYPE (expr), value);
+ }
+ }
+
+ return val;
+}
+
+/* Return the value for the tree operand EXPR. If FOR_BITS_P is true
+ return constant bits extracted from alignment information for
+ invariant addresses. */
+
+static prop_value_t
+get_value_for_expr (tree expr, bool for_bits_p)
{
prop_value_t val;
if (TREE_CODE (expr) == SSA_NAME)
- val = *(get_value (expr));
- else if (is_gimple_min_invariant (expr))
+ {
+ val = *get_value (expr);
+ if (for_bits_p
+ && val.lattice_val == CONSTANT
+ && TREE_CODE (val.value) == ADDR_EXPR)
+ val = get_value_from_alignment (val.value);
+ }
+ else if (is_gimple_min_invariant (expr)
+ && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
{
val.lattice_val = CONSTANT;
val.value = expr;
+ val.mask = double_int_zero;
canonicalize_float_value (&val);
}
+ else if (TREE_CODE (expr) == ADDR_EXPR)
+ val = get_value_from_alignment (expr);
else
{
val.lattice_val = VARYING;
+ val.mask = double_int_minus_one;
val.value = NULL_TREE;
}
-
return val;
}
-
/* Return the likely CCP lattice value for STMT.
If STMT has no operands, then return CONSTANT.
@@ -637,6 +824,7 @@ do_dbg_cnt (void)
if (!dbg_cnt (ccp))
{
const_val[i].lattice_val = VARYING;
+ const_val[i].mask = double_int_minus_one;
const_val[i].value = NULL_TREE;
}
}
@@ -692,23 +880,58 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
/* any M VARYING = VARYING. */
val1->lattice_val = VARYING;
+ val1->mask = double_int_minus_one;
val1->value = NULL_TREE;
}
else if (val1->lattice_val == CONSTANT
&& val2->lattice_val == CONSTANT
+ && TREE_CODE (val1->value) == INTEGER_CST
+ && TREE_CODE (val2->value) == INTEGER_CST)
+ {
+ /* Ci M Cj = Ci if (i == j)
+ Ci M Cj = VARYING if (i != j)
+
+ For INTEGER_CSTs mask unequal bits. If no equal bits remain,
+ drop to varying. */
+ val1->mask
+ = double_int_ior (double_int_ior (val1->mask,
+ val2->mask),
+ double_int_xor (tree_to_double_int (val1->value),
+ tree_to_double_int (val2->value)));
+ if (double_int_minus_one_p (val1->mask))
+ {
+ val1->lattice_val = VARYING;
+ val1->value = NULL_TREE;
+ }
+ }
+ else if (val1->lattice_val == CONSTANT
+ && val2->lattice_val == CONSTANT
&& simple_cst_equal (val1->value, val2->value) == 1)
{
/* Ci M Cj = Ci if (i == j)
Ci M Cj = VARYING if (i != j)
- If these two values come from memory stores, make sure that
- they come from the same memory reference.
- Nothing to do. VAL1 already contains the value we want. */
+ VAL1 already contains the value we want for equivalent values. */
+ }
+ else if (val1->lattice_val == CONSTANT
+ && val2->lattice_val == CONSTANT
+ && (TREE_CODE (val1->value) == ADDR_EXPR
+ || TREE_CODE (val2->value) == ADDR_EXPR))
+ {
+ /* When not equal addresses are involved try meeting for
+ alignment. */
+ prop_value_t tem = *val2;
+ if (TREE_CODE (val1->value) == ADDR_EXPR)
+ *val1 = get_value_for_expr (val1->value, true);
+ if (TREE_CODE (val2->value) == ADDR_EXPR)
+ tem = get_value_for_expr (val2->value, true);
+ ccp_lattice_meet (val1, &tem);
}
else
{
/* Any other combination is VARYING. */
val1->lattice_val = VARYING;
+ val1->mask = double_int_minus_one;
val1->value = NULL_TREE;
}
}
@@ -769,7 +992,7 @@ ccp_visit_phi_node (gimple phi)
if (e->flags & EDGE_EXECUTABLE)
{
tree arg = gimple_phi_arg (phi, i)->def;
- prop_value_t arg_val = get_value_for_expr (arg);
+ prop_value_t arg_val = get_value_for_expr (arg, false);
ccp_lattice_meet (&new_val, &arg_val);
@@ -1334,6 +1557,347 @@ fold_const_aggregate_ref (tree t)
return NULL_TREE;
}
+/* Apply the operation CODE in type TYPE to the value, mask pair
+ RVAL and RMASK representing a value of type RTYPE and set
+ the value, mask pair *VAL and *MASK to the result. */
+
+static void
+bit_value_unop_1 (enum tree_code code, tree type,
+ double_int *val, double_int *mask,
+ tree rtype, double_int rval, double_int rmask)
+{
+ switch (code)
+ {
+ case BIT_NOT_EXPR:
+ *mask = rmask;
+ *val = double_int_not (rval);
+ break;
+
+ case NEGATE_EXPR:
+ {
+ double_int temv, temm;
+ /* Return ~rval + 1. */
+ bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
+ bit_value_binop_1 (PLUS_EXPR, type, val, mask,
+ type, temv, temm,
+ type, double_int_one, double_int_zero);
+ break;
+ }
+
+ CASE_CONVERT:
+ {
+ bool uns;
+
+ /* First extend mask and value according to the original type. */
+ uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
+ ? 0 : TYPE_UNSIGNED (rtype));
+ *mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
+ *val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);
+
+ /* Then extend mask and value according to the target type. */
+ uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
+ ? 0 : TYPE_UNSIGNED (type));
+ *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ *val = double_int_ext (*val, TYPE_PRECISION (type), uns);
+ break;
+ }
+
+ default:
+ *mask = double_int_minus_one;
+ break;
+ }
+}
+
+/* Apply the operation CODE in type TYPE to the value, mask pairs
+ R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
+ and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
+
+static void
+bit_value_binop_1 (enum tree_code code, tree type,
+ double_int *val, double_int *mask,
+ tree r1type, double_int r1val, double_int r1mask,
+ tree r2type, double_int r2val, double_int r2mask)
+{
+ bool uns = (TREE_CODE (type) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
+ /* Assume we'll get a constant result. Use an initial varying value,
+ we fall back to varying in the end if necessary. */
+ *mask = double_int_minus_one;
+ switch (code)
+ {
+ case BIT_AND_EXPR:
+ /* The mask is constant where there is a known not
+ set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
+ *mask = double_int_and (double_int_ior (r1mask, r2mask),
+ double_int_and (double_int_ior (r1val, r1mask),
+ double_int_ior (r2val, r2mask)));
+ *val = double_int_and (r1val, r2val);
+ break;
+
+ case BIT_IOR_EXPR:
+ /* The mask is constant where there is a known
+ set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
+ *mask = double_int_and_not
+ (double_int_ior (r1mask, r2mask),
+ double_int_ior (double_int_and_not (r1val, r1mask),
+ double_int_and_not (r2val, r2mask)));
+ *val = double_int_ior (r1val, r2val);
+ break;
+
+ case BIT_XOR_EXPR:
+ /* m1 | m2 */
+ *mask = double_int_ior (r1mask, r2mask);
+ *val = double_int_xor (r1val, r2val);
+ break;
+
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ if (double_int_zero_p (r2mask))
+ {
+ HOST_WIDE_INT shift = r2val.low;
+ if (code == RROTATE_EXPR)
+ shift = -shift;
+ *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
+ *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
+ }
+ break;
+
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ /* ??? We can handle partially known shift counts if we know
+ its sign. That way we can tell that (x << (y | 8)) & 255
+ is zero. */
+ if (double_int_zero_p (r2mask))
+ {
+ HOST_WIDE_INT shift = r2val.low;
+ if (code == RSHIFT_EXPR)
+ shift = -shift;
+ /* We need to know if we are doing a left or a right shift
+ to properly shift in zeros for left shift and unsigned
+ right shifts and the sign bit for signed right shifts.
+ For signed right shifts we shift in varying in case
+ the sign bit was varying. */
+ if (shift > 0)
+ {
+ *mask = double_int_lshift (r1mask, shift,
+ TYPE_PRECISION (type), false);
+ *val = double_int_lshift (r1val, shift,
+ TYPE_PRECISION (type), false);
+ }
+ else if (shift < 0)
+ {
+ shift = -shift;
+ *mask = double_int_rshift (r1mask, shift,
+ TYPE_PRECISION (type), !uns);
+ *val = double_int_rshift (r1val, shift,
+ TYPE_PRECISION (type), !uns);
+ }
+ else
+ {
+ *mask = r1mask;
+ *val = r1val;
+ }
+ }
+ break;
+
+ case PLUS_EXPR:
+ case POINTER_PLUS_EXPR:
+ {
+ double_int lo, hi;
+ /* Do the addition with unknown bits set to zero, to give carry-ins of
+ zero wherever possible. */
+ lo = double_int_add (double_int_and_not (r1val, r1mask),
+ double_int_and_not (r2val, r2mask));
+ lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
+ /* Do the addition with unknown bits set to one, to give carry-ins of
+ one wherever possible. */
+ hi = double_int_add (double_int_ior (r1val, r1mask),
+ double_int_ior (r2val, r2mask));
+ hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
+ /* Each bit in the result is known if (a) the corresponding bits in
+ both inputs are known, and (b) the carry-in to that bit position
+ is known. We can check condition (b) by seeing if we got the same
+ result with minimised carries as with maximised carries. */
+ *mask = double_int_ior (double_int_ior (r1mask, r2mask),
+ double_int_xor (lo, hi));
+ *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ /* It shouldn't matter whether we choose lo or hi here. */
+ *val = lo;
+ break;
+ }
+
+ case MINUS_EXPR:
+ {
+ double_int temv, temm;
+ bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
+ r2type, r2val, r2mask);
+ bit_value_binop_1 (PLUS_EXPR, type, val, mask,
+ r1type, r1val, r1mask,
+ r2type, temv, temm);
+ break;
+ }
+
+ case MULT_EXPR:
+ {
+ /* Just track trailing zeros in both operands and transfer
+ them to the other. */
+ int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
+ int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
+ if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
+ {
+ *mask = double_int_zero;
+ *val = double_int_zero;
+ }
+ else if (r1tz + r2tz > 0)
+ {
+ *mask = double_int_not (double_int_mask (r1tz + r2tz));
+ *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ *val = double_int_zero;
+ }
+ break;
+ }
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ {
+ double_int m = double_int_ior (r1mask, r2mask);
+ if (!double_int_equal_p (double_int_and_not (r1val, m),
+ double_int_and_not (r2val, m)))
+ {
+ *mask = double_int_zero;
+ *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
+ }
+ else
+ {
+ /* We know the result of a comparison is always one or zero. */
+ *mask = double_int_one;
+ *val = double_int_zero;
+ }
+ break;
+ }
+
+ case GE_EXPR:
+ case GT_EXPR:
+ {
+ double_int tem = r1val;
+ r1val = r2val;
+ r2val = tem;
+ tem = r1mask;
+ r1mask = r2mask;
+ r2mask = tem;
+ code = swap_tree_comparison (code);
+ }
+ /* Fallthru. */
+ case LT_EXPR:
+ case LE_EXPR:
+ {
+ int minmax, maxmin;
+ /* If the most significant bits are not known we know nothing. */
+ if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
+ break;
+
+ /* If we know the most significant bits we know the value
+ ranges by means of treating varying bits as zero
+ or one. Do a cross comparison of the max/min pairs. */
+ maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
+ double_int_and_not (r2val, r2mask), uns);
+ minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
+ double_int_ior (r2val, r2mask), uns);
+ if (maxmin < 0) /* r1 is less than r2. */
+ {
+ *mask = double_int_zero;
+ *val = double_int_one;
+ }
+ else if (minmax > 0) /* r1 is not less or equal to r2. */
+ {
+ *mask = double_int_zero;
+ *val = double_int_zero;
+ }
+ else if (maxmin == minmax) /* r1 and r2 are equal. */
+ {
+ /* This probably should never happen as we'd have
+ folded the thing during fully constant value folding. */
+ *mask = double_int_zero;
+ *val = (code == LE_EXPR ? double_int_one : double_int_zero);
+ }
+ else
+ {
+ /* We know the result of a comparison is always one or zero. */
+ *mask = double_int_one;
+ *val = double_int_zero;
+ }
+ break;
+ }
+
+ default:;
+ }
+}
+
+/* Return the propagation value when applying the operation CODE to
+ the value RHS yielding type TYPE. */
+
+static prop_value_t
+bit_value_unop (enum tree_code code, tree type, tree rhs)
+{
+ prop_value_t rval = get_value_for_expr (rhs, true);
+ double_int value, mask;
+ prop_value_t val;
+ gcc_assert ((rval.lattice_val == CONSTANT
+ && TREE_CODE (rval.value) == INTEGER_CST)
+ || double_int_minus_one_p (rval.mask));
+ bit_value_unop_1 (code, type, &value, &mask,
+ TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
+ if (!double_int_minus_one_p (mask))
+ {
+ val.lattice_val = CONSTANT;
+ val.mask = mask;
+ /* ??? Delay building trees here. */
+ val.value = double_int_to_tree (type, value);
+ }
+ else
+ {
+ val.lattice_val = VARYING;
+ val.value = NULL_TREE;
+ val.mask = double_int_minus_one;
+ }
+ return val;
+}
+
+/* Return the propagation value when applying the operation CODE to
+ the values RHS1 and RHS2 yielding type TYPE. */
+
+static prop_value_t
+bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
+{
+ prop_value_t r1val = get_value_for_expr (rhs1, true);
+ prop_value_t r2val = get_value_for_expr (rhs2, true);
+ double_int value, mask;
+ prop_value_t val;
+ gcc_assert ((r1val.lattice_val == CONSTANT
+ && TREE_CODE (r1val.value) == INTEGER_CST)
+ || double_int_minus_one_p (r1val.mask));
+ gcc_assert ((r2val.lattice_val == CONSTANT
+ && TREE_CODE (r2val.value) == INTEGER_CST)
+ || double_int_minus_one_p (r2val.mask));
+ bit_value_binop_1 (code, type, &value, &mask,
+ TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
+ TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
+ if (!double_int_minus_one_p (mask))
+ {
+ val.lattice_val = CONSTANT;
+ val.mask = mask;
+ /* ??? Delay building trees here. */
+ val.value = double_int_to_tree (type, value);
+ }
+ else
+ {
+ val.lattice_val = VARYING;
+ val.value = NULL_TREE;
+ val.mask = double_int_minus_one;
+ }
+ return val;
+}
+
/* Evaluate statement STMT.
Valid only for assignments, calls, conditionals, and switches. */
@@ -1343,9 +1907,26 @@ evaluate_stmt (gimple stmt)
prop_value_t val;
tree simplified = NULL_TREE;
ccp_lattice_t likelyvalue = likely_value (stmt);
- bool is_constant;
+ bool is_constant = false;
- fold_defer_overflow_warnings ();
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "which is likely ");
+ switch (likelyvalue)
+ {
+ case CONSTANT:
+ fprintf (dump_file, "CONSTANT");
+ break;
+ case UNDEFINED:
+ fprintf (dump_file, "UNDEFINED");
+ break;
+ case VARYING:
+ fprintf (dump_file, "VARYING");
+ break;
+ default:;
+ }
+ fprintf (dump_file, "\n");
+ }
/* If the statement is likely to have a CONSTANT result, then try
to fold the statement to determine the constant value. */
@@ -1353,7 +1934,19 @@ evaluate_stmt (gimple stmt)
Since likely_value never returns CONSTANT for calls, we will
not attempt to fold them, including builtins that may profit. */
if (likelyvalue == CONSTANT)
- simplified = ccp_fold (stmt);
+ {
+ fold_defer_overflow_warnings ();
+ simplified = ccp_fold (stmt);
+ is_constant = simplified && is_gimple_min_invariant (simplified);
+ fold_undefer_overflow_warnings (is_constant, stmt, 0);
+ if (is_constant)
+ {
+ /* The statement produced a constant value. */
+ val.lattice_val = CONSTANT;
+ val.value = simplified;
+ val.mask = double_int_zero;
+ }
+ }
/* If the statement is likely to have a VARYING result, then do not
bother folding the statement. */
else if (likelyvalue == VARYING)
@@ -1373,46 +1966,85 @@ evaluate_stmt (gimple stmt)
else
/* These cannot satisfy is_gimple_min_invariant without folding. */
gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
+ is_constant = simplified && is_gimple_min_invariant (simplified);
+ if (is_constant)
+ {
+ /* The statement produced a constant value. */
+ val.lattice_val = CONSTANT;
+ val.value = simplified;
+ val.mask = double_int_zero;
+ }
}
- is_constant = simplified && is_gimple_min_invariant (simplified);
-
- fold_undefer_overflow_warnings (is_constant, stmt, 0);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
+ /* Resort to simplification for bitwise tracking. */
+ if (flag_tree_bit_ccp
+ && likelyvalue == CONSTANT
+ && !is_constant)
{
- fprintf (dump_file, "which is likely ");
- switch (likelyvalue)
+ enum gimple_code code = gimple_code (stmt);
+ val.lattice_val = VARYING;
+ val.value = NULL_TREE;
+ val.mask = double_int_minus_one;
+ if (code == GIMPLE_ASSIGN)
{
- case CONSTANT:
- fprintf (dump_file, "CONSTANT");
- break;
- case UNDEFINED:
- fprintf (dump_file, "UNDEFINED");
- break;
- case VARYING:
- fprintf (dump_file, "VARYING");
- break;
- default:;
+ enum tree_code subcode = gimple_assign_rhs_code (stmt);
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ switch (get_gimple_rhs_class (subcode))
+ {
+ case GIMPLE_SINGLE_RHS:
+ if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+ || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+ val = get_value_for_expr (rhs1, true);
+ break;
+
+ case GIMPLE_UNARY_RHS:
+ if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+ || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+ && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
+ || POINTER_TYPE_P (gimple_expr_type (stmt))))
+ val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
+ break;
+
+ case GIMPLE_BINARY_RHS:
+ if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+ || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+ {
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ val = bit_value_binop (subcode,
+ TREE_TYPE (rhs1), rhs1, rhs2);
+ }
+ break;
+
+ default:;
+ }
}
- fprintf (dump_file, "\n");
+ else if (code == GIMPLE_COND)
+ {
+ enum tree_code code = gimple_cond_code (stmt);
+ tree rhs1 = gimple_cond_lhs (stmt);
+ tree rhs2 = gimple_cond_rhs (stmt);
+ if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+ || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+ val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
+ }
+ is_constant = (val.lattice_val == CONSTANT);
}
- if (is_constant)
- {
- /* The statement produced a constant value. */
- val.lattice_val = CONSTANT;
- val.value = simplified;
- }
- else
+ if (!is_constant)
{
/* The statement produced a nonconstant value. If the statement
had UNDEFINED operands, then the result of the statement
should be UNDEFINED. Otherwise, the statement is VARYING. */
if (likelyvalue == UNDEFINED)
- val.lattice_val = likelyvalue;
+ {
+ val.lattice_val = likelyvalue;
+ val.mask = double_int_zero;
+ }
else
- val.lattice_val = VARYING;
+ {
+ val.lattice_val = VARYING;
+ val.mask = double_int_minus_one;
+ }
val.value = NULL_TREE;
}
@@ -1438,9 +2070,18 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
fold more conditionals here. */
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || TREE_CODE (val.value) != INTEGER_CST)
+ || !double_int_zero_p (val.mask))
return false;
+ if (dump_file)
+ {
+ fprintf (dump_file, "Folding predicate ");
+ print_gimple_expr (dump_file, stmt, 0, 0);
+ fprintf (dump_file, " to ");
+ print_generic_expr (dump_file, val.value, 0);
+ fprintf (dump_file, "\n");
+ }
+
if (integer_zerop (val.value))
gimple_cond_make_false (stmt);
else
@@ -1583,12 +2224,15 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p)
block = gimple_bb (stmt);
val = evaluate_stmt (stmt);
+ if (val.lattice_val != CONSTANT
+ || !double_int_zero_p (val.mask))
+ return SSA_PROP_VARYING;
/* Find which edge out of the conditional block will be taken and add it
to the worklist. If no single edge can be determined statically,
return SSA_PROP_VARYING to feed all the outgoing edges to the
propagation engine. */
- *taken_edge_p = val.value ? find_taken_edge (block, val.value) : 0;
+ *taken_edge_p = find_taken_edge (block, val.value);
if (*taken_edge_p)
return SSA_PROP_INTERESTING;
else
@@ -1653,7 +2297,7 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
Mark them VARYING. */
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
{
- prop_value_t v = { VARYING, NULL_TREE };
+ prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
set_lattice_value (def, v);
}