Commit 0b4b14ac by Richard Guenther, committed by Richard Biener

tree-ssa-ccp.c (struct prop_value_d): Add mask member.

2010-08-06  Richard Guenther  <rguenther@suse.de>

	* tree-ssa-ccp.c (struct prop_value_d): Add mask member.
	(dump_lattice_value): Dump it.
	(get_default_value): Adjust.
	(get_constant_value): Likewise.
	(set_value_varying): Likewise.
	(set_lattice_value): Make sure to not go up the lattice
	with bitwise constant values.
	(get_value_for_expr): Handle ADDR_EXPRs.
	(value_to_double_int): New function.
	(get_value_from_alignment): Likewise.
	(do_dbg_cnt): Adjust.
	(ccp_lattice_meet): Handle partially constant values.
	(bit_value_unop_1): New function.
	(bit_value_binop_1): Likewise.
	(bit_value_unop): Likewise.
	(bit_value_binop): Likewise.
	(evaluate_stmt): Track partially constant values if
	flag_tree_bit_ccp is set.
	(ccp_fold_stmt): Dump if we folded a predicate.
	(ccp_visit_stmt): Adjust.
	* common.opt (ftree-bit-ccp): New flag.
	* doc/invoke.texi (ftree-bit-ccp): Document.
	* opts.c (decode_options): Enable bit-CCP at -O1.

	* gcc.dg/tree-ssa/ssa-dce-3.c: XFAIL.
	* gcc.dg/tree-ssa/pr23744.c: Disable CCP.
	* gcc.dg/tree-ssa/pr25382.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-30.c: New testcase.
	* gcc.dg/tree-ssa/ssa-ccp-31.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-32.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-33.c: Likewise.
	* gcc.c-torture/execute/20100805-1.c: Likewise.

From-SVN: r162943
parent c2653d8f

gcc/common.opt:
@@ -1281,6 +1281,10 @@ ftree-ccp
Common Report Var(flag_tree_ccp) Optimization
Enable SSA-CCP optimization on trees

ftree-bit-ccp
Common Report Var(flag_tree_bit_ccp) Optimization
Enable SSA-BIT-CCP optimization on trees

ftree-store-ccp
Common
Does nothing. Preserved for backward compatibility.

gcc/doc/invoke.texi:
@@ -380,7 +380,7 @@ Objective-C and Objective-C++ Dialects}.
-fsel-sched-pipelining -fsel-sched-pipelining-outer-loops @gol
-fsignaling-nans -fsingle-precision-constant -fsplit-ivs-in-unroller @gol
-fsplit-wide-types -fstack-protector -fstack-protector-all @gol
-fstrict-aliasing -fstrict-overflow -fthread-jumps -ftracer -ftree-bit-ccp @gol
-ftree-builtin-call-dce -ftree-ccp -ftree-ch -ftree-copy-prop @gol
-ftree-copyrename -ftree-dce -ftree-dominator-opts -ftree-dse @gol
-ftree-forwprop -ftree-fre -ftree-loop-if-convert -ftree-loop-im @gol
@@ -5848,6 +5848,7 @@ compilation time.
-fipa-reference @gol
-fmerge-constants
-fsplit-wide-types @gol
-ftree-bit-ccp @gol
-ftree-builtin-call-dce @gol
-ftree-ccp @gol
-ftree-ch @gol
@@ -6737,6 +6738,13 @@ Transposing is enabled only if profiling information is available.
Perform forward store motion on trees. This flag is
enabled by default at @option{-O} and higher.

@item -ftree-bit-ccp
@opindex ftree-bit-ccp
Perform sparse conditional bit constant propagation on trees and propagate
pointer alignment information.
This pass only operates on local scalar variables and is enabled by default
at @option{-O} and higher. It requires that @option{-ftree-ccp} is enabled.
@item -ftree-ccp
@opindex ftree-ccp
Perform sparse conditional constant propagation (CCP) on trees. This
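
A concrete illustration of what the new option enables (a minimal sketch, not part of the patch): after a &= 1 the lattice value of a is CONSTANT with value 0 and mask 1, i.e. only bit 0 is unknown, so a test of any other bit folds even though a itself is not constant. Compiled with -O, where the flag is now on by default:

int
foo (int a)
{
  a &= 1;     /* only bit 0 of a is unknown afterwards */
  if (a & 4)  /* bit 2 is known zero: folds to false */
    return 2;
  return 1;
}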

gcc/opts.c:
@@ -767,6 +767,7 @@ decode_options (unsigned int argc, const char **argv,
flag_merge_constants = opt1;
flag_split_wide_types = opt1;
flag_tree_ccp = opt1;
flag_tree_bit_ccp = opt1;
flag_tree_dce = opt1;
flag_tree_dom = opt1;
flag_tree_dse = opt1;

gcc/testsuite/gcc.c-torture/execute/20100805-1.c (new):
unsigned int foo (unsigned int a, unsigned int b)
{
unsigned i;
a = a & 1;
for (i = 0; i < b; ++i)
a = a << 1 | a >> (sizeof (unsigned int) * 8 - 1);
return a;
}
extern void abort (void);
int main()
{
if (foo (1, sizeof (unsigned int) * 8 + 1) != 2)
abort ();
return 0;
}

gcc/testsuite/gcc.dg/tree-ssa/pr23744.c:
/* { dg-do compile } */
/* { dg-options "-O2 -fno-tree-ccp -fdump-tree-vrp1" } */
int g (int i, int j)
{

gcc/testsuite/gcc.dg/tree-ssa/pr25382.c:
@@ -3,7 +3,7 @@
Check that VRP now gets ranges from BIT_AND_EXPRs. */
/* { dg-do compile } */
/* { dg-options "-O2 -fno-tree-ccp -fdump-tree-vrp1" } */
int
foo (int a)

gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-30.c (new):
/* { dg-do compile } */
/* { dg-options "-O -fdump-tree-ccp1" } */
int
foo (int a)
{
int b = a & 0xff;
if (b > 300)
return 2;
else
return 1;
}
/* { dg-final { scan-tree-dump-times "Folding predicate b_.* > 300 to 0" 1 "ccp1" } } */
/* { dg-final { cleanup-tree-dump "ccp1" } } */

gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-31.c (new):
/* { dg-do compile } */
/* { dg-options "-O -fdump-tree-ccp1" } */
int g (int i, int j)
{
int t = 0;
int i1;
if (i == j)
t = 3;
for (i1 = 0; i1 < 10000; i1++) h();
if (t != 5)
return 0;
else
return 1;
}
/* { dg-final { scan-tree-dump-times "Folding predicate.*to 1" 1 "ccp1" } } */
/* { dg-final { cleanup-tree-dump "ccp1" } } */

gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-32.c (new):
/* { dg-do run } */
/* { dg-options "-O" } */
extern void link_error (void);
unsigned int __attribute__((noinline,noclone))
test0 (unsigned int a)
{
a = a & 1;
a = a << 1 | a >> (sizeof (unsigned int) * 8 - 1);
if (a & 1)
{
a = a | 4;
link_error ();
}
if (a & 4)
link_error ();
return a;
}
int __attribute__((noinline,noclone))
test1 (int a)
{
a |= 1;
a = a << (sizeof (int) * 8 - 1);
if (a >= 0)
link_error ();
a = a * 4;
if (a & ~3)
link_error ();
if (a == -1)
link_error ();
return a;
}
int __attribute__((noinline,noclone))
test2 (int a)
{
a = a | 0xff;
a = a + 1;
if (a & 0xff)
link_error ();
a = -a;
if (a & 0xff)
link_error ();
a = a - 1;
if ((a & 0xff) != 0xff)
link_error ();
return a;
}
extern void abort (void);
int main()
{
if (test0 (1) != 2)
abort ();
if (test1 (0) != 0)
abort ();
if (test2 (-1) != -1)
abort ();
return 0;
}

gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-33.c (new):
/* { dg-do link } */
/* { dg-options "-O" } */
extern void link_error ();
int a[256];
void foo(int n)
{
int *p;
for (p = a; n != 0; --n, ++p)
;
if ((__SIZE_TYPE__)p & (sizeof (int) - 1))
link_error ();
}
int main()
{
return 0;
}

gcc/testsuite/gcc.dg/tree-ssa/ssa-dce-3.c:
@@ -21,11 +21,15 @@ int main(void)
return 0;
}

/* We now can prove the infiniteness of the loop during CCP and fail
to eliminate the code inside the infinite loop because we start
by marking the j % 7 condition as useful. See PR45178. */
/* We should eliminate the inner condition, but the loop must be preserved
as it is infinite. Therefore there should be just one phi node (for i): */
/* { dg-final { scan-tree-dump-times "PHI " 1 "cddce1" { xfail *-*-* } } } */
/* And one if (for the exit condition of the loop): */
/* { dg-final { scan-tree-dump-times "if " 1 "cddce1" } } */
/* { dg-final { cleanup-tree-dump "cddce1" } } */

gcc/tree-ssa-ccp.c:
@@ -150,6 +150,10 @@ struct prop_value_d {
/* Propagated value. */
tree value;

/* Mask that applies to the propagated value during CCP. For
X with a CONSTANT lattice value X & ~mask == value & ~mask. */
double_int mask;
};
typedef struct prop_value_d prop_value_t;
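
For orientation: a CONSTANT lattice value is now a (value, mask) pair where a set mask bit marks an unknown bit. A minimal sketch of the invariant stated in the comment, using uint64_t in place of double_int (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* value 2, mask 1: bit 0 is unknown, so X is either 2 or 3.  */
  uint64_t value = 2, mask = 1;
  uint64_t x;
  for (x = 2; x <= 3; ++x)
    /* X & ~mask == value & ~mask for every X this value describes.  */
    assert ((x & ~mask) == (value & ~mask));
  return 0;
}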
@@ -183,7 +187,18 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
break;
case CONSTANT:
fprintf (outf, "%sCONSTANT ", prefix);
if (TREE_CODE (val.value) != INTEGER_CST
|| double_int_zero_p (val.mask))
print_generic_expr (outf, val.value, dump_flags);
else
{
double_int cval = double_int_and_not (tree_to_double_int (val.value),
val.mask);
fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
prefix, cval.high, cval.low);
fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
val.mask.high, val.mask.low);
}
break;
default:
gcc_unreachable ();
@@ -225,7 +240,7 @@ static prop_value_t
get_default_value (tree var)
{
tree sym = SSA_NAME_VAR (var);
prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
gimple stmt;

stmt = SSA_NAME_DEF_STMT (var);
@@ -240,7 +255,10 @@ get_default_value (tree var)
&& TREE_CODE (sym) == VAR_DECL)
val.lattice_val = UNDEFINED;
else
{
val.lattice_val = VARYING;
val.mask = double_int_minus_one;
}
}
else if (is_gimple_assign (stmt)
/* Value-returning GIMPLE_CALL statements assign to
@@ -266,6 +284,7 @@ get_default_value (tree var)
{
/* Otherwise, VAR will never take on a constant value. */
val.lattice_val = VARYING;
val.mask = double_int_minus_one;
}

return val;
@@ -297,7 +316,10 @@ static inline tree
get_constant_value (tree var)
{
prop_value_t *val = get_value (var);
if (val
&& val->lattice_val == CONSTANT
&& (TREE_CODE (val->value) != INTEGER_CST
|| double_int_zero_p (val->mask)))
return val->value;
return NULL_TREE;
}
@@ -311,6 +333,7 @@ set_value_varying (tree var)
val->lattice_val = VARYING;
val->value = NULL_TREE;
val->mask = double_int_minus_one;
}
/* For float types, modify the value of VAL to make ccp work correctly
@@ -360,6 +383,42 @@ canonicalize_float_value (prop_value_t *val)
}
}
/* Return whether the lattice transition is valid. */
static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
/* Lattice transitions must always be monotonically increasing in
value. */
if (old_val.lattice_val < new_val.lattice_val)
return true;
if (old_val.lattice_val != new_val.lattice_val)
return false;
if (!old_val.value && !new_val.value)
return true;
/* Now both lattice values are CONSTANT. */
/* Allow transitioning from &x to &x & ~3. */
if (TREE_CODE (old_val.value) != INTEGER_CST
&& TREE_CODE (new_val.value) == INTEGER_CST)
return true;
/* Bit-lattices have to agree in the still valid bits. */
if (TREE_CODE (old_val.value) == INTEGER_CST
&& TREE_CODE (new_val.value) == INTEGER_CST)
return double_int_equal_p
(double_int_and_not (tree_to_double_int (old_val.value),
new_val.mask),
double_int_and_not (tree_to_double_int (new_val.value),
new_val.mask));
/* Otherwise constant values have to agree. */
return operand_equal_p (old_val.value, new_val.value, 0);
}
/* Set the value for variable VAR to NEW_VAL. Return true if the new
value is different from VAR's previous value. */
@@ -371,17 +430,34 @@ set_lattice_value (tree var, prop_value_t new_val)
canonicalize_float_value (&new_val);

/* We have to be careful to not go up the bitwise lattice
represented by the mask.
??? This doesn't seem to be the best place to enforce this. */
if (new_val.lattice_val == CONSTANT
&& old_val->lattice_val == CONSTANT
&& TREE_CODE (new_val.value) == INTEGER_CST
&& TREE_CODE (old_val->value) == INTEGER_CST)
{
double_int diff;
diff = double_int_xor (tree_to_double_int (new_val.value),
tree_to_double_int (old_val->value));
new_val.mask = double_int_ior (new_val.mask,
double_int_ior (old_val->mask, diff));
}
gcc_assert (valid_lattice_transition (*old_val, new_val));

/* If *OLD_VAL and NEW_VAL are the same, return false to inform the
caller that this was a non-transition. */
if (old_val->lattice_val != new_val.lattice_val
|| (new_val.lattice_val == CONSTANT
&& TREE_CODE (new_val.value) == INTEGER_CST
&& (TREE_CODE (old_val->value) != INTEGER_CST
|| !double_int_equal_p (new_val.mask, old_val->mask))))
{
/* ??? We would like to delay creation of INTEGER_CSTs from
partially constants here. */
if (dump_file && (dump_flags & TDF_DETAILS))
{
dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
@@ -397,31 +473,142 @@ set_lattice_value (tree var, prop_value_t new_val)
return false;
}
static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
tree, double_int, double_int,
tree, double_int, double_int);
/* Return a double_int that can be used for bitwise simplifications
from VAL. */
static double_int
value_to_double_int (prop_value_t val)
{
if (val.value
&& TREE_CODE (val.value) == INTEGER_CST)
return tree_to_double_int (val.value);
else
return double_int_zero;
}
/* Return the value for the address expression EXPR based on alignment
information. */
static prop_value_t
get_value_from_alignment (tree expr)
{
prop_value_t val;
HOST_WIDE_INT bitsize, bitpos;
tree base, offset;
enum machine_mode mode;
int align;
gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
base = get_inner_reference (TREE_OPERAND (expr, 0),
&bitsize, &bitpos, &offset,
&mode, &align, &align, false);
if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF)
val = get_value_for_expr (TREE_OPERAND (base, 0), true);
else if (TREE_CODE (base) == MEM_REF)
val = bit_value_binop (PLUS_EXPR, TREE_TYPE (expr),
TREE_OPERAND (base, 0), TREE_OPERAND (base, 1));
else if (base
&& ((align = get_object_alignment (base, BITS_PER_UNIT,
BIGGEST_ALIGNMENT))
> BITS_PER_UNIT))
{
val.lattice_val = CONSTANT;
/* We assume pointers are zero-extended. */
val.mask = double_int_and_not
(double_int_mask (TYPE_PRECISION (TREE_TYPE (expr))),
uhwi_to_double_int (align / BITS_PER_UNIT - 1));
val.value = build_int_cst (TREE_TYPE (expr), 0);
}
else
{
val.lattice_val = VARYING;
val.mask = double_int_minus_one;
val.value = NULL_TREE;
}
if (bitpos != 0)
{
double_int value, mask;
bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
TREE_TYPE (expr), value_to_double_int (val), val.mask,
TREE_TYPE (expr),
shwi_to_double_int (bitpos / BITS_PER_UNIT),
double_int_zero);
val.lattice_val = double_int_minus_one_p (mask) ? VARYING : CONSTANT;
val.mask = mask;
if (val.lattice_val == CONSTANT)
val.value = double_int_to_tree (TREE_TYPE (expr), value);
else
val.value = NULL_TREE;
}
/* ??? We should handle i * 4 and more complex expressions from
the offset, possibly by just expanding get_value_for_expr. */
if (offset != NULL_TREE)
{
double_int value, mask;
prop_value_t oval = get_value_for_expr (offset, true);
bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
TREE_TYPE (expr), value_to_double_int (val), val.mask,
TREE_TYPE (expr), value_to_double_int (oval),
oval.mask);
val.mask = mask;
if (double_int_minus_one_p (mask))
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
}
else
{
val.lattice_val = CONSTANT;
val.value = double_int_to_tree (TREE_TYPE (expr), value);
}
}
return val;
}
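
The alignment case above can be checked with plain integer arithmetic: a pointer known to be aligned to A bytes has its low log2(A) bits known zero and all other bits unknown. A small sketch of that mask computation, assuming 32-bit pointer precision (illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  unsigned align_bytes = 16;              /* pointer is 16-byte aligned */
  uint32_t precision_mask = 0xffffffffu;  /* assumed 32-bit pointers */
  /* Mirrors val.mask = precision_mask & ~(align / BITS_PER_UNIT - 1):
     the low four bits are known (zero), the upper bits are unknown.  */
  uint32_t mask = precision_mask & ~(uint32_t) (align_bytes - 1);
  printf ("value=0 mask=%#x\n", mask);    /* prints mask=0xfffffff0 */
  return 0;
}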
/* Return the value for the tree operand EXPR. If FOR_BITS_P is true
return constant bits extracted from alignment information for
invariant addresses. */
static prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
prop_value_t val;

if (TREE_CODE (expr) == SSA_NAME)
{
val = *get_value (expr);
if (for_bits_p
&& val.lattice_val == CONSTANT
&& TREE_CODE (val.value) == ADDR_EXPR)
val = get_value_from_alignment (val.value);
}
else if (is_gimple_min_invariant (expr)
&& (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
{
val.lattice_val = CONSTANT;
val.value = expr;
val.mask = double_int_zero;
canonicalize_float_value (&val);
}
else if (TREE_CODE (expr) == ADDR_EXPR)
val = get_value_from_alignment (expr);
else
{
val.lattice_val = VARYING;
val.mask = double_int_minus_one;
val.value = NULL_TREE;
}

return val;
}
/* Return the likely CCP lattice value for STMT.

If STMT has no operands, then return CONSTANT.
@@ -637,6 +824,7 @@ do_dbg_cnt (void)
if (!dbg_cnt (ccp))
{
const_val[i].lattice_val = VARYING;
const_val[i].mask = double_int_minus_one;
const_val[i].value = NULL_TREE;
}
}
@@ -692,23 +880,58 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
/* any M VARYING = VARYING. */
val1->lattice_val = VARYING;
val1->mask = double_int_minus_one;
val1->value = NULL_TREE;
}
else if (val1->lattice_val == CONSTANT
&& val2->lattice_val == CONSTANT
&& TREE_CODE (val1->value) == INTEGER_CST
&& TREE_CODE (val2->value) == INTEGER_CST)
{
/* Ci M Cj = Ci if (i == j)
Ci M Cj = VARYING if (i != j)

For INTEGER_CSTs mask unequal bits. If no equal bits remain,
drop to varying. */
val1->mask
= double_int_ior (double_int_ior (val1->mask,
val2->mask),
double_int_xor (tree_to_double_int (val1->value),
tree_to_double_int (val2->value)));
if (double_int_minus_one_p (val1->mask))
{
val1->lattice_val = VARYING;
val1->value = NULL_TREE;
}
}
else if (val1->lattice_val == CONSTANT
&& val2->lattice_val == CONSTANT
&& simple_cst_equal (val1->value, val2->value) == 1)
{
/* Ci M Cj = Ci if (i == j)
Ci M Cj = VARYING if (i != j)

VAL1 already contains the value we want for equivalent values. */
}
else if (val1->lattice_val == CONSTANT
&& val2->lattice_val == CONSTANT
&& (TREE_CODE (val1->value) == ADDR_EXPR
|| TREE_CODE (val2->value) == ADDR_EXPR))
{
/* When not equal addresses are involved try meeting for
alignment. */
prop_value_t tem = *val2;
if (TREE_CODE (val1->value) == ADDR_EXPR)
*val1 = get_value_for_expr (val1->value, true);
if (TREE_CODE (val2->value) == ADDR_EXPR)
tem = get_value_for_expr (val2->value, true);
ccp_lattice_meet (val1, &tem);
}
else
{
/* Any other combination is VARYING. */
val1->lattice_val = VARYING;
val1->mask = double_int_minus_one;
val1->value = NULL_TREE;
}
@@ -769,7 +992,7 @@ ccp_visit_phi_node (gimple phi)
if (e->flags & EDGE_EXECUTABLE)
{
tree arg = gimple_phi_arg (phi, i)->def;
prop_value_t arg_val = get_value_for_expr (arg, false);

ccp_lattice_meet (&new_val, &arg_val);
@@ -1334,6 +1557,347 @@ fold_const_aggregate_ref (tree t)
return NULL_TREE;
}
/* Apply the operation CODE in type TYPE to the value, mask pair
RVAL and RMASK representing a value of type RTYPE and set
the value, mask pair *VAL and *MASK to the result. */
static void
bit_value_unop_1 (enum tree_code code, tree type,
double_int *val, double_int *mask,
tree rtype, double_int rval, double_int rmask)
{
switch (code)
{
case BIT_NOT_EXPR:
*mask = rmask;
*val = double_int_not (rval);
break;
case NEGATE_EXPR:
{
double_int temv, temm;
/* Return ~rval + 1. */
bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
bit_value_binop_1 (PLUS_EXPR, type, val, mask,
type, temv, temm,
type, double_int_one, double_int_zero);
break;
}
CASE_CONVERT:
{
bool uns;
/* First extend mask and value according to the original type. */
uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
? 0 : TYPE_UNSIGNED (rtype));
*mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
*val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);
/* Then extend mask and value according to the target type. */
uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
? 0 : TYPE_UNSIGNED (type));
*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
*val = double_int_ext (*val, TYPE_PRECISION (type), uns);
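/* Example (not in the original patch): extending the signed 8-bit
pair (value 0x80, mask 0x7f) first makes bit 7 known one, then
sign-extends it to 32 bits, giving (value 0xffffff80, mask 0x7f):
the copies of the known sign bit are known as well. */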
break;
}
default:
*mask = double_int_minus_one;
break;
}
}
/* Apply the operation CODE in type TYPE to the value, mask pairs
R1VAL, R1MASK and R2VAL, R2MASK, representing values of types R1TYPE
and R2TYPE, and set the value, mask pair *VAL and *MASK to the result. */
static void
bit_value_binop_1 (enum tree_code code, tree type,
double_int *val, double_int *mask,
tree r1type, double_int r1val, double_int r1mask,
tree r2type, double_int r2val, double_int r2mask)
{
bool uns = (TREE_CODE (type) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
/* Assume we'll get a constant result. Use an initial varying value;
we fall back to varying in the end if necessary. */
*mask = double_int_minus_one;
switch (code)
{
case BIT_AND_EXPR:
/* The mask is constant where there is a known not
set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
*mask = double_int_and (double_int_ior (r1mask, r2mask),
double_int_and (double_int_ior (r1val, r1mask),
double_int_ior (r2val, r2mask)));
*val = double_int_and (r1val, r2val);
break;
case BIT_IOR_EXPR:
/* The mask is constant where there is a known
set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
*mask = double_int_and_not
(double_int_ior (r1mask, r2mask),
double_int_ior (double_int_and_not (r1val, r1mask),
double_int_and_not (r2val, r2mask)));
*val = double_int_ior (r1val, r2val);
break;
case BIT_XOR_EXPR:
/* m1 | m2 */
*mask = double_int_ior (r1mask, r2mask);
*val = double_int_xor (r1val, r2val);
break;
case LROTATE_EXPR:
case RROTATE_EXPR:
if (double_int_zero_p (r2mask))
{
HOST_WIDE_INT shift = r2val.low;
if (code == RROTATE_EXPR)
shift = -shift;
*mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
*val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
}
break;
case LSHIFT_EXPR:
case RSHIFT_EXPR:
/* ??? We can handle partially known shift counts if we know
their sign. That way we can tell that (x << (y | 8)) & 255
is zero. */
if (double_int_zero_p (r2mask))
{
HOST_WIDE_INT shift = r2val.low;
if (code == RSHIFT_EXPR)
shift = -shift;
/* We need to know if we are doing a left or a right shift
to properly shift in zeros for left shift and unsigned
right shifts and the sign bit for signed right shifts.
For signed right shifts we shift in varying in case
the sign bit was varying. */
if (shift > 0)
{
*mask = double_int_lshift (r1mask, shift,
TYPE_PRECISION (type), false);
*val = double_int_lshift (r1val, shift,
TYPE_PRECISION (type), false);
}
else if (shift < 0)
{
shift = -shift;
*mask = double_int_rshift (r1mask, shift,
TYPE_PRECISION (type), !uns);
*val = double_int_rshift (r1val, shift,
TYPE_PRECISION (type), !uns);
}
else
{
*mask = r1mask;
*val = r1val;
}
}
break;
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
{
double_int lo, hi;
/* Do the addition with unknown bits set to zero, to give carry-ins of
zero wherever possible. */
lo = double_int_add (double_int_and_not (r1val, r1mask),
double_int_and_not (r2val, r2mask));
lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
/* Do the addition with unknown bits set to one, to give carry-ins of
one wherever possible. */
hi = double_int_add (double_int_ior (r1val, r1mask),
double_int_ior (r2val, r2mask));
hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
/* Each bit in the result is known if (a) the corresponding bits in
both inputs are known, and (b) the carry-in to that bit position
is known. We can check condition (b) by seeing if we got the same
result with minimised carries as with maximised carries. */
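/* Worked example (not in the original patch): r1 = ?0 (value 00,
mask 10) plus the constant 01 gives lo = 00 + 01 = 01 and
hi = 10 + 01 = 11, so lo ^ hi = 10 and the result is
(value 01, mask 10): bit 0 is known one, bit 1 stays unknown. */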
*mask = double_int_ior (double_int_ior (r1mask, r2mask),
double_int_xor (lo, hi));
*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
/* It shouldn't matter whether we choose lo or hi here. */
*val = lo;
break;
}
case MINUS_EXPR:
{
double_int temv, temm;
bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
r2type, r2val, r2mask);
bit_value_binop_1 (PLUS_EXPR, type, val, mask,
r1type, r1val, r1mask,
r2type, temv, temm);
break;
}
case MULT_EXPR:
{
/* Just track trailing zeros in both operands and transfer
them to the other. */
int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
{
*mask = double_int_zero;
*val = double_int_zero;
}
else if (r1tz + r2tz > 0)
{
*mask = double_int_not (double_int_mask (r1tz + r2tz));
*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
*val = double_int_zero;
}
break;
}
case EQ_EXPR:
case NE_EXPR:
{
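/* If the bits known on both sides already differ, the comparison
result is known. Example (not in the original patch): r1 = ?0
(value 00, mask 10) and the constant 01 differ in known bit 0, so
EQ_EXPR is 0 and NE_EXPR is 1 whatever the unknown bit is. */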
double_int m = double_int_ior (r1mask, r2mask);
if (!double_int_equal_p (double_int_and_not (r1val, m),
double_int_and_not (r2val, m)))
{
*mask = double_int_zero;
*val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
}
else
{
/* We know the result of a comparison is always one or zero. */
*mask = double_int_one;
*val = double_int_zero;
}
break;
}
case GE_EXPR:
case GT_EXPR:
{
double_int tem = r1val;
r1val = r2val;
r2val = tem;
tem = r1mask;
r1mask = r2mask;
r2mask = tem;
code = swap_tree_comparison (code);
}
/* Fallthru. */
case LT_EXPR:
case LE_EXPR:
{
int minmax, maxmin;
/* If the most significant bits are not known we know nothing. */
if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
break;
/* If we know the most significant bits we know the value
ranges by means of treating varying bits as zero
or one. Do a cross comparison of the max/min pairs. */
maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
double_int_and_not (r2val, r2mask), uns);
minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
double_int_ior (r2val, r2mask), uns);
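/* Example (not in the original patch): for unsigned r1 = 0? (value
00, mask 01, thus 0 or 1) and the constant r2 = 10, max(r1) = 01
is below min(r2) = 10, so maxmin < 0 and r1 < r2 is known true. */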
if (maxmin < 0) /* r1 is less than r2. */
{
*mask = double_int_zero;
*val = double_int_one;
}
else if (minmax > 0) /* r1 is not less or equal to r2. */
{
*mask = double_int_zero;
*val = double_int_zero;
}
else if (maxmin == minmax) /* r1 and r2 are equal. */
{
/* This probably should never happen as we'd have
folded the thing during fully constant value folding. */
*mask = double_int_zero;
*val = (code == LE_EXPR ? double_int_one : double_int_zero);
}
else
{
/* We know the result of a comparison is always one or zero. */
*mask = double_int_one;
*val = double_int_zero;
}
break;
}
default:;
}
}
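
The transfer functions above are self-contained bit tricks and can be exercised outside GCC. A minimal standalone sketch of the BIT_AND_EXPR rule, using uint64_t pairs in place of double_int (names are hypothetical, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Bit i is known iff mask bit i is 0; known bits equal value bits.  */
struct bitlat { uint64_t val, mask; };

/* AND: a result bit is known wherever either side has a known zero,
   i.e. mask = (m1 | m2) & (v1 | m1) & (v2 | m2), as above.  */
static struct bitlat
bitlat_and (struct bitlat a, struct bitlat b)
{
  struct bitlat r;
  r.mask = (a.mask | b.mask) & (a.val | a.mask) & (b.val | b.mask);
  r.val = a.val & b.val;
  return r;
}

int
main (void)
{
  struct bitlat unknown = { 0, ~0ull };  /* nothing known */
  struct bitlat one = { 1, 0 };          /* constant 1 */
  struct bitlat r = bitlat_and (unknown, one);
  /* Prints val=0 mask=1: every bit except bit 0 is known zero.  */
  printf ("val=%llx mask=%llx\n",
          (unsigned long long) r.val, (unsigned long long) r.mask);
  return 0;
}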
/* Return the propagation value when applying the operation CODE to
the value RHS yielding type TYPE. */
static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
prop_value_t rval = get_value_for_expr (rhs, true);
double_int value, mask;
prop_value_t val;
gcc_assert ((rval.lattice_val == CONSTANT
&& TREE_CODE (rval.value) == INTEGER_CST)
|| double_int_minus_one_p (rval.mask));
bit_value_unop_1 (code, type, &value, &mask,
TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
if (!double_int_minus_one_p (mask))
{
val.lattice_val = CONSTANT;
val.mask = mask;
/* ??? Delay building trees here. */
val.value = double_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
val.mask = double_int_minus_one;
}
return val;
}
/* Return the propagation value when applying the operation CODE to
the values RHS1 and RHS2 yielding type TYPE. */
static prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
prop_value_t r1val = get_value_for_expr (rhs1, true);
prop_value_t r2val = get_value_for_expr (rhs2, true);
double_int value, mask;
prop_value_t val;
gcc_assert ((r1val.lattice_val == CONSTANT
&& TREE_CODE (r1val.value) == INTEGER_CST)
|| double_int_minus_one_p (r1val.mask));
gcc_assert ((r2val.lattice_val == CONSTANT
&& TREE_CODE (r2val.value) == INTEGER_CST)
|| double_int_minus_one_p (r2val.mask));
bit_value_binop_1 (code, type, &value, &mask,
TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
if (!double_int_minus_one_p (mask))
{
val.lattice_val = CONSTANT;
val.mask = mask;
/* ??? Delay building trees here. */
val.value = double_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
val.mask = double_int_minus_one;
}
return val;
}
/* Evaluate statement STMT.
Valid only for assignments, calls, conditionals, and switches. */
@@ -1343,9 +1907,26 @@ evaluate_stmt (gimple stmt)
prop_value_t val;
tree simplified = NULL_TREE;
ccp_lattice_t likelyvalue = likely_value (stmt);
bool is_constant = false;

if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "which is likely ");
switch (likelyvalue)
{
case CONSTANT:
fprintf (dump_file, "CONSTANT");
break;
case UNDEFINED:
fprintf (dump_file, "UNDEFINED");
break;
case VARYING:
fprintf (dump_file, "VARYING");
break;
default:;
}
fprintf (dump_file, "\n");
}
/* If the statement is likely to have a CONSTANT result, then try
to fold the statement to determine the constant value. */
@@ -1353,7 +1934,19 @@ evaluate_stmt (gimple stmt)
Since likely_value never returns CONSTANT for calls, we will
not attempt to fold them, including builtins that may profit. */
if (likelyvalue == CONSTANT)
{
fold_defer_overflow_warnings ();
simplified = ccp_fold (stmt);
is_constant = simplified && is_gimple_min_invariant (simplified);
fold_undefer_overflow_warnings (is_constant, stmt, 0);
if (is_constant)
{
/* The statement produced a constant value. */
val.lattice_val = CONSTANT;
val.value = simplified;
val.mask = double_int_zero;
}
}
/* If the statement is likely to have a VARYING result, then do not
bother folding the statement. */
else if (likelyvalue == VARYING)
@@ -1373,46 +1966,85 @@ evaluate_stmt (gimple stmt)
else
/* These cannot satisfy is_gimple_min_invariant without folding. */
gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
is_constant = simplified && is_gimple_min_invariant (simplified);
if (is_constant)
{
/* The statement produced a constant value. */
val.lattice_val = CONSTANT;
val.value = simplified;
val.mask = double_int_zero;
}
} }
/* Resort to simplification for bitwise tracking. */
if (flag_tree_bit_ccp
&& likelyvalue == CONSTANT
&& !is_constant)
{
enum gimple_code code = gimple_code (stmt);
val.lattice_val = VARYING;
val.value = NULL_TREE;
val.mask = double_int_minus_one;
if (code == GIMPLE_ASSIGN)
{
enum tree_code subcode = gimple_assign_rhs_code (stmt);
tree rhs1 = gimple_assign_rhs1 (stmt);
switch (get_gimple_rhs_class (subcode))
{
case GIMPLE_SINGLE_RHS:
if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
|| POINTER_TYPE_P (TREE_TYPE (rhs1)))
val = get_value_for_expr (rhs1, true);
break;

case GIMPLE_UNARY_RHS:
if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
|| POINTER_TYPE_P (TREE_TYPE (rhs1)))
&& (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
|| POINTER_TYPE_P (gimple_expr_type (stmt))))
val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
break;

case GIMPLE_BINARY_RHS:
if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
|| POINTER_TYPE_P (TREE_TYPE (rhs1)))
{
tree rhs2 = gimple_assign_rhs2 (stmt);
val = bit_value_binop (subcode,
TREE_TYPE (rhs1), rhs1, rhs2);
}
break;

default:;
}
}
else if (code == GIMPLE_COND)
{
enum tree_code code = gimple_cond_code (stmt);
tree rhs1 = gimple_cond_lhs (stmt);
tree rhs2 = gimple_cond_rhs (stmt);
if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
|| POINTER_TYPE_P (TREE_TYPE (rhs1)))
val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
}
is_constant = (val.lattice_val == CONSTANT);
}
if (!is_constant)
{
/* The statement produced a nonconstant value. If the statement
had UNDEFINED operands, then the result of the statement
should be UNDEFINED. Otherwise, the statement is VARYING. */
if (likelyvalue == UNDEFINED)
{
val.lattice_val = likelyvalue;
val.mask = double_int_zero;
}
else
{
val.lattice_val = VARYING;
val.mask = double_int_minus_one;
}
val.value = NULL_TREE;
}
@@ -1438,9 +2070,18 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
fold more conditionals here. */
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
|| !double_int_zero_p (val.mask))
return false;
if (dump_file)
{
fprintf (dump_file, "Folding predicate ");
print_gimple_expr (dump_file, stmt, 0, 0);
fprintf (dump_file, " to ");
print_generic_expr (dump_file, val.value, 0);
fprintf (dump_file, "\n");
}
if (integer_zerop (val.value))
gimple_cond_make_false (stmt);
else
@@ -1583,12 +2224,15 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p)
block = gimple_bb (stmt);
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
|| !double_int_zero_p (val.mask))
return SSA_PROP_VARYING;
/* Find which edge out of the conditional block will be taken and add it
to the worklist. If no single edge can be determined statically,
return SSA_PROP_VARYING to feed all the outgoing edges to the
propagation engine. */
*taken_edge_p = find_taken_edge (block, val.value);
if (*taken_edge_p)
return SSA_PROP_INTERESTING;
else
@@ -1653,7 +2297,7 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
Mark them VARYING. */
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
{
prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
set_lattice_value (def, v);
}