Commit 5f9d2c58 authored by Aldy Hernandez, committed by Aldy Hernandez

fold-const.c (int_const_binop_1): Abstract...

	* fold-const.c (int_const_binop_1): Abstract...
	(wide_int_binop): ...wide int code here.
	(poly_int_binop): ...poly int code here.
	(tree_binop): ...tree code here.
	* fold-const.h (wide_int_binop): New.
	* tree-vrp.c (vrp_int_const_binop): Call wide_int_binop.
	Remove useless PLUS/MINUS_EXPR case.
	(zero_nonzero_bits_from_vr): Move wide int code...
	(zero_nonzero_bits_from_bounds): ...here.
	(extract_range_from_binary_expr_1): Move mask optimization code...
	(range_easy_mask_min_max): ...here.
	* tree-vrp.h (zero_nonzero_bits_from_bounds): New.
	(range_easy_mask_min_max): New.

From-SVN: r262676
parent 5933c685
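Note on the change below: int_const_binop_1 is split so that the wide_int arithmetic lives in a new, exported wide_int_binop, the poly_int handling in a static poly_int_binop, and the public int_const_binop dispatches between them before wrapping the result with force_fit_type. A minimal caller-side sketch of the new wide_int_binop interface follows; it mirrors what the new int_const_binop does in the diff, but the function name fold_plus_sketch and the abbreviated include list are illustrative only and assume fold-const.c's usual internal environment.

/* Illustrative only: a hypothetical fold-const.c-style caller of the new
   wide_int_binop interface.  It folds C1 PLUS_EXPR C2 for two INTEGER_CSTs
   and returns NULL_TREE when the operation cannot be evaluated.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "fold-const.h"

static tree
fold_plus_sketch (tree c1, tree c2)
{
  tree type = TREE_TYPE (c1);
  signop sign = TYPE_SIGN (type);
  wi::overflow_type overflow;
  wide_int res;

  /* wide_int_binop returns false for cases it cannot evaluate,
     e.g. division by zero or an unsupported tree code.  */
  if (!wide_int_binop (res, PLUS_EXPR, wi::to_wide (c1),
		       wi::to_wide (c2, TYPE_PRECISION (type)),
		       sign, &overflow))
    return NULL_TREE;

  /* Wrap the raw wide_int back into a tree constant, noting overflow
     the same way the new int_const_binop does.  */
  return force_fit_type (type, res, 1,
			 ((sign == SIGNED && overflow != wi::OVF_NONE)
			  | TREE_OVERFLOW (c1) | TREE_OVERFLOW (c2)));
}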
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
+2018-07-16  Aldy Hernandez  <aldyh@redhat.com>
+
+	* fold-const.c (int_const_binop_1): Abstract...
+	(wide_int_binop): ...wide int code here.
+	(poly_int_binop): ...poly int code here.
+	Abstract the rest of int_const_binop_1 into int_const_binop.
+	* fold-const.h (wide_int_binop): New.
+	* tree-vrp.c (vrp_int_const_binop): Call wide_int_binop.
+	Remove useless PLUS/MINUS_EXPR case.
+	(zero_nonzero_bits_from_vr): Move wide int code...
+	(zero_nonzero_bits_from_bounds): ...here.
+	(extract_range_from_binary_expr_1): Move mask optimization code...
+	(range_easy_mask_min_max): ...here.
+	* tree-vrp.h (zero_nonzero_bits_from_bounds): New.
+	(range_easy_mask_min_max): New.
+
 2018-07-15  Jeff Law  <law@redhat.com>
 
 	PR target/85993
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -966,21 +966,17 @@ int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2
 	 && TYPE_MODE (type1) == TYPE_MODE (type2);
 }
 
-/* Subroutine of int_const_binop_1 that handles two INTEGER_CSTs.  */
+/* Combine two wide ints ARG1 and ARG2 under operation CODE to produce
+   a new constant in RES.  Return FALSE if we don't know how to
+   evaluate CODE at compile-time.  */
 
-static tree
-int_const_binop_2 (enum tree_code code, const_tree parg1, const_tree parg2,
-		   int overflowable)
+bool
+wide_int_binop (wide_int &res,
+		enum tree_code code, const wide_int &arg1, const wide_int &arg2,
+		signop sign, wi::overflow_type *overflow)
 {
-  wide_int res;
-  tree t;
-  tree type = TREE_TYPE (parg1);
-  signop sign = TYPE_SIGN (type);
-  wi::overflow_type overflow = wi::OVF_NONE;
-
-  wi::tree_to_wide_ref arg1 = wi::to_wide (parg1);
-  wide_int arg2 = wi::to_wide (parg2, TYPE_PRECISION (type));
+  wide_int tmp;
+  *overflow = wi::OVF_NONE;
 
   switch (code)
     {
     case BIT_IOR_EXPR:
@@ -999,49 +995,53 @@ int_const_binop_2 (enum tree_code code, const_tree parg1, const_tree parg2,
     case LSHIFT_EXPR:
       if (wi::neg_p (arg2))
 	{
-	  arg2 = -arg2;
+	  tmp = -arg2;
 	  if (code == RSHIFT_EXPR)
 	    code = LSHIFT_EXPR;
 	  else
 	    code = RSHIFT_EXPR;
 	}
+      else
+	tmp = arg2;
 
       if (code == RSHIFT_EXPR)
 	/* It's unclear from the C standard whether shifts can overflow.
 	   The following code ignores overflow; perhaps a C standard
 	   interpretation ruling is needed.  */
-	res = wi::rshift (arg1, arg2, sign);
+	res = wi::rshift (arg1, tmp, sign);
       else
-	res = wi::lshift (arg1, arg2);
+	res = wi::lshift (arg1, tmp);
       break;
 
     case RROTATE_EXPR:
     case LROTATE_EXPR:
       if (wi::neg_p (arg2))
 	{
-	  arg2 = -arg2;
+	  tmp = -arg2;
 	  if (code == RROTATE_EXPR)
 	    code = LROTATE_EXPR;
 	  else
 	    code = RROTATE_EXPR;
 	}
+      else
+	tmp = arg2;
 
       if (code == RROTATE_EXPR)
-	res = wi::rrotate (arg1, arg2);
+	res = wi::rrotate (arg1, tmp);
       else
-	res = wi::lrotate (arg1, arg2);
+	res = wi::lrotate (arg1, tmp);
       break;
 
     case PLUS_EXPR:
-      res = wi::add (arg1, arg2, sign, &overflow);
+      res = wi::add (arg1, arg2, sign, overflow);
       break;
 
     case MINUS_EXPR:
-      res = wi::sub (arg1, arg2, sign, &overflow);
+      res = wi::sub (arg1, arg2, sign, overflow);
       break;
 
     case MULT_EXPR:
-      res = wi::mul (arg1, arg2, sign, &overflow);
+      res = wi::mul (arg1, arg2, sign, overflow);
       break;
 
     case MULT_HIGHPART_EXPR:
@@ -1051,50 +1051,50 @@ int_const_binop_2 (enum tree_code code, const_tree parg1, const_tree parg2,
     case TRUNC_DIV_EXPR:
     case EXACT_DIV_EXPR:
       if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::div_trunc (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::div_trunc (arg1, arg2, sign, overflow);
       break;
 
     case FLOOR_DIV_EXPR:
       if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::div_floor (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::div_floor (arg1, arg2, sign, overflow);
       break;
 
     case CEIL_DIV_EXPR:
       if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::div_ceil (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::div_ceil (arg1, arg2, sign, overflow);
       break;
 
     case ROUND_DIV_EXPR:
      if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::div_round (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::div_round (arg1, arg2, sign, overflow);
      break;
 
     case TRUNC_MOD_EXPR:
       if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::mod_trunc (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::mod_trunc (arg1, arg2, sign, overflow);
       break;
 
     case FLOOR_MOD_EXPR:
       if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::mod_floor (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::mod_floor (arg1, arg2, sign, overflow);
       break;
 
     case CEIL_MOD_EXPR:
       if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::mod_ceil (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::mod_ceil (arg1, arg2, sign, overflow);
       break;
 
     case ROUND_MOD_EXPR:
       if (arg2 == 0)
-	return NULL_TREE;
-      res = wi::mod_round (arg1, arg2, sign, &overflow);
+	return false;
+      res = wi::mod_round (arg1, arg2, sign, overflow);
       break;
 
     case MIN_EXPR:
@@ -1106,55 +1106,41 @@ int_const_binop_2 (enum tree_code code, const_tree parg1, const_tree parg2,
       break;
 
     default:
-      return NULL_TREE;
+      return false;
     }
+  return true;
+}
 
-  t = force_fit_type (type, res, overflowable,
-		      (((sign == SIGNED || overflowable == -1)
-			&& overflow)
-		       | TREE_OVERFLOW (parg1) | TREE_OVERFLOW (parg2)));
-  return t;
-}
-
-/* Combine two integer constants PARG1 and PARG2 under operation CODE
-   to produce a new constant.  Return NULL_TREE if we don't know how
-   to evaluate CODE at compile-time.  */
+/* Combine two poly int's ARG1 and ARG2 under operation CODE to
+   produce a new constant in RES.  Return FALSE if we don't know how
+   to evaluate CODE at compile-time.  */
 
-static tree
-int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
-		   int overflowable)
+static bool
+poly_int_binop (poly_wide_int &res, enum tree_code code,
+		const_tree arg1, const_tree arg2,
+		signop sign, wi::overflow_type *overflow)
 {
-  if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg2) == INTEGER_CST)
-    return int_const_binop_2 (code, arg1, arg2, overflowable);
-
   gcc_assert (NUM_POLY_INT_COEFFS != 1);
+  gcc_assert (poly_int_tree_p (arg1) && poly_int_tree_p (arg2));
 
-  if (poly_int_tree_p (arg1) && poly_int_tree_p (arg2))
-    {
-      poly_wide_int res;
-      wi::overflow_type overflow;
-      tree type = TREE_TYPE (arg1);
-      signop sign = TYPE_SIGN (type);
-
   switch (code)
     {
     case PLUS_EXPR:
       res = wi::add (wi::to_poly_wide (arg1),
-		     wi::to_poly_wide (arg2), sign, &overflow);
+		     wi::to_poly_wide (arg2), sign, overflow);
       break;
 
     case MINUS_EXPR:
       res = wi::sub (wi::to_poly_wide (arg1),
-		     wi::to_poly_wide (arg2), sign, &overflow);
+		     wi::to_poly_wide (arg2), sign, overflow);
       break;
 
     case MULT_EXPR:
       if (TREE_CODE (arg2) == INTEGER_CST)
 	res = wi::mul (wi::to_poly_wide (arg1),
-		       wi::to_wide (arg2), sign, &overflow);
+		       wi::to_wide (arg2), sign, overflow);
       else if (TREE_CODE (arg1) == INTEGER_CST)
 	res = wi::mul (wi::to_poly_wide (arg2),
-		       wi::to_wide (arg1), sign, &overflow);
+		       wi::to_wide (arg1), sign, overflow);
       else
 	return NULL_TREE;
       break;
@@ -1163,32 +1149,51 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
       if (TREE_CODE (arg2) == INTEGER_CST)
 	res = wi::to_poly_wide (arg1) << wi::to_wide (arg2);
       else
-	return NULL_TREE;
+	return false;
       break;
 
     case BIT_IOR_EXPR:
       if (TREE_CODE (arg2) != INTEGER_CST
 	  || !can_ior_p (wi::to_poly_wide (arg1), wi::to_wide (arg2),
 			 &res))
-	return NULL_TREE;
+	return false;
      break;
 
     default:
-      return NULL_TREE;
-	}
-
-      return force_fit_type (type, res, overflowable,
-			     (((sign == SIGNED || overflowable == -1)
-			       && overflow)
-			      | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)));
+      return false;
     }
-
-  return NULL_TREE;
+  return true;
 }
 
+/* Combine two integer constants ARG1 and ARG2 under operation CODE to
+   produce a new constant.  Return NULL_TREE if we don't know how to
+   evaluate CODE at compile-time.  */
+
 tree
-int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2)
+int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2,
+		 int overflowable)
 {
-  return int_const_binop_1 (code, arg1, arg2, 1);
+  bool success = false;
+  poly_wide_int poly_res;
+  tree type = TREE_TYPE (arg1);
+  signop sign = TYPE_SIGN (type);
+  wi::overflow_type overflow = wi::OVF_NONE;
+
+  if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg2) == INTEGER_CST)
+    {
+      wide_int warg1 = wi::to_wide (arg1), res;
+      wide_int warg2 = wi::to_wide (arg2, TYPE_PRECISION (type));
+      success = wide_int_binop (res, code, warg1, warg2, sign, &overflow);
+      poly_res = res;
+    }
+  else if (poly_int_tree_p (arg1) && poly_int_tree_p (arg2))
+    success = poly_int_binop (poly_res, code, arg1, arg2, sign, &overflow);
+  if (success)
+    return force_fit_type (type, poly_res, overflowable,
+			   (((sign == SIGNED || overflowable == -1)
+			     && overflow)
+			    | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)));
+  return NULL_TREE;
 }
 
 /* Return true if binary operation OP distributes over addition in operand
@@ -1925,7 +1930,7 @@ size_binop_loc (location_t loc, enum tree_code code, tree arg0, tree arg1)
       /* Handle general case of two integer constants.  For sizetype
 	 constant calculations we always want to know about overflow,
 	 even in the unsigned case.  */
-      tree res = int_const_binop_1 (code, arg0, arg1, -1);
+      tree res = int_const_binop (code, arg0, arg1, -1);
       if (res != NULL_TREE)
 	return res;
     }
--- a/gcc/fold-const.h
+++ b/gcc/fold-const.h
@@ -100,7 +100,10 @@ extern tree fold_bit_and_mask (tree, tree, enum tree_code,
 				tree, enum tree_code, tree, tree,
 				tree, enum tree_code, tree, tree, tree *);
 extern tree fold_read_from_constant_string (tree);
-extern tree int_const_binop (enum tree_code, const_tree, const_tree);
+extern bool wide_int_binop (wide_int &res, enum tree_code,
+			    const wide_int &arg1, const wide_int &arg2,
+			    signop, wi::overflow_type *);
+extern tree int_const_binop (enum tree_code, const_tree, const_tree, int = 1);
 #define build_fold_addr_expr(T)\
 	build_fold_addr_expr_loc (UNKNOWN_LOCATION, (T))
 extern tree build_fold_addr_expr_loc (location_t, tree);
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -956,11 +956,13 @@ value_range_constant_singleton (value_range *vr)
   return NULL_TREE;
 }
 
-/* Wrapper around int_const_binop.  Return true if we can compute the
-   result; i.e. if the operation doesn't overflow or if the overflow is
-   undefined.  In the latter case (if the operation overflows and
-   overflow is undefined), then adjust the result to be -INF or +INF
-   depending on CODE, VAL1 and VAL2.  Return the value in *RES.
+/* Wrapper around wide_int_binop that adjusts for overflow.
+
+   Return true if we can compute the result; i.e. if the operation
+   doesn't overflow or if the overflow is undefined.  In the latter
+   case (if the operation overflows and overflow is undefined), then
+   adjust the result to be -INF or +INF depending on CODE, VAL1 and
+   VAL2.  Return the value in *RES.
 
    Return false for division by zero, for which the result is
    indeterminate.  */
@@ -970,78 +972,36 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
 {
   wi::overflow_type overflow = wi::OVF_NONE;
   signop sign = TYPE_SIGN (TREE_TYPE (val1));
+  wide_int w1 = wi::to_wide (val1);
+  wide_int w2 = wi::to_wide (val2);
 
   switch (code)
     {
     case RSHIFT_EXPR:
     case LSHIFT_EXPR:
-      {
-	wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
-	if (wi::neg_p (wval2))
-	  {
-	    wval2 = -wval2;
-	    if (code == RSHIFT_EXPR)
-	      code = LSHIFT_EXPR;
-	    else
-	      code = RSHIFT_EXPR;
-	  }
-
-	if (code == RSHIFT_EXPR)
-	  /* It's unclear from the C standard whether shifts can overflow.
-	     The following code ignores overflow; perhaps a C standard
-	     interpretation ruling is needed.  */
-	  *res = wi::rshift (wi::to_wide (val1), wval2, sign);
-	else
-	  *res = wi::lshift (wi::to_wide (val1), wval2);
-	break;
-      }
+      w2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
+      /* FALLTHRU */
     case MULT_EXPR:
-      *res = wi::mul (wi::to_wide (val1),
-		      wi::to_wide (val2), sign, &overflow);
-      break;
-
     case TRUNC_DIV_EXPR:
     case EXACT_DIV_EXPR:
-      if (val2 == 0)
-	return false;
-      else
-	*res = wi::div_trunc (wi::to_wide (val1),
-			      wi::to_wide (val2), sign, &overflow);
-      break;
-
     case FLOOR_DIV_EXPR:
-      if (val2 == 0)
-	return false;
-      *res = wi::div_floor (wi::to_wide (val1),
-			    wi::to_wide (val2), sign, &overflow);
-      break;
-
     case CEIL_DIV_EXPR:
-      if (val2 == 0)
-	return false;
-      *res = wi::div_ceil (wi::to_wide (val1),
-			   wi::to_wide (val2), sign, &overflow);
-      break;
-
     case ROUND_DIV_EXPR:
-      if (val2 == 0)
+      if (!wide_int_binop (*res, code, w1, w2, sign, &overflow))
 	return false;
-      *res = wi::div_round (wi::to_wide (val1),
-			    wi::to_wide (val2), sign, &overflow);
       break;
 
     default:
       gcc_unreachable ();
     }
 
+  /* If the operation overflowed return -INF or +INF depending on the
+     operation and the combination of signs of the operands.  */
   if (overflow
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
     {
-      /* If the operation overflowed return -INF or +INF depending
-	 on the operation and the combination of signs of the operands.  */
-      int sgn1 = tree_int_cst_sgn (val1);
-      int sgn2 = tree_int_cst_sgn (val2);
+      int sign1 = tree_int_cst_sgn (val1);
+      int sign2 = tree_int_cst_sgn (val2);
 
       /* Notice that we only need to handle the restricted set of
 	 operations handled by extract_range_from_binary_expr.
@@ -1053,64 +1013,47 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
       /* For multiplication, the sign of the overflow is given
 	 by the comparison of the signs of the operands.  */
-      if ((code == MULT_EXPR && sgn1 == sgn2)
-	  /* For addition, the operands must be of the same sign
-	     to yield an overflow.  Its sign is therefore that
-	     of one of the operands, for example the first.  */
-	  || (code == PLUS_EXPR && sgn1 >= 0)
-	  /* For subtraction, operands must be of
-	     different signs to yield an overflow.  Its sign is
-	     therefore that of the first operand or the opposite of
-	     that of the second operand.  A first operand of 0 counts
-	     as positive here, for the corner case 0 - (-INF), which
-	     overflows, but must yield +INF.  */
-	  || (code == MINUS_EXPR && sgn1 >= 0)
+      if ((code == MULT_EXPR && sign1 == sign2)
 	  /* For division, the only case is -INF / -1 = +INF.  */
 	  || code == TRUNC_DIV_EXPR
 	  || code == FLOOR_DIV_EXPR
 	  || code == CEIL_DIV_EXPR
 	  || code == EXACT_DIV_EXPR
 	  || code == ROUND_DIV_EXPR)
-	*res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)),
-			      TYPE_SIGN (TREE_TYPE (val1)));
+	*res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)), sign);
       else
-	*res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)),
-			      TYPE_SIGN (TREE_TYPE (val1)));
+	*res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)), sign);
       return true;
     }
 
   return !overflow;
 }
 
-/* For range VR compute two wide_int bitmasks.  In *MAY_BE_NONZERO
-   bitmask if some bit is unset, it means for all numbers in the range
+/* For range [LB, UB] compute two wide_int bitmasks.  In *MAY_BE_NONZERO
+   bitmask, if some bit is unset, it means for all numbers in the range
    the bit is 0, otherwise it might be 0 or 1.  In *MUST_BE_NONZERO
-   bitmask if some bit is set, it means for all numbers in the range
+   bitmask, if some bit is set, it means for all numbers in the range
    the bit is 1, otherwise it might be 0 or 1.  */
 
-bool
-zero_nonzero_bits_from_vr (const tree expr_type,
-			   value_range *vr,
-			   wide_int *may_be_nonzero,
-			   wide_int *must_be_nonzero)
+void
+zero_nonzero_bits_from_bounds (signop sign,
+			       const wide_int &lb, const wide_int &ub,
+			       wide_int *may_be_nonzero,
+			       wide_int *must_be_nonzero)
 {
-  *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
-  *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
-  if (!range_int_cst_p (vr))
-    return false;
+  *may_be_nonzero = wi::minus_one (lb.get_precision ());
+  *must_be_nonzero = wi::zero (lb.get_precision ());
 
-  if (range_int_cst_singleton_p (vr))
+  if (wi::eq_p (lb, ub))
     {
-      *may_be_nonzero = wi::to_wide (vr->min);
+      *may_be_nonzero = lb;
       *must_be_nonzero = *may_be_nonzero;
     }
-  else if (tree_int_cst_sgn (vr->min) >= 0
-	   || tree_int_cst_sgn (vr->max) < 0)
+  else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
     {
-      wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
-      *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
-      *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
+      wide_int xor_mask = lb ^ ub;
+      *may_be_nonzero = lb | ub;
+      *must_be_nonzero = lb & ub;
       if (xor_mask != 0)
 	{
 	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
@@ -1119,7 +1062,26 @@ zero_nonzero_bits_from_vr (const tree expr_type,
 	  *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
 	}
     }
+}
+
+/* Like zero_nonzero_bits_from_bounds, but use the range in value_range VR.  */
+
+bool
+zero_nonzero_bits_from_vr (const tree expr_type,
+			   value_range *vr,
+			   wide_int *may_be_nonzero,
+			   wide_int *must_be_nonzero)
+{
+  if (!range_int_cst_p (vr))
+    {
+      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
+      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
+      return false;
+    }
+  zero_nonzero_bits_from_bounds (TYPE_SIGN (expr_type),
+				 wi::to_wide (vr->min), wi::to_wide (vr->max),
+				 may_be_nonzero, must_be_nonzero);
   return true;
 }
 
@@ -1275,6 +1237,52 @@ extract_range_from_multiplicative_op_1 (value_range *vr,
 		   wide_int_to_tree (type, max), NULL);
 }
 
+/* For op & or | attempt to optimize:
+
+	[LB, UB] op Z
+   into:
+	[LB op Z, UB op Z]
+
+   if Z is a constant which (for op | its bitwise not) has n
+   consecutive least significant bits cleared followed by m 1
+   consecutive bits set immediately above it and either
+   m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
+
+   The least significant n bits of all the values in the range are
+   cleared or set, the m bits above it are preserved and any bits
+   above these are required to be the same for all values in the
+   range.
+
+   Return TRUE if the min and max can simply be folded.  */
+
+bool
+range_easy_mask_min_max (tree_code code,
+			 const wide_int &lb, const wide_int &ub,
+			 const wide_int &mask)
+{
+  wide_int w = mask;
+  int m = 0, n = 0;
+  if (code == BIT_IOR_EXPR)
+    w = ~w;
+  if (wi::eq_p (w, 0))
+    n = w.get_precision ();
+  else
+    {
+      n = wi::ctz (w);
+      w = ~(w | wi::mask (n, false, w.get_precision ()));
+      if (wi::eq_p (w, 0))
+	m = w.get_precision () - n;
+      else
+	m = wi::ctz (w) - n;
+    }
+
+  wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
+  if ((new_mask & lb) == (new_mask & ub))
+    return true;
+
+  return false;
+}
+
 /* If BOUND will include a symbolic bound, adjust it accordingly,
    otherwise leave it as is.
 
@@ -2175,41 +2183,16 @@ extract_range_from_binary_expr_1 (value_range *vr,
 	      vr1p = &vr0;
 	    }
 	  /* For op & or | attempt to optimize:
-	     [x, y] op z into [x op z, y op z]
-	     if z is a constant which (for op | its bitwise not) has n
-	     consecutive least significant bits cleared followed by m 1
-	     consecutive bits set immediately above it and either
-	     m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
-	     The least significant n bits of all the values in the range are
-	     cleared or set, the m bits above it are preserved and any bits
-	     above these are required to be the same for all values in the
-	     range.  */
-	  if (vr0p && range_int_cst_p (vr0p))
-	    {
-	      wide_int w = wi::to_wide (vr1p->min);
-	      int m = 0, n = 0;
-	      if (code == BIT_IOR_EXPR)
-		w = ~w;
-	      if (wi::eq_p (w, 0))
-		n = TYPE_PRECISION (expr_type);
-	      else
-		{
-		  n = wi::ctz (w);
-		  w = ~(w | wi::mask (n, false, w.get_precision ()));
-		  if (wi::eq_p (w, 0))
-		    m = TYPE_PRECISION (expr_type) - n;
-		  else
-		    m = wi::ctz (w) - n;
-		}
-	      wide_int mask = wi::mask (m + n, true, w.get_precision ());
-	      if ((mask & wi::to_wide (vr0p->min))
-		  == (mask & wi::to_wide (vr0p->max)))
+	     [x, y] op z into [x op z, y op z].  */
+	  if (vr0p && range_int_cst_p (vr0p)
+	      && range_easy_mask_min_max (code, wi::to_wide (vr0p->min),
+					  wi::to_wide (vr0p->max),
+					  wi::to_wide (vr1p->min)))
 	    {
 	      min = int_const_binop (code, vr0p->min, vr1p->min);
 	      max = int_const_binop (code, vr0p->max, vr1p->min);
 	    }
-	    }
 	}
 
       type = VR_RANGE;
       if (min && max)
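The new range_easy_mask_min_max above only decides whether [LB, UB] op Z may be folded to [LB op Z, UB op Z]. The following standalone sketch, plain C++ with uint32_t standing in for wide_int at a fixed precision of 32 and made-up ranges, walks through the same n/m/new_mask computation for the BIT_AND_EXPR case; it is an illustration, not GCC code.

/* Standalone illustration (not GCC code) of the test performed by
   range_easy_mask_min_max for BIT_AND_EXPR, with uint32_t standing in
   for wide_int and a fixed precision of 32.  */
#include <cstdint>
#include <cstdio>

static bool
easy_mask_min_max (uint32_t lb, uint32_t ub, uint32_t z)
{
  uint32_t w = z;		/* For BIT_IOR_EXPR this would be ~z.  */
  int m = 0, n = 0;
  if (w == 0)
    n = 32;
  else
    {
      n = __builtin_ctz (w);		/* Low bits of Z that are clear.  */
      w = ~(w | ((1u << n) - 1));
      if (w == 0)
	m = 32 - n;
      else
	m = __builtin_ctz (w) - n;	/* Bits of Z set just above them.  */
    }
  /* All bits above the m+n low bits must agree between LB and UB.  */
  uint32_t high = (m + n >= 32) ? 0 : ~0u << (m + n);
  return (high & lb) == (high & ub);
}

int
main ()
{
  /* [0x1234, 0x12ff] & 0x00f0 can be folded to [0x30, 0xf0]: prints 1.  */
  printf ("%d\n", easy_mask_min_max (0x1234, 0x12ff, 0x00f0));
  /* [0x1234, 0x13ff] & 0x00f0 cannot, the high bytes differ: prints 0.  */
  printf ("%d\n", easy_mask_min_max (0x1234, 0x13ff, 0x00f0));
  return 0;
}

The first call succeeds because every value in [0x1234, 0x12ff] has 0x12 in the high byte, so x & 0x00f0 depends only on bits 4-7 and runs from 0x30 at the lower bound up to 0xf0 at the upper bound; the second fails because the high bytes of the two bounds differ.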
--- a/gcc/tree-vrp.h
+++ b/gcc/tree-vrp.h
@@ -112,8 +112,14 @@ extern bool range_int_cst_p (value_range *);
 extern int operand_less_p (tree, tree);
 extern bool find_case_label_range (gswitch *, tree, tree, size_t *, size_t *);
 extern bool find_case_label_index (gswitch *, size_t, tree, size_t *);
+extern void zero_nonzero_bits_from_bounds (signop, const wide_int&,
+					   const wide_int&, wide_int *,
+					   wide_int *);
 extern bool zero_nonzero_bits_from_vr (const tree, value_range *,
 				       wide_int *, wide_int *);
+extern bool range_easy_mask_min_max (tree_code,
+				     const wide_int &lb, const wide_int &ub,
+				     const wide_int &mask);
 extern bool overflow_comparison_p (tree_code, tree, tree, bool, tree *);
 extern bool range_int_cst_singleton_p (value_range *);
 extern int value_inside_range (tree, tree, tree);
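As for the zero_nonzero_bits_from_bounds interface exported above, the comment in tree-vrp.c defines the two masks by a per-bit property of every value in the range. A brute-force check of that property, again plain C++ with made-up bounds rather than GCC code, is sketched below; the real function derives conservative masks directly from LB and UB (roughly, their bitwise OR and AND, adjusted below the highest bit where the bounds differ) instead of iterating.

/* Standalone illustration (not GCC code): enumerate [LB, UB] and build the
   exact may_be_nonzero (OR of all values) and must_be_nonzero (AND of all
   values) masks that zero_nonzero_bits_from_bounds documents.  */
#include <cstdint>
#include <cstdio>

int
main ()
{
  const uint32_t lb = 0x50, ub = 0x53;	/* Hypothetical bounds.  */
  uint32_t may = 0, must = ~0u;
  for (uint32_t x = lb; x <= ub; ++x)
    {
      may |= x;	 /* A bit clear here is 0 in every value of the range.  */
      must &= x; /* A bit set here is 1 in every value of the range.  */
    }
  printf ("may_be_nonzero  = %#x\n", may);	/* 0x53 */
  printf ("must_be_nonzero = %#x\n", must);	/* 0x50 */
  return 0;
}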