Commit 0a67e02c by Paolo Bonzini

simplify-rtx.c (simplify_unary_operation_1, [...]): New, extracted from...

2005-02-28 Paolo Bonzini <bonzini@gnu.org>

	* simplify-rtx.c (simplify_unary_operation_1,
	simplify_const_unary_operation): New, extracted from...
	(simplify_unary_operation): ... this one.
	(simplify_binary_operation_1,
	simplify_const_binary_operation): New, extracted from...
	(simplify_binary_operation): ... this one.
	* rtl.h (simplify_const_unary_operation,
	simplify_const_binary_operation): Add prototypes.

From-SVN: r95686
parent ace9ac7b
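The shape of the change is easy to lose in the diff below: each public entry point is split into a constant folder, now exported from rtl.h (presumably so passes that only need constant folding can call it directly), and a static routine holding the shape-based identities. The toy below is plain C with invented names, not GCC code; it only illustrates the dispatch order that simplify_unary_operation now follows.

#include <stdio.h>

/* Toy "rtx": a constant, or an opaque non-constant expression.  */
struct expr { int is_const; long val; };
typedef struct expr *rtx;

enum rtx_code { NEG, NOT };

/* Constant folding only: a fresh constant, or NULL when OP is not
   constant (the simplify_const_unary_operation role).  */
static rtx
fold_const_unary (enum rtx_code code, rtx op)
{
  static struct expr result;
  if (!op->is_const)
    return NULL;
  result.is_const = 1;
  result.val = code == NEG ? -op->val : ~op->val;
  return &result;
}

/* Shape-based identities only (the simplify_unary_operation_1 role);
   the real routine rewrites (not (not x)) to x and so on.  */
static rtx
simplify_unary_1 (enum rtx_code code, rtx op)
{
  (void) code; (void) op;
  return NULL;                  /* no structural rule in this toy */
}

/* The public entry point behaves as before: constants first, then
   the structural rules.  */
static rtx
simplify_unary (enum rtx_code code, rtx op)
{
  rtx tem = fold_const_unary (code, op);
  return tem ? tem : simplify_unary_1 (code, op);
}

int
main (void)
{
  struct expr five = { 1, 5 };
  rtx r = simplify_unary (NEG, &five);
  printf ("%ld\n", r ? r->val : 0);   /* prints -5 */
  return 0;
}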
rtl.h
@@ -1516,8 +1516,12 @@ extern int split_branch_probability;
 extern rtx split_insns (rtx, rtx);

 /* In simplify-rtx.c  */
 extern rtx simplify_const_unary_operation (enum rtx_code, enum machine_mode,
					    rtx, enum machine_mode);
 extern rtx simplify_unary_operation (enum rtx_code, enum machine_mode, rtx,
				      enum machine_mode);
 extern rtx simplify_const_binary_operation (enum rtx_code, enum machine_mode,
					     rtx, rtx);
 extern rtx simplify_binary_operation (enum rtx_code, enum machine_mode, rtx,
				       rtx);
 extern rtx simplify_ternary_operation (enum rtx_code, enum machine_mode,
simplify-rtx.c
@@ -60,6 +60,9 @@ static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					    rtx, rtx);
 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					     enum machine_mode, rtx, rtx);
 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					 rtx, rtx, rtx, rtx);

 /* Negate a CONST_INT rtx, truncating (because a conversion from a
    maximally negative number can overflow).  */
@@ -355,53 +358,292 @@ rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
rtx trueop, tem;
if (GET_CODE (op) == CONST)
op = XEXP (op, 0);
trueop = avoid_constant_pool_reference (op);
tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
if (tem)
return tem;
return simplify_unary_operation_1 (code, mode, op);
}
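/* Illustrative aside, not part of the patch: note the asymmetry above.
   The constant folder is handed TRUEOP, in which a reference into the
   constant pool has been replaced by the constant itself, while the
   shape-based rules below are handed the original OP, so anything they
   build still refers to the operands as written.  */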
/* Perform some simplifications we can do even if the operands
aren't constant. */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
enum rtx_code reversed;
rtx temp;
switch (code)
{
case NOT:
/* (not (not X)) == X. */
if (GET_CODE (op) == NOT)
return XEXP (op, 0);
/* (not (eq X Y)) == (ne X Y), etc. */
if (COMPARISON_P (op)
&& (mode == BImode || STORE_FLAG_VALUE == -1)
&& ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
return simplify_gen_relational (reversed, mode, VOIDmode,
XEXP (op, 0), XEXP (op, 1));
/* (not (plus X -1)) can become (neg X). */
if (GET_CODE (op) == PLUS
&& XEXP (op, 1) == constm1_rtx)
return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
/* Similarly, (not (neg X)) is (plus X -1). */
if (GET_CODE (op) == NEG)
return plus_constant (XEXP (op, 0), -1);
/* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
if (GET_CODE (op) == XOR
&& GET_CODE (XEXP (op, 1)) == CONST_INT
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
/* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
if (GET_CODE (op) == PLUS
&& GET_CODE (XEXP (op, 1)) == CONST_INT
&& mode_signbit_p (mode, XEXP (op, 1))
&& (temp = simplify_unary_operation (NOT, mode,
XEXP (op, 1), mode)) != 0)
return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
/* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
operands other than 1, but that is not valid. We could do a
similar simplification for (not (lshiftrt C X)) where C is
just the sign bit, but this doesn't seem common enough to
bother with. */
if (GET_CODE (op) == ASHIFT
&& XEXP (op, 0) == const1_rtx)
{
temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
}
/* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
by reversing the comparison code if valid. */
if (STORE_FLAG_VALUE == -1
&& COMPARISON_P (op)
&& (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
return simplify_gen_relational (reversed, mode, VOIDmode,
XEXP (op, 0), XEXP (op, 1));
/* (not (ashiftrt foo C)) where C is the number of bits in FOO
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
&& GET_CODE (XEXP (op, 1)) == CONST_INT
&& INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
return simplify_gen_relational (GE, mode, VOIDmode,
XEXP (op, 0), const0_rtx);
break;
case NEG:
/* (neg (neg X)) == X. */
if (GET_CODE (op) == NEG)
return XEXP (op, 0);
/* (neg (plus X 1)) can become (not X). */
if (GET_CODE (op) == PLUS
&& XEXP (op, 1) == const1_rtx)
return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (op) == NOT)
return plus_constant (XEXP (op, 0), 1);
/* (neg (minus X Y)) can become (minus Y X). This transformation
isn't safe for modes with signed zeros, since if X and Y are
both +0, (minus Y X) is the same as (minus X Y). If the
rounding mode is towards +infinity (or -infinity) then the two
expressions will be rounded differently. */
if (GET_CODE (op) == MINUS
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
if (GET_CODE (op) == PLUS
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
/* (neg (plus A C)) is simplified to (minus -C A). */
if (GET_CODE (XEXP (op, 1)) == CONST_INT
|| GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
{
temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
if (temp)
return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
}
/* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
}
/* (neg (mult A B)) becomes (mult (neg A) B).
This works even for floating-point values. */
if (GET_CODE (op) == MULT
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
{
temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
}
/* NEG commutes with ASHIFT since it is multiplication. Only do
this if we can then eliminate the NEG (e.g., if the operand
is a constant). */
if (GET_CODE (op) == ASHIFT)
{
temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
if (temp)
return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
}
/* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
&& GET_CODE (XEXP (op, 1)) == CONST_INT
&& INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
/* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == LSHIFTRT
&& GET_CODE (XEXP (op, 1)) == CONST_INT
&& INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
break;
case SIGN_EXTEND:
/* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
becomes just the MINUS if its mode is MODE. This allows
folding switch statements on machines using casesi (such as
the VAX). */
if (GET_CODE (op) == TRUNCATE
&& GET_MODE (XEXP (op, 0)) == mode
&& GET_CODE (XEXP (op, 0)) == MINUS
&& GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
&& GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
return XEXP (op, 0);
/* Check for a sign extension of a subreg of a promoted
variable, where the promotion is sign-extended, and the
target mode is the same as the variable's promotion. */
if (GET_CODE (op) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op)
&& ! SUBREG_PROMOTED_UNSIGNED_P (op)
&& GET_MODE (XEXP (op, 0)) == mode)
return XEXP (op, 0);
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
if (! POINTERS_EXTEND_UNSIGNED
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
&& REG_P (SUBREG_REG (op))
&& REG_POINTER (SUBREG_REG (op))
&& GET_MODE (SUBREG_REG (op)) == Pmode)))
return convert_memory_address (Pmode, op);
#endif
break;
case ZERO_EXTEND:
/* Check for a zero extension of a subreg of a promoted
variable, where the promotion is zero-extended, and the
target mode is the same as the variable's promotion. */
if (GET_CODE (op) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op)
&& SUBREG_PROMOTED_UNSIGNED_P (op)
&& GET_MODE (XEXP (op, 0)) == mode)
return XEXP (op, 0);
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
if (POINTERS_EXTEND_UNSIGNED > 0
&& mode == Pmode && GET_MODE (op) == ptr_mode
&& (CONSTANT_P (op)
|| (GET_CODE (op) == SUBREG
&& REG_P (SUBREG_REG (op))
&& REG_POINTER (SUBREG_REG (op))
&& GET_MODE (SUBREG_REG (op)) == Pmode)))
return convert_memory_address (Pmode, op);
#endif
break;
default:
break;
}
return 0;
}
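/* Illustrative aside, not part of the patch: every rule above fires on
   expression shape alone, with no constant operand required.  For
   example, with X a pseudo register,
       (not (plus X (const_int -1)))  -->  (neg X)
   comes straight out of the NOT case of simplify_unary_operation_1,
   while simplify_const_unary_operation would have returned zero.  */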
/* Try to compute the value of a unary operation CODE whose output mode is to
be MODE with input operand OP whose mode was originally OP_MODE.
Return zero if the value cannot be computed. */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
- rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
- else if (GET_CODE (op) == CONST)
-   return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
@@ -411,7 +653,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
-					    CONST_VECTOR_ELT (trueop, i),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
@@ -424,32 +666,32 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
@@ -468,10 +710,10 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
@@ -594,18 +836,18 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
@@ -719,11 +961,11 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
@@ -753,20 +995,20 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
-	  }
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
@@ -775,9 +1017,11 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
@@ -863,244 +1107,14 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}

[244 lines removed here: the old inline "else" block carrying the
 non-constant NOT, NEG, SIGN_EXTEND and ZERO_EXTEND simplifications,
 moved verbatim into simplify_unary_operation_1 above]

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
@@ -1154,6 +1168,7 @@ simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
  return 0;
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.
@@ -1163,9 +1178,6 @@ rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
- HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
- HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;
@@ -1186,363 +1198,207 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

[removed in this hunk: the constant-folding bodies for CONST_VECTOR,
 VEC_CONCAT, floating-point and multi-word integer operands, extracted
 into simplify_const_binary_operation, together with the old inline
 copies of the non-constant simplifications re-added below]

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */
      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */
      if (! FLOAT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    coeff0 = -1, lhs = XEXP (lhs, 0);
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    coeff1 = -1, rhs = XEXP (rhs, 0);
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      tem = simplify_gen_binary (MULT, mode, lhs,
					 GEN_INT (coeff0 + coeff1));
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */
      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.

	 In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && trueop1 == CONST0_RTX (mode))
	return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
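An aside on the PLUS rule above (illustrative, not GCC internals): the HONOR_SIGNED_ZEROS guard exists because a negative zero is the one finite input for which folding x + 0 into x changes the result. A standalone C99 check that shows the sign bit being lost:

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double x = -0.0;

  /* Under the default rounding mode, (-0.0) + 0.0 is +0.0, so folding
     x + 0 -> x would flip the result's sign bit when x is -0.0.  */
  printf ("signbit(x)       = %d\n", signbit (x) != 0);       /* 1 */
  printf ("signbit(x + 0.0) = %d\n", signbit (x + 0.0) != 0); /* 0 */
  return 0;
}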
@@ -1571,7 +1427,7 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
	    }

	  if (GET_CODE (rhs) == NEG)
-	    coeff1 = -1, rhs = XEXP (rhs, 0);
	    coeff1 = - 1, rhs = XEXP (rhs, 0);
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
@@ -1588,24 +1444,27 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
	  if (rtx_equal_p (lhs, rhs))
	    {
-	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      tem = simplify_gen_binary (MULT, mode, lhs,
-					 GEN_INT (coeff0 + coeff1));
					 GEN_INT (coeff0 - coeff1));
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}

-      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
-      if ((GET_CODE (op1) == CONST_INT
-	   || GET_CODE (op1) == CONST_DOUBLE)
-	  && GET_CODE (op0) == XOR
-	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
-	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
-	  && mode_signbit_p (mode, op1))
-	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
-				    simplify_gen_binary (XOR, mode, op1,
-							 XEXP (op0, 1)));
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (GET_CODE (op1) == CONST_INT
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
@@ -1619,221 +1478,68 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
[removed in this hunk: the old inline copies of the floating-point
 reassociation rule and of the COMPARE, MINUS and MULT cases, superseded
 by the copies added to simplify_binary_operation_1 above]
	  && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	return tem;

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
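Another aside on the MULT case above: the multiply-to-shift rule hinges on exact_log2 returning the base-2 logarithm only for exact powers of two and -1 otherwise. A toy stand-in in plain C (GCC's own exact_log2 lives elsewhere and also copes with the host-word corner case that the sign-extension comment above guards against):

#include <stdio.h>

/* Toy stand-in for GCC's exact_log2: the base-2 log of X if X is an
   exact power of two, otherwise -1.  */
static int
toy_exact_log2 (unsigned long x)
{
  int log = 0;
  if (x == 0 || (x & (x - 1)) != 0)
    return -1;
  while ((x >>= 1) != 0)
    log++;
  return log;
}

int
main (void)
{
  printf ("%d %d %d\n",
	  toy_exact_log2 (8),    /* 3: x * 8 becomes x << 3 */
	  toy_exact_log2 (1),    /* 0 */
	  toy_exact_log2 (12));  /* -1: not a power of two */
  return 0;
}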
@@ -2011,8 +1717,8 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
	}

      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
-	  && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
-	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
@@ -2116,196 +1822,520 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
	: const0_rtx;
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
/* Fall through.... */
case ASHIFT:
case LSHIFTRT:
if (trueop1 == const0_rtx)
return op0;
if (trueop0 == const0_rtx && ! side_effects_p (op1))
return op0;
break;
case SMIN:
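      /* If the constant is the mode's most negative value, e.g.
         0x80000000 in SImode, the SMIN always yields that constant; the
         SMAX/UMIN/UMAX folds below follow the same reasoning with their
         own extreme values.  */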
if (width <= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1) == CONST_INT
&& INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
break;
case SMAX:
if (width <= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1) == CONST_INT
&& ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
== (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
break;
case UMIN:
if (trueop1 == const0_rtx && ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
break;
case UMAX:
if (trueop1 == constm1_rtx && ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
return op0;
tem = simplify_associative_operation (code, mode, op0, op1);
if (tem)
return tem;
break;
case SS_PLUS:
case US_PLUS:
case SS_MINUS:
case US_MINUS:
/* ??? There are simplifications that can be done. */
return 0;
case VEC_SELECT:
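      /* VEC_SELECT picks elements by index, so with a constant input it
         folds directly, e.g. (vec_select:SI (const_vector:V4SI [1 2 3 4])
         (parallel [(const_int 2)])) yields (const_int 3).  */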
if (!VECTOR_MODE_P (mode))
{
gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
gcc_assert (GET_CODE (trueop1) == PARALLEL);
gcc_assert (XVECLEN (trueop1, 0) == 1);
gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
if (GET_CODE (trueop0) == CONST_VECTOR)
return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
(trueop1, 0, 0)));
}
else
{
gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
gcc_assert (GET_MODE_INNER (mode)
== GET_MODE_INNER (GET_MODE (trueop0)));
gcc_assert (GET_CODE (trueop1) == PARALLEL);
if (GET_CODE (trueop0) == CONST_VECTOR)
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = XVECEXP (trueop1, 0, i);
gcc_assert (GET_CODE (x) == CONST_INT);
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
INTVAL (x));
}
return gen_rtx_CONST_VECTOR (mode, v);
}
}
return 0;
case VEC_CONCAT:
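      /* With constant operands the concatenation folds outright, e.g.
         (vec_concat:V2SI (const_int 1) (const_int 2)) becomes
         (const_vector:V2SI [1 2]).  */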
{
enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
? GET_MODE (trueop0)
: GET_MODE_INNER (mode));
enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
? GET_MODE (trueop1)
: GET_MODE_INNER (mode));
gcc_assert (VECTOR_MODE_P (mode));
gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
== GET_MODE_SIZE (mode));
if (VECTOR_MODE_P (op0_mode))
gcc_assert (GET_MODE_INNER (mode)
== GET_MODE_INNER (op0_mode));
else
gcc_assert (GET_MODE_INNER (mode) == op0_mode);
if (VECTOR_MODE_P (op1_mode))
gcc_assert (GET_MODE_INNER (mode)
== GET_MODE_INNER (op1_mode));
else
gcc_assert (GET_MODE_INNER (mode) == op1_mode);
if ((GET_CODE (trueop0) == CONST_VECTOR
|| GET_CODE (trueop0) == CONST_INT
|| GET_CODE (trueop0) == CONST_DOUBLE)
&& (GET_CODE (trueop1) == CONST_VECTOR
|| GET_CODE (trueop1) == CONST_INT
|| GET_CODE (trueop1) == CONST_DOUBLE))
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
unsigned in_n_elts = 1;
if (VECTOR_MODE_P (op0_mode))
in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
for (i = 0; i < n_elts; i++)
{
if (i < in_n_elts)
{
if (!VECTOR_MODE_P (op0_mode))
RTVEC_ELT (v, i) = trueop0;
else
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
}
else
{
if (!VECTOR_MODE_P (op1_mode))
RTVEC_ELT (v, i) = trueop1;
else
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
i - in_n_elts);
}
}
return gen_rtx_CONST_VECTOR (mode, v);
}
}
return 0;
default:
gcc_unreachable ();
}
return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
rtx op0, rtx op1)
{
HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
HOST_WIDE_INT val;
unsigned int width = GET_MODE_BITSIZE (mode);
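  /* Constant vectors are folded one element at a time below; e.g. a V2SI
     (plus [1 2] [3 4]) would become the constant vector [4 6].  */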
if (VECTOR_MODE_P (mode)
&& code != VEC_CONCAT
&& GET_CODE (op0) == CONST_VECTOR
&& GET_CODE (op1) == CONST_VECTOR)
{
unsigned n_elts = GET_MODE_NUNITS (mode);
enum machine_mode op0mode = GET_MODE (op0);
unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
enum machine_mode op1mode = GET_MODE (op1);
unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
gcc_assert (op0_n_elts == n_elts);
gcc_assert (op1_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
CONST_VECTOR_ELT (op0, i),
CONST_VECTOR_ELT (op1, i));
if (!x)
return 0;
RTVEC_ELT (v, i) = x;
}
return gen_rtx_CONST_VECTOR (mode, v);
}
if (VECTOR_MODE_P (mode)
&& code == VEC_CONCAT
&& CONSTANT_P (op0) && CONSTANT_P (op1))
{
unsigned n_elts = GET_MODE_NUNITS (mode);
rtvec v = rtvec_alloc (n_elts);
gcc_assert (n_elts >= 2);
if (n_elts == 2)
{
gcc_assert (GET_CODE (op0) != CONST_VECTOR);
gcc_assert (GET_CODE (op1) != CONST_VECTOR);
RTVEC_ELT (v, 0) = op0;
RTVEC_ELT (v, 1) = op1;
}
else
{
unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
unsigned i;
gcc_assert (GET_CODE (op0) == CONST_VECTOR);
gcc_assert (GET_CODE (op1) == CONST_VECTOR);
gcc_assert (op0_n_elts + op1_n_elts == n_elts);
for (i = 0; i < op0_n_elts; ++i)
RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
for (i = 0; i < op1_n_elts; ++i)
RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
}
return gen_rtx_CONST_VECTOR (mode, v);
}
if (GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_CODE (op0) == CONST_DOUBLE
&& GET_CODE (op1) == CONST_DOUBLE
&& mode == GET_MODE (op0) && mode == GET_MODE (op1))
{
if (code == AND
|| code == IOR
|| code == XOR)
{
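          /* Bitwise operations are applied to the target bit pattern of
             the constants; e.g. AND with a mask that clears only the
             sign bit would fold to the value's absolute value.  */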
long tmp0[4];
long tmp1[4];
REAL_VALUE_TYPE r;
int i;
real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
GET_MODE (op0));
real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
GET_MODE (op1));
for (i = 0; i < 4; i++)
{
switch (code)
{
case AND:
tmp0[i] &= tmp1[i];
break;
case IOR:
tmp0[i] |= tmp1[i];
break;
case XOR:
tmp0[i] ^= tmp1[i];
break;
default:
gcc_unreachable ();
}
}
real_from_target (&r, tmp0, mode);
return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
}
else
{
REAL_VALUE_TYPE f0, f1, value, result;
bool inexact;
REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
real_convert (&f0, mode, &f0);
real_convert (&f1, mode, &f1);
if (HONOR_SNANS (mode)
&& (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
return 0;
if (code == DIV
&& REAL_VALUES_EQUAL (f1, dconst0)
&& (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
return 0;
if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
&& flag_trapping_math
&& REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
{
int s0 = REAL_VALUE_NEGATIVE (f0);
int s1 = REAL_VALUE_NEGATIVE (f1);
switch (code)
{
case PLUS:
/* Inf + -Inf = NaN plus exception. */
if (s0 != s1)
return 0;
break;
case MINUS:
/* Inf - Inf = NaN plus exception. */
if (s0 == s1)
return 0;
break;
case DIV:
/* Inf / Inf = NaN plus exception. */
return 0;
default:
break;
}
}
if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
&& flag_trapping_math
&& ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
|| (REAL_VALUE_ISINF (f1)
&& REAL_VALUES_EQUAL (f0, dconst0))))
/* Inf * 0 = NaN plus exception. */
return 0;
inexact = real_arithmetic (&value, rtx_to_tree_code (code),
&f0, &f1);
real_convert (&result, mode, &value);
          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */
if ((flag_rounding_math
|| (REAL_MODE_FORMAT_COMPOSITE_P (mode)
&& !flag_unsafe_math_optimizations))
&& (inexact || !real_identical (&result, &value)))
return NULL_RTX;
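          /* (E.g. 1.0/3.0 is inexact, so with flag_rounding_math set it
             is left to the run-time rounding mode rather than folded.)  */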
return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
}
}
/* We can fold some multi-word operations. */
if (GET_MODE_CLASS (mode) == MODE_INT
&& width == HOST_BITS_PER_WIDE_INT * 2
&& (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
&& (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
{
unsigned HOST_WIDE_INT l1, l2, lv, lt;
HOST_WIDE_INT h1, h2, hv, ht;
if (GET_CODE (op0) == CONST_DOUBLE)
l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
else
l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
if (GET_CODE (op1) == CONST_DOUBLE)
l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
else
l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
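      /* A double-word constant is h * 2^HOST_BITS_PER_WIDE_INT + l, with
         L zero-extended; e.g. on a 32-bit host, DImode 0x100000001 is
         represented as h1 == 1, l1 == 1.  */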
switch (code)
{
case MINUS:
/* A - B == A + (-B). */
neg_double (l2, h2, &lv, &hv);
l2 = lv, h2 = hv;
          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
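  /* Worked example for the block above: with a 32-bit HOST_WIDE_INT,
     adding the DImode constants 0xffffffff and 1 carries into the high
     word, giving lv == 0, hv == 1, i.e. 0x100000000.  */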
if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
&& width <= HOST_BITS_PER_WIDE_INT && width != 0)
{
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
@@ -2389,12 +2419,13 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
    case LSHIFTRT:
    case ASHIFT:
    case ASHIFTRT:
      /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
         the value is in range.  We can't return any old value for
         out-of-range arguments because either the middle-end (via
         shift_truncation_mask) or the back-end might be relying on
         target-specific knowledge.  Nor can we rely on
         shift_truncation_mask, since the shift might not be part of an
         ashlM3, lshrM3 or ashrM3 instruction.  */
      if (SHIFT_COUNT_TRUNCATED)
        arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
      else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
@@ -2461,10 +2492,14 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
        }

      val = trunc_int_for_mode (val, mode);
      return GEN_INT (val);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.
...