Commit b6cc0a72 by Kazu Hirata, committed by Jeff Law

* fold-const.c: Fix formatting.

From-SVN: r35425
parent 4e9efe54
2000-08-02 Kazu Hirata <kazu@hxi.com>
* fold-const.c: Fix formatting.
Wed Aug 2 16:26:15 MET DST 2000 Jan Hubicka <jh@suse.cz>
* i386.c (legitimate_address_p): Accept other bases than
......
...@@ -27,7 +27,6 @@ Boston, MA 02111-1307, USA. */ ...@@ -27,7 +27,6 @@ Boston, MA 02111-1307, USA. */
@@ This would also make life easier when this technology is used @@ This would also make life easier when this technology is used
@@ for cross-compilers. */ @@ for cross-compilers. */
/* The entry points in this file are fold, size_int_wide, size_binop /* The entry points in this file are fold, size_int_wide, size_binop
and force_fit_type. and force_fit_type.
...@@ -113,7 +112,6 @@ static int count_cond PARAMS ((tree, int)); ...@@ -113,7 +112,6 @@ static int count_cond PARAMS ((tree, int));
#define CHARMASK 0x7f #define CHARMASK 0x7f
#endif #endif
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
overflow. Suppose A, B and SUM have the same respective signs as A1, B1, overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
and SUM1. Then this yields nonzero if overflow occurred during the and SUM1. Then this yields nonzero if overflow occurred during the
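The comment excerpted above describes the sign-rule overflow test used by the double-word arithmetic helpers. A stand-alone sketch of that test on a single signed word (illustrative only, not copied from the file):

    /* Overflow occurred in a + b == sum exactly when a and b have the
       same sign but sum's sign differs from theirs (two's complement).  */
    static int
    overflow_sum_sign (long a, long b, long sum)
    {
      return (~(a ^ b) & (a ^ sum)) < 0;
    }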
...@@ -293,8 +291,8 @@ neg_double (l1, h1, lv, hv) ...@@ -293,8 +291,8 @@ neg_double (l1, h1, lv, hv)
} }
else else
{ {
*lv = - l1; *lv = -l1;
*hv = ~ h1; *hv = ~h1;
return 0; return 0;
} }
} }
...@@ -345,7 +343,7 @@ mul_double (l1, h1, l2, h2, lv, hv) ...@@ -345,7 +343,7 @@ mul_double (l1, h1, l2, h2, lv, hv)
/* Check for overflow by calculating the top half of the answer in full; /* Check for overflow by calculating the top half of the answer in full;
it should agree with the low half's sign bit. */ it should agree with the low half's sign bit. */
decode (prod+4, &toplow, &tophigh); decode (prod + 4, &toplow, &tophigh);
if (h1 < 0) if (h1 < 0)
{ {
neg_double (l2, h2, &neglow, &neghigh); neg_double (l2, h2, &neglow, &neghigh);
...@@ -376,10 +374,10 @@ lshift_double (l1, h1, count, prec, lv, hv, arith) ...@@ -376,10 +374,10 @@ lshift_double (l1, h1, count, prec, lv, hv, arith)
{ {
if (count < 0) if (count < 0)
{ {
rshift_double (l1, h1, - count, prec, lv, hv, arith); rshift_double (l1, h1, -count, prec, lv, hv, arith);
return; return;
} }
#ifdef SHIFT_COUNT_TRUNCATED #ifdef SHIFT_COUNT_TRUNCATED
if (SHIFT_COUNT_TRUNCATED) if (SHIFT_COUNT_TRUNCATED)
count %= prec; count %= prec;
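The hunk above handles a negative shift count by deferring to the right-shift helper. A minimal stand-alone sketch of the same convention on a single unsigned word (illustrative, not the double-word routine itself):

    /* A negative COUNT means "shift the other way"; valid while the
       magnitude of COUNT is smaller than the word width.  */
    static unsigned int
    shift_left_signed_count (unsigned int value, int count)
    {
      if (count < 0)
        return value >> -count;
      return value << count;
    }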
...@@ -541,7 +539,7 @@ div_and_round_double (code, uns, ...@@ -541,7 +539,7 @@ div_and_round_double (code, uns,
overflow = 1, lden = 1; overflow = 1, lden = 1;
/* calculate quotient sign and convert operands to unsigned. */ /* calculate quotient sign and convert operands to unsigned. */
if (!uns) if (!uns)
{ {
if (hnum < 0) if (hnum < 0)
{ {
...@@ -551,7 +549,7 @@ div_and_round_double (code, uns, ...@@ -551,7 +549,7 @@ div_and_round_double (code, uns,
&& ((HOST_WIDE_INT) lden & hden) == -1) && ((HOST_WIDE_INT) lden & hden) == -1)
overflow = 1; overflow = 1;
} }
if (hden < 0) if (hden < 0)
{ {
quo_neg = ~ quo_neg; quo_neg = ~ quo_neg;
neg_double (lden, hden, &lden, &hden); neg_double (lden, hden, &lden, &hden);
...@@ -580,7 +578,7 @@ div_and_round_double (code, uns, ...@@ -580,7 +578,7 @@ div_and_round_double (code, uns,
bzero ((char *) num, sizeof num); /* to zero 9th element */ bzero ((char *) num, sizeof num); /* to zero 9th element */
bzero ((char *) den, sizeof den); bzero ((char *) den, sizeof den);
encode (num, lnum, hnum); encode (num, lnum, hnum);
encode (den, lden, hden); encode (den, lden, hden);
/* Special code for when the divisor < BASE. */ /* Special code for when the divisor < BASE. */
...@@ -602,11 +600,12 @@ div_and_round_double (code, uns, ...@@ -602,11 +600,12 @@ div_and_round_double (code, uns,
unsigned HOST_WIDE_INT quo_est, scale; unsigned HOST_WIDE_INT quo_est, scale;
/* Find the highest non-zero divisor digit. */ /* Find the highest non-zero divisor digit. */
for (i = 4 - 1; ; i--) for (i = 4 - 1;; i--)
if (den[i] != 0) { if (den[i] != 0)
den_hi_sig = i; {
break; den_hi_sig = i;
} break;
}
/* Insure that the first digit of the divisor is at least BASE/2. /* Insure that the first digit of the divisor is at least BASE/2.
This is required by the quotient digit estimation algorithm. */ This is required by the quotient digit estimation algorithm. */
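The BASE/2 requirement noted above is the usual precondition of schoolbook multi-word division (Knuth's Algorithm D): once the leading divisor digit is at least half the base, the quotient digit estimated from the leading digits is too large by at most 2. A base-10 illustration with made-up numbers: estimating 89 / 19 from the leading digits gives 8 / 1 = 8, far from the true digit 4, while after scaling both operands by 4 (356 / 76) the leading-digit estimate 35 / 7 = 5 lies within the small correction step the algorithm already performs.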
...@@ -733,7 +732,7 @@ div_and_round_double (code, uns, ...@@ -733,7 +732,7 @@ div_and_round_double (code, uns,
else else
return overflow; return overflow;
break; break;
case ROUND_DIV_EXPR: case ROUND_DIV_EXPR:
case ROUND_MOD_EXPR: /* round to closest integer */ case ROUND_MOD_EXPR: /* round to closest integer */
{ {
...@@ -821,7 +820,7 @@ target_isinf (x) ...@@ -821,7 +820,7 @@ target_isinf (x)
unsigned mantissa1 : 20; unsigned mantissa1 : 20;
unsigned exponent : 11; unsigned exponent : 11;
unsigned sign : 1; unsigned sign : 1;
} big_endian; } big_endian;
} u; } u;
u.d = dconstm1; u.d = dconstm1;
...@@ -861,7 +860,7 @@ target_isnan (x) ...@@ -861,7 +860,7 @@ target_isnan (x)
unsigned mantissa1 : 20; unsigned mantissa1 : 20;
unsigned exponent : 11; unsigned exponent : 11;
unsigned sign : 1; unsigned sign : 1;
} big_endian; } big_endian;
} u; } u;
u.d = dconstm1; u.d = dconstm1;
...@@ -901,7 +900,7 @@ target_negative (x) ...@@ -901,7 +900,7 @@ target_negative (x)
unsigned mantissa1 : 20; unsigned mantissa1 : 20;
unsigned exponent : 11; unsigned exponent : 11;
unsigned sign : 1; unsigned sign : 1;
} big_endian; } big_endian;
} u; } u;
u.d = dconstm1; u.d = dconstm1;
...@@ -1049,228 +1048,228 @@ real_hex_to_f (s, mode) ...@@ -1049,228 +1048,228 @@ real_hex_to_f (s, mode)
char *s; char *s;
enum machine_mode mode; enum machine_mode mode;
{ {
REAL_VALUE_TYPE ip; REAL_VALUE_TYPE ip;
char *p = s; char *p = s;
unsigned HOST_WIDE_INT low, high; unsigned HOST_WIDE_INT low, high;
int shcount, nrmcount, k; int shcount, nrmcount, k;
int sign, expsign, isfloat; int sign, expsign, isfloat;
int lost = 0;/* Nonzero low order bits shifted out and discarded. */ int lost = 0;/* Nonzero low order bits shifted out and discarded. */
int frexpon = 0; /* Bits after the decimal point. */ int frexpon = 0; /* Bits after the decimal point. */
int expon = 0; /* Value of exponent. */ int expon = 0; /* Value of exponent. */
int decpt = 0; /* How many decimal points. */ int decpt = 0; /* How many decimal points. */
int gotp = 0; /* How many P's. */ int gotp = 0; /* How many P's. */
char c; char c;
isfloat = 0; isfloat = 0;
expsign = 1; expsign = 1;
ip = 0.0; ip = 0.0;
while (*p == ' ' || *p == '\t') while (*p == ' ' || *p == '\t')
++p; ++p;
/* Sign, if any, comes first. */ /* Sign, if any, comes first. */
sign = 1; sign = 1;
if (*p == '-') if (*p == '-')
{ {
sign = -1; sign = -1;
++p; ++p;
} }
/* The string is supposed to start with 0x or 0X . */ /* The string is supposed to start with 0x or 0X . */
if (*p == '0') if (*p == '0')
{ {
++p; ++p;
if (*p == 'x' || *p == 'X') if (*p == 'x' || *p == 'X')
++p; ++p;
else else
abort (); abort ();
} }
else else
abort (); abort ();
while (*p == '0') while (*p == '0')
++p; ++p;
high = 0; high = 0;
low = 0; low = 0;
shcount = 0; shcount = 0;
while ((c = *p) != '\0') while ((c = *p) != '\0')
{ {
if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F') if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
|| (c >= 'a' && c <= 'f')) || (c >= 'a' && c <= 'f'))
{ {
k = c & CHARMASK; k = c & CHARMASK;
if (k >= 'a' && k <= 'f') if (k >= 'a' && k <= 'f')
k = k - 'a' + 10; k = k - 'a' + 10;
else if (k >= 'A') else if (k >= 'A')
k = k - 'A' + 10; k = k - 'A' + 10;
else else
k = k - '0'; k = k - '0';
if ((high & 0xf0000000) == 0) if ((high & 0xf0000000) == 0)
{ {
high = (high << 4) + ((low >> 28) & 15); high = (high << 4) + ((low >> 28) & 15);
low = (low << 4) + k; low = (low << 4) + k;
shcount += 4; shcount += 4;
if (decpt) if (decpt)
frexpon += 4; frexpon += 4;
} }
else else
{ {
/* Record nonzero lost bits. */ /* Record nonzero lost bits. */
lost |= k; lost |= k;
if (! decpt) if (! decpt)
frexpon -= 4; frexpon -= 4;
} }
++p; ++p;
} }
else if ( c == '.') else if (c == '.')
{ {
++decpt; ++decpt;
++p; ++p;
} }
else if (c == 'p' || c == 'P') else if (c == 'p' || c == 'P')
{ {
++gotp; ++gotp;
++p; ++p;
/* Sign of exponent. */ /* Sign of exponent. */
if (*p == '-') if (*p == '-')
{ {
expsign = -1; expsign = -1;
++p; ++p;
} }
/* Value of exponent. /* Value of exponent.
The exponent field is a decimal integer. */ The exponent field is a decimal integer. */
while (ISDIGIT(*p)) while (ISDIGIT (*p))
{ {
k = (*p++ & CHARMASK) - '0'; k = (*p++ & CHARMASK) - '0';
expon = 10 * expon + k; expon = 10 * expon + k;
} }
expon *= expsign; expon *= expsign;
/* F suffix is ambiguous in the significand part /* F suffix is ambiguous in the significand part
so it must appear after the decimal exponent field. */ so it must appear after the decimal exponent field. */
if (*p == 'f' || *p == 'F') if (*p == 'f' || *p == 'F')
{ {
isfloat = 1; isfloat = 1;
++p; ++p;
break; break;
} }
} }
else if (c == 'l' || c == 'L') else if (c == 'l' || c == 'L')
{ {
++p; ++p;
break; break;
} }
else else
break; break;
} }
/* Abort if last character read was not legitimate. */ /* Abort if last character read was not legitimate. */
c = *p; c = *p;
if ((c != '\0' && c != ' ' && c != '\n' && c != '\r') || (decpt > 1)) if ((c != '\0' && c != ' ' && c != '\n' && c != '\r') || (decpt > 1))
abort (); abort ();
/* There must be either one decimal point or one p. */ /* There must be either one decimal point or one p. */
if (decpt == 0 && gotp == 0) if (decpt == 0 && gotp == 0)
abort (); abort ();
shcount -= 4; shcount -= 4;
if (high == 0 && low == 0) if (high == 0 && low == 0)
return dconst0; return dconst0;
/* Normalize. */ /* Normalize. */
nrmcount = 0; nrmcount = 0;
if (high == 0) if (high == 0)
{ {
high = low; high = low;
low = 0; low = 0;
nrmcount += 32; nrmcount += 32;
} }
/* Leave a high guard bit for carry-out. */ /* Leave a high guard bit for carry-out. */
if ((high & 0x80000000) != 0) if ((high & 0x80000000) != 0)
{ {
lost |= low & 1; lost |= low & 1;
low = (low >> 1) | (high << 31); low = (low >> 1) | (high << 31);
high = high >> 1; high = high >> 1;
nrmcount -= 1; nrmcount -= 1;
} }
if ((high & 0xffff8000) == 0) if ((high & 0xffff8000) == 0)
{ {
high = (high << 16) + ((low >> 16) & 0xffff); high = (high << 16) + ((low >> 16) & 0xffff);
low = low << 16; low = low << 16;
nrmcount += 16; nrmcount += 16;
} }
while ((high & 0xc0000000) == 0) while ((high & 0xc0000000) == 0)
{ {
high = (high << 1) + ((low >> 31) & 1); high = (high << 1) + ((low >> 31) & 1);
low = low << 1; low = low << 1;
nrmcount += 1; nrmcount += 1;
} }
if (isfloat || GET_MODE_SIZE(mode) == UNITS_PER_WORD) if (isfloat || GET_MODE_SIZE (mode) == UNITS_PER_WORD)
{ {
/* Keep 24 bits precision, bits 0x7fffff80. /* Keep 24 bits precision, bits 0x7fffff80.
Rounding bit is 0x40. */ Rounding bit is 0x40. */
lost = lost | low | (high & 0x3f); lost = lost | low | (high & 0x3f);
low = 0; low = 0;
if (high & 0x40) if (high & 0x40)
{ {
if ((high & 0x80) || lost) if ((high & 0x80) || lost)
high += 0x40; high += 0x40;
} }
high &= 0xffffff80; high &= 0xffffff80;
} }
else else
{ {
/* We need real.c to do long double formats, so here default /* We need real.c to do long double formats, so here default
to double precision. */ to double precision. */
#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
/* IEEE double. /* IEEE double.
Keep 53 bits precision, bits 0x7fffffff fffffc00. Keep 53 bits precision, bits 0x7fffffff fffffc00.
Rounding bit is low word 0x200. */ Rounding bit is low word 0x200. */
lost = lost | (low & 0x1ff); lost = lost | (low & 0x1ff);
if (low & 0x200) if (low & 0x200)
{ {
if ((low & 0x400) || lost) if ((low & 0x400) || lost)
{ {
low = (low + 0x200) & 0xfffffc00; low = (low + 0x200) & 0xfffffc00;
if (low == 0) if (low == 0)
high += 1; high += 1;
} }
} }
low &= 0xfffffc00; low &= 0xfffffc00;
#else #else
/* Assume it's a VAX with 56-bit significand, /* Assume it's a VAX with 56-bit significand,
bits 0x7fffffff ffffff80. */ bits 0x7fffffff ffffff80. */
lost = lost | (low & 0x7f); lost = lost | (low & 0x7f);
if (low & 0x40) if (low & 0x40)
{ {
if ((low & 0x80) || lost) if ((low & 0x80) || lost)
{ {
low = (low + 0x40) & 0xffffff80; low = (low + 0x40) & 0xffffff80;
if (low == 0) if (low == 0)
high += 1; high += 1;
} }
} }
low &= 0xffffff80; low &= 0xffffff80;
#endif #endif
} }
ip = (double) high; ip = (double) high;
ip = REAL_VALUE_LDEXP (ip, 32) + (double) low; ip = REAL_VALUE_LDEXP (ip, 32) + (double) low;
/* Apply shifts and exponent value as power of 2. */ /* Apply shifts and exponent value as power of 2. */
ip = REAL_VALUE_LDEXP (ip, expon - (nrmcount + frexpon)); ip = REAL_VALUE_LDEXP (ip, expon - (nrmcount + frexpon));
if (sign < 0) if (sign < 0)
ip = -ip; ip = -ip;
return ip; return ip;
} }
#endif /* no REAL_ARITHMETIC */ #endif /* no REAL_ARITHMETIC */
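As a quick reference for the syntax this routine accepts (worked values, not part of the commit): each hex digit after the point contributes a power of 1/16, and the binary exponent introduced by 'p' scales the result by a power of two.

    0x1.8p3   =  (1 + 8/16)   * 2^3   =  1.5     * 8  =  12.0
    0xA.Bp-2  =  (10 + 11/16) * 2^-2  =  10.6875 / 4  =  2.671875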
...@@ -1495,7 +1494,7 @@ int_const_binop (code, arg1, arg2, notrunc, forsize) ...@@ -1495,7 +1494,7 @@ int_const_binop (code, arg1, arg2, notrunc, forsize)
break; break;
case RSHIFT_EXPR: case RSHIFT_EXPR:
int2l = - int2l; int2l = -int2l;
case LSHIFT_EXPR: case LSHIFT_EXPR:
/* It's unclear from the C standard whether shifts can overflow. /* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard The following code ignores overflow; perhaps a C standard
...@@ -1544,7 +1543,7 @@ int_const_binop (code, arg1, arg2, notrunc, forsize) ...@@ -1544,7 +1543,7 @@ int_const_binop (code, arg1, arg2, notrunc, forsize)
/* ... fall through ... */ /* ... fall through ... */
case ROUND_DIV_EXPR: case ROUND_DIV_EXPR:
if (int2h == 0 && int2l == 1) if (int2h == 0 && int2l == 1)
{ {
low = int1l, hi = int1h; low = int1l, hi = int1h;
...@@ -1577,7 +1576,7 @@ int_const_binop (code, arg1, arg2, notrunc, forsize) ...@@ -1577,7 +1576,7 @@ int_const_binop (code, arg1, arg2, notrunc, forsize)
/* ... fall through ... */ /* ... fall through ... */
case ROUND_MOD_EXPR: case ROUND_MOD_EXPR:
overflow = div_and_round_double (code, uns, overflow = div_and_round_double (code, uns,
int1l, int1h, int2l, int2h, int1l, int1h, int2l, int2h,
&garbagel, &garbageh, &low, &hi); &garbagel, &garbageh, &low, &hi);
...@@ -1636,10 +1635,10 @@ int_const_binop (code, arg1, arg2, notrunc, forsize) ...@@ -1636,10 +1635,10 @@ int_const_binop (code, arg1, arg2, notrunc, forsize)
/* Define input and output argument for const_binop_1. */ /* Define input and output argument for const_binop_1. */
struct cb_args struct cb_args
{ {
enum tree_code code; /* Input: tree code for operation*/ enum tree_code code; /* Input: tree code for operation. */
tree type; /* Input: tree type for operation. */ tree type; /* Input: tree type for operation. */
REAL_VALUE_TYPE d1, d2; /* Input: floating point operands. */ REAL_VALUE_TYPE d1, d2; /* Input: floating point operands. */
tree t; /* Output: constant for result. */ tree t; /* Output: constant for result. */
}; };
/* Do the real arithmetic for const_binop while protected by a /* Do the real arithmetic for const_binop while protected by a
...@@ -1647,7 +1646,7 @@ struct cb_args ...@@ -1647,7 +1646,7 @@ struct cb_args
static void static void
const_binop_1 (data) const_binop_1 (data)
PTR data; PTR data;
{ {
struct cb_args *args = (struct cb_args *) data; struct cb_args *args = (struct cb_args *) data;
REAL_VALUE_TYPE value; REAL_VALUE_TYPE value;
...@@ -1660,32 +1659,32 @@ const_binop_1 (data) ...@@ -1660,32 +1659,32 @@ const_binop_1 (data)
case PLUS_EXPR: case PLUS_EXPR:
value = args->d1 + args->d2; value = args->d1 + args->d2;
break; break;
case MINUS_EXPR: case MINUS_EXPR:
value = args->d1 - args->d2; value = args->d1 - args->d2;
break; break;
case MULT_EXPR: case MULT_EXPR:
value = args->d1 * args->d2; value = args->d1 * args->d2;
break; break;
case RDIV_EXPR: case RDIV_EXPR:
#ifndef REAL_INFINITY #ifndef REAL_INFINITY
if (args->d2 == 0) if (args->d2 == 0)
abort (); abort ();
#endif #endif
value = args->d1 / args->d2; value = args->d1 / args->d2;
break; break;
case MIN_EXPR: case MIN_EXPR:
value = MIN (args->d1, args->d2); value = MIN (args->d1, args->d2);
break; break;
case MAX_EXPR: case MAX_EXPR:
value = MAX (args->d1, args->d2); value = MAX (args->d1, args->d2);
break; break;
default: default:
abort (); abort ();
} }
...@@ -1708,7 +1707,8 @@ const_binop (code, arg1, arg2, notrunc) ...@@ -1708,7 +1707,8 @@ const_binop (code, arg1, arg2, notrunc)
register tree arg1, arg2; register tree arg1, arg2;
int notrunc; int notrunc;
{ {
STRIP_NOPS (arg1); STRIP_NOPS (arg2); STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
if (TREE_CODE (arg1) == INTEGER_CST) if (TREE_CODE (arg1) == INTEGER_CST)
return int_const_binop (code, arg1, arg2, notrunc, 0); return int_const_binop (code, arg1, arg2, notrunc, 0);
...@@ -1737,7 +1737,7 @@ const_binop (code, arg1, arg2, notrunc) ...@@ -1737,7 +1737,7 @@ const_binop (code, arg1, arg2, notrunc)
args.d1 = d1; args.d1 = d1;
args.d2 = d2; args.d2 = d2;
args.code = code; args.code = code;
if (do_float_handler (const_binop_1, (PTR) &args)) if (do_float_handler (const_binop_1, (PTR) &args))
/* Receive output from const_binop_1. */ /* Receive output from const_binop_1. */
t = args.t; t = args.t;
...@@ -1859,7 +1859,7 @@ size_int_type_wide (number, type) ...@@ -1859,7 +1859,7 @@ size_int_type_wide (number, type)
static tree size_table[2048 + 1]; static tree size_table[2048 + 1];
static int init_p = 0; static int init_p = 0;
tree t; tree t;
if (ggc_p && ! init_p) if (ggc_p && ! init_p)
{ {
ggc_add_tree_root ((tree *) size_table, ggc_add_tree_root ((tree *) size_table,
...@@ -1994,9 +1994,9 @@ struct fc_args ...@@ -1994,9 +1994,9 @@ struct fc_args
static void static void
fold_convert_1 (data) fold_convert_1 (data)
PTR data; PTR data;
{ {
struct fc_args * args = (struct fc_args *) data; struct fc_args *args = (struct fc_args *) data;
args->t = build_real (args->type, args->t = build_real (args->type,
real_value_truncate (TYPE_MODE (args->type), real_value_truncate (TYPE_MODE (args->type),
...@@ -2135,7 +2135,7 @@ fold_convert (t, arg1) ...@@ -2135,7 +2135,7 @@ fold_convert (t, arg1)
if (TREE_CODE (arg1) == REAL_CST) if (TREE_CODE (arg1) == REAL_CST)
{ {
struct fc_args args; struct fc_args args;
if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1))) if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
{ {
t = arg1; t = arg1;
...@@ -2146,7 +2146,7 @@ fold_convert (t, arg1) ...@@ -2146,7 +2146,7 @@ fold_convert (t, arg1)
/* Setup input for fold_convert_1() */ /* Setup input for fold_convert_1() */
args.arg1 = arg1; args.arg1 = arg1;
args.type = type; args.type = type;
if (do_float_handler (fold_convert_1, (PTR) &args)) if (do_float_handler (fold_convert_1, (PTR) &args))
{ {
/* Receive output from fold_convert_1() */ /* Receive output from fold_convert_1() */
...@@ -2419,18 +2419,18 @@ operand_equal_p (arg0, arg1, only_const) ...@@ -2419,18 +2419,18 @@ operand_equal_p (arg0, arg1, only_const)
if (TREE_CODE (arg0) == RTL_EXPR) if (TREE_CODE (arg0) == RTL_EXPR)
return rtx_equal_p (RTL_EXPR_RTL (arg0), RTL_EXPR_RTL (arg1)); return rtx_equal_p (RTL_EXPR_RTL (arg0), RTL_EXPR_RTL (arg1));
return 0; return 0;
default: default:
return 0; return 0;
} }
} }
/* Similar to operand_equal_p, but see if ARG0 might have been made by /* Similar to operand_equal_p, but see if ARG0 might have been made by
shorten_compare from ARG1 when ARG1 was being compared with OTHER. shorten_compare from ARG1 when ARG1 was being compared with OTHER.
When in doubt, return 0. */ When in doubt, return 0. */
static int static int
operand_equal_for_comparison_p (arg0, arg1, other) operand_equal_for_comparison_p (arg0, arg1, other)
tree arg0, arg1; tree arg0, arg1;
tree other; tree other;
...@@ -2450,7 +2450,8 @@ operand_equal_for_comparison_p (arg0, arg1, other) ...@@ -2450,7 +2450,8 @@ operand_equal_for_comparison_p (arg0, arg1, other)
and see if the inner values are the same. This removes any and see if the inner values are the same. This removes any
signedness comparison, which doesn't matter here. */ signedness comparison, which doesn't matter here. */
primarg0 = arg0, primarg1 = arg1; primarg0 = arg0, primarg1 = arg1;
STRIP_NOPS (primarg0); STRIP_NOPS (primarg1); STRIP_NOPS (primarg0);
STRIP_NOPS (primarg1);
if (operand_equal_p (primarg0, primarg1, 0)) if (operand_equal_p (primarg0, primarg1, 0))
return 1; return 1;
...@@ -2473,8 +2474,8 @@ operand_equal_for_comparison_p (arg0, arg1, other) ...@@ -2473,8 +2474,8 @@ operand_equal_for_comparison_p (arg0, arg1, other)
/* Make sure shorter operand is extended the right way /* Make sure shorter operand is extended the right way
to match the longer operand. */ to match the longer operand. */
primarg1 = convert (signed_or_unsigned_type (unsignedp1, primarg1 = convert (signed_or_unsigned_type (unsignedp1,
TREE_TYPE (primarg1)), TREE_TYPE (primarg1)),
primarg1); primarg1);
if (operand_equal_p (arg0, convert (type, primarg1), 0)) if (operand_equal_p (arg0, convert (type, primarg1), 0))
return 1; return 1;
...@@ -2544,7 +2545,7 @@ twoval_comparison_p (arg, cval1, cval2, save_p) ...@@ -2544,7 +2545,7 @@ twoval_comparison_p (arg, cval1, cval2, save_p)
&& twoval_comparison_p (TREE_OPERAND (arg, 2), && twoval_comparison_p (TREE_OPERAND (arg, 2),
cval1, cval2, save_p)); cval1, cval2, save_p));
return 0; return 0;
case '<': case '<':
/* First see if we can handle the first operand, then the second. For /* First see if we can handle the first operand, then the second. For
the second operand, we know *CVAL1 can't be zero. It must be that the second operand, we know *CVAL1 can't be zero. It must be that
...@@ -2699,8 +2700,6 @@ pedantic_omit_one_operand (type, result, omitted) ...@@ -2699,8 +2700,6 @@ pedantic_omit_one_operand (type, result, omitted)
return pedantic_non_lvalue (t); return pedantic_non_lvalue (t);
} }
/* Return a simplified tree node for the truth-negation of ARG. This /* Return a simplified tree node for the truth-negation of ARG. This
never alters ARG itself. We assume that ARG is an operation that never alters ARG itself. We assume that ARG is an operation that
...@@ -3005,7 +3004,7 @@ optimize_bit_field_compare (code, compare_type, lhs, rhs) ...@@ -3005,7 +3004,7 @@ optimize_bit_field_compare (code, compare_type, lhs, rhs)
error case below. If we didn't, we might generate wrong code. error case below. If we didn't, we might generate wrong code.
For unsigned fields, the constant shifted right by the field length should For unsigned fields, the constant shifted right by the field length should
be all zero. For signed fields, the high-order bits should agree with be all zero. For signed fields, the high-order bits should agree with
the sign bit. */ the sign bit. */
if (lunsignedp) if (lunsignedp)
...@@ -3102,7 +3101,7 @@ decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp, ...@@ -3102,7 +3101,7 @@ decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
unsigned int precision; unsigned int precision;
unsigned int alignment; unsigned int alignment;
/* All the optimizations using this function assume integer fields. /* All the optimizations using this function assume integer fields.
There are problems with FP fields since the type_for_size call There are problems with FP fields since the type_for_size call
below can fail for, e.g., XFmode. */ below can fail for, e.g., XFmode. */
if (! INTEGRAL_TYPE_P (TREE_TYPE (exp))) if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
...@@ -3119,14 +3118,13 @@ decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp, ...@@ -3119,14 +3118,13 @@ decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
return 0; return 0;
} }
inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode, inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
punsignedp, pvolatilep, &alignment); punsignedp, pvolatilep, &alignment);
if ((inner == exp && and_mask == 0) if ((inner == exp && and_mask == 0)
|| *pbitsize < 0 || offset != 0 || *pbitsize < 0 || offset != 0
|| TREE_CODE (inner) == PLACEHOLDER_EXPR) || TREE_CODE (inner) == PLACEHOLDER_EXPR)
return 0; return 0;
/* Compute the mask to access the bitfield. */ /* Compute the mask to access the bitfield. */
unsigned_type = type_for_size (*pbitsize, 1); unsigned_type = type_for_size (*pbitsize, 1);
precision = TYPE_PRECISION (unsigned_type); precision = TYPE_PRECISION (unsigned_type);
...@@ -3163,7 +3161,7 @@ all_ones_mask_p (mask, size) ...@@ -3163,7 +3161,7 @@ all_ones_mask_p (mask, size)
TREE_TYPE (tmask) = signed_type (type); TREE_TYPE (tmask) = signed_type (type);
force_fit_type (tmask, 0); force_fit_type (tmask, 0);
return return
tree_int_cst_equal (mask, tree_int_cst_equal (mask,
const_binop (RSHIFT_EXPR, const_binop (RSHIFT_EXPR,
const_binop (LSHIFT_EXPR, tmask, const_binop (LSHIFT_EXPR, tmask,
size_int (precision - size), size_int (precision - size),
...@@ -3174,7 +3172,7 @@ all_ones_mask_p (mask, size) ...@@ -3174,7 +3172,7 @@ all_ones_mask_p (mask, size)
/* Subroutine for fold_truthop: determine if an operand is simple enough /* Subroutine for fold_truthop: determine if an operand is simple enough
to be evaluated unconditionally. */ to be evaluated unconditionally. */
static int static int
simple_operand_p (exp) simple_operand_p (exp)
tree exp; tree exp;
{ {
...@@ -3296,7 +3294,7 @@ range_binop (code, type, arg0, upper0_p, arg1, upper1_p) ...@@ -3296,7 +3294,7 @@ range_binop (code, type, arg0, upper0_p, arg1, upper1_p)
return convert (type, result ? integer_one_node : integer_zero_node); return convert (type, result ? integer_one_node : integer_zero_node);
} }
/* Given EXP, a logical expression, set the range it is testing into /* Given EXP, a logical expression, set the range it is testing into
variables denoted by PIN_P, PLOW, and PHIGH. Return the expression variables denoted by PIN_P, PLOW, and PHIGH. Return the expression
actually being tested. *PLOW and *PHIGH will be made of the same type actually being tested. *PLOW and *PHIGH will be made of the same type
...@@ -3330,13 +3328,13 @@ make_range (exp, pin_p, plow, phigh) ...@@ -3330,13 +3328,13 @@ make_range (exp, pin_p, plow, phigh)
if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))) if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
{ {
arg0 = TREE_OPERAND (exp, 0); arg0 = TREE_OPERAND (exp, 0);
if (TREE_CODE_CLASS (code) == '<' if (TREE_CODE_CLASS (code) == '<'
|| TREE_CODE_CLASS (code) == '1' || TREE_CODE_CLASS (code) == '1'
|| TREE_CODE_CLASS (code) == '2') || TREE_CODE_CLASS (code) == '2')
type = TREE_TYPE (arg0); type = TREE_TYPE (arg0);
if (TREE_CODE_CLASS (code) == '2' if (TREE_CODE_CLASS (code) == '2'
|| TREE_CODE_CLASS (code) == '<' || TREE_CODE_CLASS (code) == '<'
|| (TREE_CODE_CLASS (code) == 'e' || (TREE_CODE_CLASS (code) == 'e'
&& TREE_CODE_LENGTH (code) > 1)) && TREE_CODE_LENGTH (code) > 1))
arg1 = TREE_OPERAND (exp, 1); arg1 = TREE_OPERAND (exp, 1);
} }
...@@ -3511,7 +3509,7 @@ make_range (exp, pin_p, plow, phigh) ...@@ -3511,7 +3509,7 @@ make_range (exp, pin_p, plow, phigh)
high_positive = fold (build (RSHIFT_EXPR, type, high_positive = fold (build (RSHIFT_EXPR, type,
convert (type, high_positive), convert (type, high_positive),
convert (type, integer_one_node))); convert (type, integer_one_node)));
/* If the low bound is specified, "and" the range with the /* If the low bound is specified, "and" the range with the
range for which the original unsigned value will be range for which the original unsigned value will be
positive. */ positive. */
...@@ -3614,7 +3612,7 @@ build_range_check (type, exp, in_p, low, high) ...@@ -3614,7 +3612,7 @@ build_range_check (type, exp, in_p, low, high)
return 0; return 0;
} }
/* Given two ranges, see if we can merge them into one. Return 1 if we /* Given two ranges, see if we can merge them into one. Return 1 if we
can, 0 if we can't. Set the output range into the specified parameters. */ can, 0 if we can't. Set the output range into the specified parameters. */
static int static int
...@@ -3639,7 +3637,7 @@ merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1) ...@@ -3639,7 +3637,7 @@ merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1)
/* Make range 0 be the range that starts first, or ends last if they /* Make range 0 be the range that starts first, or ends last if they
start at the same value. Swap them if it isn't. */ start at the same value. Swap them if it isn't. */
if (integer_onep (range_binop (GT_EXPR, integer_type_node, if (integer_onep (range_binop (GT_EXPR, integer_type_node,
low0, 0, low1, 0)) low0, 0, low1, 0))
|| (lowequal || (lowequal
&& integer_onep (range_binop (GT_EXPR, integer_type_node, && integer_onep (range_binop (GT_EXPR, integer_type_node,
...@@ -3691,7 +3689,7 @@ merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1) ...@@ -3691,7 +3689,7 @@ merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1)
{ {
in_p = 1, high = high0; in_p = 1, high = high0;
low = range_binop (PLUS_EXPR, NULL_TREE, high1, 0, low = range_binop (PLUS_EXPR, NULL_TREE, high1, 0,
integer_one_node, 0); integer_one_node, 0);
} }
else if (! subset || highequal) else if (! subset || highequal)
{ {
...@@ -3847,7 +3845,7 @@ unextend (c, p, unsignedp, mask) ...@@ -3847,7 +3845,7 @@ unextend (c, p, unsignedp, mask)
/* We must use a signed type in order to get an arithmetic right shift. /* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that However, we must also avoid introducing accidental overflows, so that
a subsequent call to integer_zerop will work. Hence we must a subsequent call to integer_zerop will work. Hence we must
do the type conversion here. At this point, the constant is either do the type conversion here. At this point, the constant is either
zero or one, and the conversion to a signed type can never overflow. zero or one, and the conversion to a signed type can never overflow.
We could get an overflow if this conversion is done anywhere else. */ We could get an overflow if this conversion is done anywhere else. */
...@@ -3896,7 +3894,7 @@ fold_truthop (code, truth_type, lhs, rhs) ...@@ -3896,7 +3894,7 @@ fold_truthop (code, truth_type, lhs, rhs)
{ {
/* If this is the "or" of two comparisons, we can do something if /* If this is the "or" of two comparisons, we can do something if
the comparisons are NE_EXPR. If this is the "and", we can do something the comparisons are NE_EXPR. If this is the "and", we can do something
if the comparisons are EQ_EXPR. I.e., if the comparisons are EQ_EXPR. I.e.,
(a->b == 2 && a->c == 4) can become (a->new == NEW). (a->b == 2 && a->c == 4) can become (a->new == NEW).
WANTED_CODE is this operation code. For single bit fields, we can WANTED_CODE is this operation code. For single bit fields, we can
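The "a->b == 2 && a->c == 4" example in the comment above is the target pattern: two tests of adjacent bit-fields collapse into one compare of the containing unit. A small self-contained illustration (the layout assumptions are noted in the comments and are not taken from the source):

    struct s { unsigned b : 4; unsigned c : 4; };   /* adjacent fields in one byte */

    /* Before: two field extractions and two compares.  */
    int before (struct s *a) { return a->b == 2 && a->c == 4; }

    /* After, conceptually: one byte load and one compare, assuming this
       target places B in the low nibble and C in the high nibble.  */
    int after (unsigned char *p) { return *p == ((4u << 4) | 2u); }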
...@@ -3947,7 +3945,7 @@ fold_truthop (code, truth_type, lhs, rhs) ...@@ -3947,7 +3945,7 @@ fold_truthop (code, truth_type, lhs, rhs)
lr_arg = TREE_OPERAND (lhs, 1); lr_arg = TREE_OPERAND (lhs, 1);
rl_arg = TREE_OPERAND (rhs, 0); rl_arg = TREE_OPERAND (rhs, 0);
rr_arg = TREE_OPERAND (rhs, 1); rr_arg = TREE_OPERAND (rhs, 1);
/* If the RHS can be evaluated unconditionally and its operands are /* If the RHS can be evaluated unconditionally and its operands are
simple, it wins to evaluate the RHS unconditionally on machines simple, it wins to evaluate the RHS unconditionally on machines
with expensive branches. In this case, this isn't a comparison with expensive branches. In this case, this isn't a comparison
...@@ -4063,7 +4061,7 @@ fold_truthop (code, truth_type, lhs, rhs) ...@@ -4063,7 +4061,7 @@ fold_truthop (code, truth_type, lhs, rhs)
if (l_const) if (l_const)
{ {
l_const = convert (lntype, l_const); l_const = convert (lntype, l_const);
l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask); l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0); l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const, if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
fold (build1 (BIT_NOT_EXPR, fold (build1 (BIT_NOT_EXPR,
...@@ -4071,7 +4069,7 @@ fold_truthop (code, truth_type, lhs, rhs) ...@@ -4071,7 +4069,7 @@ fold_truthop (code, truth_type, lhs, rhs)
0))) 0)))
{ {
warning ("comparison is always %d", wanted_code == NE_EXPR); warning ("comparison is always %d", wanted_code == NE_EXPR);
return convert (truth_type, return convert (truth_type,
wanted_code == NE_EXPR wanted_code == NE_EXPR
? integer_one_node : integer_zero_node); ? integer_one_node : integer_zero_node);
...@@ -4158,7 +4156,7 @@ fold_truthop (code, truth_type, lhs, rhs) ...@@ -4158,7 +4156,7 @@ fold_truthop (code, truth_type, lhs, rhs)
field containing them both. field containing them both.
Note that we still must mask the lhs/rhs expressions. Furthermore, Note that we still must mask the lhs/rhs expressions. Furthermore,
the mask must be shifted to account for the shift done by the mask must be shifted to account for the shift done by
make_bit_field_ref. */ make_bit_field_ref. */
if ((ll_bitsize + ll_bitpos == rl_bitpos if ((ll_bitsize + ll_bitpos == rl_bitpos
&& lr_bitsize + lr_bitpos == rr_bitpos) && lr_bitsize + lr_bitpos == rr_bitpos)
...@@ -4243,7 +4241,7 @@ fold_truthop (code, truth_type, lhs, rhs) ...@@ -4243,7 +4241,7 @@ fold_truthop (code, truth_type, lhs, rhs)
const_binop (BIT_IOR_EXPR, l_const, r_const, 0)); const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
} }
/* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a /* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a
constant. */ constant. */
static tree static tree
...@@ -4367,7 +4365,7 @@ extract_muldiv (t, c, code, wide_type) ...@@ -4367,7 +4365,7 @@ extract_muldiv (t, c, code, wide_type)
{ {
tree type = TREE_TYPE (t); tree type = TREE_TYPE (t);
enum tree_code tcode = TREE_CODE (t); enum tree_code tcode = TREE_CODE (t);
tree ctype = (wide_type != 0 && (GET_MODE_SIZE (TYPE_MODE (wide_type)) tree ctype = (wide_type != 0 && (GET_MODE_SIZE (TYPE_MODE (wide_type))
> GET_MODE_SIZE (TYPE_MODE (type))) > GET_MODE_SIZE (TYPE_MODE (type)))
? wide_type : type); ? wide_type : type);
tree t1, t2; tree t1, t2;
...@@ -4587,7 +4585,7 @@ extract_muldiv (t, c, code, wide_type) ...@@ -4587,7 +4585,7 @@ extract_muldiv (t, c, code, wide_type)
/* If these operations "cancel" each other, we have the main /* If these operations "cancel" each other, we have the main
optimizations of this pass, which occur when either constant is a optimizations of this pass, which occur when either constant is a
multiple of the other, in which case we replace this with either an multiple of the other, in which case we replace this with either an
operation or CODE or TCODE. operation or CODE or TCODE.
If we have an unsigned type that is not a sizetype, we canot do If we have an unsigned type that is not a sizetype, we canot do
this since it will change the result if the original computation this since it will change the result if the original computation
...@@ -4667,8 +4665,8 @@ constant_boolean_node (value, type) ...@@ -4667,8 +4665,8 @@ constant_boolean_node (value, type)
return value ? integer_one_node : integer_zero_node; return value ? integer_one_node : integer_zero_node;
else if (TREE_CODE (type) == BOOLEAN_TYPE) else if (TREE_CODE (type) == BOOLEAN_TYPE)
return truthvalue_conversion (value ? integer_one_node : return truthvalue_conversion (value ? integer_one_node :
integer_zero_node); integer_zero_node);
else else
{ {
tree t = build_int_2 (value, 0); tree t = build_int_2 (value, 0);
...@@ -4707,7 +4705,7 @@ count_cond (expr, lim) ...@@ -4707,7 +4705,7 @@ count_cond (expr, lim)
but we can constant-fold them if they have constant operands. */ but we can constant-fold them if they have constant operands. */
tree tree
fold (expr) fold (expr)
tree expr; tree expr;
{ {
register tree t = expr; register tree t = expr;
...@@ -4722,7 +4720,7 @@ fold (expr) ...@@ -4722,7 +4720,7 @@ fold (expr)
if all operands are constant. */ if all operands are constant. */
int wins = 1; int wins = 1;
/* Don't try to process an RTL_EXPR since its operands aren't trees. /* Don't try to process an RTL_EXPR since its operands aren't trees.
Likewise for a SAVE_EXPR that's already been evaluated. */ Likewise for a SAVE_EXPR that's already been evaluated. */
if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t)) != 0) if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t)) != 0)
return t; return t;
...@@ -4734,7 +4732,7 @@ fold (expr) ...@@ -4734,7 +4732,7 @@ fold (expr)
return DECL_INITIAL (t); return DECL_INITIAL (t);
return t; return t;
} }
#ifdef MAX_INTEGER_COMPUTATION_MODE #ifdef MAX_INTEGER_COMPUTATION_MODE
check_max_integer_computation_mode (expr); check_max_integer_computation_mode (expr);
#endif #endif
...@@ -4787,7 +4785,7 @@ fold (expr) ...@@ -4787,7 +4785,7 @@ fold (expr)
else else
/* Strip any conversions that don't change the mode. */ /* Strip any conversions that don't change the mode. */
STRIP_NOPS (op); STRIP_NOPS (op);
if (TREE_CODE (op) == COMPLEX_CST) if (TREE_CODE (op) == COMPLEX_CST)
subop = TREE_REALPART (op); subop = TREE_REALPART (op);
else else
...@@ -4838,7 +4836,7 @@ fold (expr) ...@@ -4838,7 +4836,7 @@ fold (expr)
one of the operands is a comparison and the other is a comparison, a one of the operands is a comparison and the other is a comparison, a
BIT_AND_EXPR with the constant 1, or a truth value. In that case, the BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
code below would make the expression more complex. Change it to a code below would make the expression more complex. Change it to a
TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */ TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
...@@ -4903,7 +4901,7 @@ fold (expr) ...@@ -4903,7 +4901,7 @@ fold (expr)
TREE_OPERAND (TREE_OPERAND (t, 2), 0))); TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
return t; return t;
} }
else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<') else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
return fold (build (COND_EXPR, type, arg0, return fold (build (COND_EXPR, type, arg0,
fold (build1 (code, type, integer_one_node)), fold (build1 (code, type, integer_one_node)),
fold (build1 (code, type, integer_zero_node)))); fold (build1 (code, type, integer_zero_node))));
...@@ -5056,7 +5054,7 @@ fold (expr) ...@@ -5056,7 +5054,7 @@ fold (expr)
&& TREE_CODE (arg1) == COMPOUND_EXPR) && TREE_CODE (arg1) == COMPOUND_EXPR)
return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0), return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
fold (build (code, type, arg0, TREE_OPERAND (arg1, 1)))); fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
switch (code) switch (code)
{ {
case INTEGER_CST: case INTEGER_CST:
...@@ -5101,7 +5099,7 @@ fold (expr) ...@@ -5101,7 +5099,7 @@ fold (expr)
unsigned int final_prec = TYPE_PRECISION (final_type); unsigned int final_prec = TYPE_PRECISION (final_type);
int final_unsignedp = TREE_UNSIGNED (final_type); int final_unsignedp = TREE_UNSIGNED (final_type);
/* In addition to the cases of two conversions in a row /* In addition to the cases of two conversions in a row
handled below, if we are converting something to its own handled below, if we are converting something to its own
type via an object of identical or wider precision, neither type via an object of identical or wider precision, neither
conversion is needed. */ conversion is needed. */
...@@ -5140,7 +5138,7 @@ fold (expr) ...@@ -5140,7 +5138,7 @@ fold (expr)
and the outermost type is wider than the intermediate, or and the outermost type is wider than the intermediate, or
- the initial type is a pointer type and the precisions of the - the initial type is a pointer type and the precisions of the
intermediate and final types differ, or intermediate and final types differ, or
- the final type is a pointer type and the precisions of the - the final type is a pointer type and the precisions of the
initial and intermediate types differ. */ initial and intermediate types differ. */
if (! inside_float && ! inter_float && ! final_float if (! inside_float && ! inter_float && ! final_float
&& (inter_prec > inside_prec || inter_prec > final_prec) && (inter_prec > inside_prec || inter_prec > final_prec)
...@@ -5338,12 +5336,12 @@ fold (expr) ...@@ -5338,12 +5336,12 @@ fold (expr)
} }
/* Reassociate (plus (plus (mult) (foo)) (mult)) as /* Reassociate (plus (plus (mult) (foo)) (mult)) as
(plus (plus (mult) (mult)) (foo)) so that we can (plus (plus (mult) (mult)) (foo)) so that we can
take advantage of the factoring cases below. */ take advantage of the factoring cases below. */
if ((TREE_CODE (arg0) == PLUS_EXPR if ((TREE_CODE (arg0) == PLUS_EXPR
&& TREE_CODE (arg1) == MULT_EXPR) && TREE_CODE (arg1) == MULT_EXPR)
|| (TREE_CODE (arg1) == PLUS_EXPR || (TREE_CODE (arg1) == PLUS_EXPR
&& TREE_CODE (arg0) == MULT_EXPR)) && TREE_CODE (arg0) == MULT_EXPR))
{ {
tree parg0, parg1, parg, marg; tree parg0, parg1, parg, marg;
...@@ -5424,7 +5422,7 @@ fold (expr) ...@@ -5424,7 +5422,7 @@ fold (expr)
} }
if (same) if (same)
return fold (build (MULT_EXPR, type, return fold (build (MULT_EXPR, type,
fold (build (PLUS_EXPR, type, alt0, alt1)), fold (build (PLUS_EXPR, type, alt0, alt1)),
same)); same));
} }
...@@ -5445,13 +5443,13 @@ fold (expr) ...@@ -5445,13 +5443,13 @@ fold (expr)
/* (A << B) + (A >> (Z - B)) if A is unsigned and Z is the size of A /* (A << B) + (A >> (Z - B)) if A is unsigned and Z is the size of A
is a rotate of A by B bits. */ is a rotate of A by B bits. */
{ {
register enum tree_code code0, code1; register enum tree_code code0, code1;
code0 = TREE_CODE (arg0); code0 = TREE_CODE (arg0);
code1 = TREE_CODE (arg1); code1 = TREE_CODE (arg1);
if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR) if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
|| (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR)) || (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
&& operand_equal_p (TREE_OPERAND (arg0, 0), && operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1,0), 0) TREE_OPERAND (arg1, 0), 0)
&& TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))) && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
{ {
register tree tree01, tree11; register tree tree01, tree11;
...@@ -5464,53 +5462,52 @@ fold (expr) ...@@ -5464,53 +5462,52 @@ fold (expr)
code01 = TREE_CODE (tree01); code01 = TREE_CODE (tree01);
code11 = TREE_CODE (tree11); code11 = TREE_CODE (tree11);
if (code01 == INTEGER_CST if (code01 == INTEGER_CST
&& code11 == INTEGER_CST && code11 == INTEGER_CST
&& TREE_INT_CST_HIGH (tree01) == 0 && TREE_INT_CST_HIGH (tree01) == 0
&& TREE_INT_CST_HIGH (tree11) == 0 && TREE_INT_CST_HIGH (tree11) == 0
&& ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11)) && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
== TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))))) == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0), return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
code0 == LSHIFT_EXPR ? tree01 : tree11); code0 == LSHIFT_EXPR ? tree01 : tree11);
else if (code11 == MINUS_EXPR) else if (code11 == MINUS_EXPR)
{ {
tree tree110, tree111; tree tree110, tree111;
tree110 = TREE_OPERAND (tree11, 0); tree110 = TREE_OPERAND (tree11, 0);
tree111 = TREE_OPERAND (tree11, 1); tree111 = TREE_OPERAND (tree11, 1);
STRIP_NOPS (tree110); STRIP_NOPS (tree110);
STRIP_NOPS (tree111); STRIP_NOPS (tree111);
if (TREE_CODE (tree110) == INTEGER_CST if (TREE_CODE (tree110) == INTEGER_CST
&& 0 == compare_tree_int (tree110, && 0 == compare_tree_int (tree110,
TYPE_PRECISION TYPE_PRECISION
(TREE_TYPE (TREE_OPERAND (TREE_TYPE (TREE_OPERAND
(arg0, 0)))) (arg0, 0))))
&& operand_equal_p (tree01, tree111, 0)) && operand_equal_p (tree01, tree111, 0))
return build ((code0 == LSHIFT_EXPR return build ((code0 == LSHIFT_EXPR
? LROTATE_EXPR ? LROTATE_EXPR
: RROTATE_EXPR), : RROTATE_EXPR),
type, TREE_OPERAND (arg0, 0), tree01); type, TREE_OPERAND (arg0, 0), tree01);
} }
else if (code01 == MINUS_EXPR) else if (code01 == MINUS_EXPR)
{ {
tree tree010, tree011; tree tree010, tree011;
tree010 = TREE_OPERAND (tree01, 0); tree010 = TREE_OPERAND (tree01, 0);
tree011 = TREE_OPERAND (tree01, 1); tree011 = TREE_OPERAND (tree01, 1);
STRIP_NOPS (tree010); STRIP_NOPS (tree010);
STRIP_NOPS (tree011); STRIP_NOPS (tree011);
if (TREE_CODE (tree010) == INTEGER_CST if (TREE_CODE (tree010) == INTEGER_CST
&& 0 == compare_tree_int (tree010, && 0 == compare_tree_int (tree010,
TYPE_PRECISION TYPE_PRECISION
(TREE_TYPE (TREE_OPERAND (TREE_TYPE (TREE_OPERAND
(arg0, 0)))) (arg0, 0))))
&& operand_equal_p (tree11, tree011, 0)) && operand_equal_p (tree11, tree011, 0))
return build ((code0 != LSHIFT_EXPR return build ((code0 != LSHIFT_EXPR
? LROTATE_EXPR ? LROTATE_EXPR
: RROTATE_EXPR), : RROTATE_EXPR),
type, TREE_OPERAND (arg0, 0), tree11); type, TREE_OPERAND (arg0, 0), tree11);
} }
} }
} }
associate: associate:
/* In most languages, can't associate operations on floats through /* In most languages, can't associate operations on floats through
parentheses. Rather than remember where the parentheses were, we parentheses. Rather than remember where the parentheses were, we
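The rotate recognition in the hunk above corresponds to the familiar portable C idiom; a minimal illustration (the 32-bit width is an assumption here, and n is restricted to 0 < n < 32 to avoid an out-of-range shift):

    #include <stdint.h>

    /* (x << n) and (x >> (32 - n)) touch disjoint bits, so '+' and '|'
       give the same value; fold turns either spelling into one rotate.  */
    static uint32_t
    rotate_left32 (uint32_t x, unsigned int n)
    {
      return (x << n) | (x >> (32 - n));
    }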
...@@ -5570,7 +5567,7 @@ fold (expr) ...@@ -5570,7 +5567,7 @@ fold (expr)
/* (-A) - CST -> (-CST) - A for floating point (what about ints ?) */ /* (-A) - CST -> (-CST) - A for floating point (what about ints ?) */
if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST) if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST)
return return
fold (build (MINUS_EXPR, type, fold (build (MINUS_EXPR, type,
build_real (TREE_TYPE (arg1), build_real (TREE_TYPE (arg1),
REAL_VALUE_NEGATE (TREE_REAL_CST (arg1))), REAL_VALUE_NEGATE (TREE_REAL_CST (arg1))),
TREE_OPERAND (arg0, 0))); TREE_OPERAND (arg0, 0)));
...@@ -5607,7 +5604,7 @@ fold (expr) ...@@ -5607,7 +5604,7 @@ fold (expr)
return non_lvalue (convert (type, arg0)); return non_lvalue (convert (type, arg0));
} }
/* Fold &x - &x. This can happen from &x.foo - &x. /* Fold &x - &x. This can happen from &x.foo - &x.
This is unsafe for certain floats even in non-IEEE formats. This is unsafe for certain floats even in non-IEEE formats.
In IEEE, it is unsafe because it does wrong for NaNs. In IEEE, it is unsafe because it does wrong for NaNs.
Also note that operand_equal_p is always false if an operand Also note that operand_equal_p is always false if an operand
...@@ -5623,7 +5620,7 @@ fold (expr) ...@@ -5623,7 +5620,7 @@ fold (expr)
/* (-A) * (-B) -> A * B */ /* (-A) * (-B) -> A * B */
if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == NEGATE_EXPR) if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == NEGATE_EXPR)
return fold (build (MULT_EXPR, type, TREE_OPERAND (arg0, 0), return fold (build (MULT_EXPR, type, TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 0))); TREE_OPERAND (arg1, 0)));
if (! FLOAT_TYPE_P (type)) if (! FLOAT_TYPE_P (type))
{ {
...@@ -5682,7 +5679,7 @@ fold (expr) ...@@ -5682,7 +5679,7 @@ fold (expr)
/* Convert (or (not arg0) (not arg1)) to (not (and (arg0) (arg1))). /* Convert (or (not arg0) (not arg1)) to (not (and (arg0) (arg1))).
This results in more efficient code for machines without a NAND This results in more efficient code for machines without a NAND
instruction. Combine will canonicalize to the first form instruction. Combine will canonicalize to the first form
which will allow use of NAND instructions provided by the which will allow use of NAND instructions provided by the
backend if they exist. */ backend if they exist. */
...@@ -5716,10 +5713,10 @@ fold (expr) ...@@ -5716,10 +5713,10 @@ fold (expr)
&& integer_zerop (const_binop (BIT_AND_EXPR, && integer_zerop (const_binop (BIT_AND_EXPR,
TREE_OPERAND (arg0, 1), TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg1, 1), 0))) TREE_OPERAND (arg1, 1), 0)))
{ {
code = BIT_IOR_EXPR; code = BIT_IOR_EXPR;
goto bit_ior; goto bit_ior;
} }
/* See if this can be simplified into a rotate first. If that /* See if this can be simplified into a rotate first. If that
is unsuccessful continue in the association code. */ is unsuccessful continue in the association code. */
...@@ -5760,7 +5757,7 @@ fold (expr) ...@@ -5760,7 +5757,7 @@ fold (expr)
/* Convert (and (not arg0) (not arg1)) to (not (or (arg0) (arg1))). /* Convert (and (not arg0) (not arg1)) to (not (or (arg0) (arg1))).
This results in more efficient code for machines without a NOR This results in more efficient code for machines without a NOR
instruction. Combine will canonicalize to the first form instruction. Combine will canonicalize to the first form
which will allow use of NOR instructions provided by the which will allow use of NOR instructions provided by the
backend if they exist. */ backend if they exist. */
...@@ -5824,10 +5821,10 @@ fold (expr) ...@@ -5824,10 +5821,10 @@ fold (expr)
REAL_VALUE_TYPE r; REAL_VALUE_TYPE r;
r = TREE_REAL_CST (arg1); r = TREE_REAL_CST (arg1);
if (exact_real_inverse (TYPE_MODE(TREE_TYPE(arg0)), &r)) if (exact_real_inverse (TYPE_MODE(TREE_TYPE(arg0)), &r))
{ {
tem = build_real (type, r); tem = build_real (type, r);
return fold (build (MULT_EXPR, type, arg0, tem)); return fold (build (MULT_EXPR, type, arg0, tem));
} }
} }
} }
goto binary; goto binary;
...@@ -5852,7 +5849,7 @@ fold (expr) ...@@ -5852,7 +5849,7 @@ fold (expr)
&& multiple_of_p (type, arg0, arg1)) && multiple_of_p (type, arg0, arg1))
return fold (build (EXACT_DIV_EXPR, type, arg0, arg1)); return fold (build (EXACT_DIV_EXPR, type, arg0, arg1));
if (TREE_CODE (arg1) == INTEGER_CST if (TREE_CODE (arg1) == INTEGER_CST
&& 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1, && 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1,
code, NULL_TREE))) code, NULL_TREE)))
return convert (type, tem); return convert (type, tem);
...@@ -6097,10 +6094,10 @@ fold (expr) ...@@ -6097,10 +6094,10 @@ fold (expr)
if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST) if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == REAL_CST)
return return
fold (build fold (build
(swap_tree_comparison (code), type, (swap_tree_comparison (code), type,
TREE_OPERAND (arg0, 0), TREE_OPERAND (arg0, 0),
build_real (TREE_TYPE (arg1), build_real (TREE_TYPE (arg1),
REAL_VALUE_NEGATE (TREE_REAL_CST (arg1))))); REAL_VALUE_NEGATE (TREE_REAL_CST (arg1)))));
/* IEEE doesn't distinguish +0 and -0 in comparisons. */ /* IEEE doesn't distinguish +0 and -0 in comparisons. */
/* a CMP (-0) -> a CMP 0 */ /* a CMP (-0) -> a CMP 0 */
if (TREE_CODE (arg1) == REAL_CST if (TREE_CODE (arg1) == REAL_CST
...@@ -6109,7 +6106,6 @@ fold (expr) ...@@ -6109,7 +6106,6 @@ fold (expr)
build_real (TREE_TYPE (arg1), dconst0))); build_real (TREE_TYPE (arg1), dconst0)));
} }
/* If one arg is a constant integer, put it last. */ /* If one arg is a constant integer, put it last. */
if (TREE_CODE (arg0) == INTEGER_CST if (TREE_CODE (arg0) == INTEGER_CST
&& TREE_CODE (arg1) != INTEGER_CST) && TREE_CODE (arg1) != INTEGER_CST)
...@@ -6197,7 +6193,6 @@ fold (expr) ...@@ -6197,7 +6193,6 @@ fold (expr)
convert (TREE_TYPE (varop), convert (TREE_TYPE (varop),
mask))); mask)));
} }
t = build (code, type, t = build (code, type,
(constopnum == 0) ? newconst : varop, (constopnum == 0) ? newconst : varop,
...@@ -6259,7 +6254,6 @@ fold (expr) ...@@ -6259,7 +6254,6 @@ fold (expr)
convert (TREE_TYPE (varop), convert (TREE_TYPE (varop),
mask))); mask)));
} }
t = build (code, type, t = build (code, type,
(constopnum == 0) ? newconst : varop, (constopnum == 0) ? newconst : varop,
...@@ -6334,7 +6328,7 @@ fold (expr) ...@@ -6334,7 +6328,7 @@ fold (expr)
|| (TREE_CODE (t1) == INTEGER_CST || (TREE_CODE (t1) == INTEGER_CST
&& int_fits_type_p (t1, TREE_TYPE (tem))))) && int_fits_type_p (t1, TREE_TYPE (tem)))))
return fold (build (code, type, tem, convert (TREE_TYPE (tem), t1))); return fold (build (code, type, tem, convert (TREE_TYPE (tem), t1)));
/* If this is comparing a constant with a MIN_EXPR or a MAX_EXPR of a /* If this is comparing a constant with a MIN_EXPR or a MAX_EXPR of a
constant, we can simplify it. */ constant, we can simplify it. */
else if (TREE_CODE (arg1) == INTEGER_CST else if (TREE_CODE (arg1) == INTEGER_CST
...@@ -6358,7 +6352,7 @@ fold (expr) ...@@ -6358,7 +6352,7 @@ fold (expr)
build (GE_EXPR, type, TREE_OPERAND (arg0, 0), tem), build (GE_EXPR, type, TREE_OPERAND (arg0, 0), tem),
build (LE_EXPR, type, build (LE_EXPR, type,
TREE_OPERAND (arg0, 0), arg1))); TREE_OPERAND (arg0, 0), arg1)));
/* If this is an EQ or NE comparison with zero and ARG0 is /* If this is an EQ or NE comparison with zero and ARG0 is
(1 << foo) & bar, convert it to (bar >> foo) & 1. Both require (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
two operations, but the latter can be done in one less insn two operations, but the latter can be done in one less insn
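The rewrite described above is easy to check by hand: both forms isolate bit FOO of BAR before the comparison with zero, but the second needs no shifted mask constant. Illustrative only:

    /* ((1 << foo) & bar) != 0  and  ((bar >> foo) & 1) != 0  test the
       same bit; the comment above explains why the second is preferred.  */
    int bit_set_a (unsigned int bar, unsigned int foo) { return ((1u << foo) & bar) != 0; }
    int bit_set_b (unsigned int bar, unsigned int foo) { return ((bar >> foo) & 1u) != 0; }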
...@@ -6435,7 +6429,7 @@ fold (expr) ...@@ -6435,7 +6429,7 @@ fold (expr)
&& TREE_UNSIGNED (TREE_TYPE (arg0)) && TREE_UNSIGNED (TREE_TYPE (arg0))
&& TREE_CODE (arg1) == LSHIFT_EXPR && TREE_CODE (arg1) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg1, 0))) && integer_onep (TREE_OPERAND (arg1, 0)))
return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type, return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0, build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
TREE_OPERAND (arg1, 1)), TREE_OPERAND (arg1, 1)),
convert (TREE_TYPE (arg0), integer_zero_node)); convert (TREE_TYPE (arg0), integer_zero_node));
...@@ -6578,7 +6572,7 @@ fold (expr) ...@@ -6578,7 +6572,7 @@ fold (expr)
&& (TREE_INT_CST_LOW (arg1) && (TREE_INT_CST_LOW (arg1)
== ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1) == ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1)
&& TREE_UNSIGNED (TREE_TYPE (arg1))) && TREE_UNSIGNED (TREE_TYPE (arg1)))
switch (TREE_CODE (t)) switch (TREE_CODE (t))
{ {
case LE_EXPR: case LE_EXPR:
...@@ -6951,7 +6945,7 @@ fold (expr) ...@@ -6951,7 +6945,7 @@ fold (expr)
case LT_EXPR: case LT_EXPR:
/* In C++ a ?: expression can be an lvalue, so put the /* In C++ a ?: expression can be an lvalue, so put the
operand which will be used if they are equal first operand which will be used if they are equal first
so that we can convert this back to the so that we can convert this back to the
corresponding COND_EXPR. */ corresponding COND_EXPR. */
return pedantic_non_lvalue return pedantic_non_lvalue
(convert (type, fold (build (MIN_EXPR, comp_type, (convert (type, fold (build (MIN_EXPR, comp_type,
...@@ -7066,7 +7060,7 @@ fold (expr) ...@@ -7066,7 +7060,7 @@ fold (expr)
if (integer_onep (TREE_OPERAND (t, 1)) if (integer_onep (TREE_OPERAND (t, 1))
&& integer_zerop (TREE_OPERAND (t, 2)) && integer_zerop (TREE_OPERAND (t, 2))
/* If we try to convert TREE_OPERAND (t, 0) to our type, the /* If we try to convert TREE_OPERAND (t, 0) to our type, the
call to fold will try to move the conversion inside call to fold will try to move the conversion inside
a COND, which will recurse. In that case, the COND_EXPR a COND, which will recurse. In that case, the COND_EXPR
is probably the best choice, so leave it alone. */ is probably the best choice, so leave it alone. */
&& type == TREE_TYPE (arg0)) && type == TREE_TYPE (arg0))
...@@ -7146,7 +7140,7 @@ fold (expr) ...@@ -7146,7 +7140,7 @@ fold (expr)
tree arg01; tree arg01;
if (kind0 == '1' || code0 == TRUTH_NOT_EXPR) if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
return fold (build1 (code0, type, return fold (build1 (code0, type,
fold (build1 (CLEANUP_POINT_EXPR, fold (build1 (CLEANUP_POINT_EXPR,
TREE_TYPE (arg00), arg00)))); TREE_TYPE (arg00), arg00))));
......