/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "diagnostic.h"
#include "intl.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "ggc.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-data-ref.h"
#include "params.h"
#include "flags.h"
#include "toplev.h"
#include "tree-inline.h"
#include "gmp.h"

#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

typedef struct
{
  mpz_t below, up;
} bounds;


/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  double_int off;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
	break;

      *var = op0;
      /* Always sign extend the offset.  */
      off = double_int_sext (tree_to_double_int (op1),
			     TYPE_PRECISION (type));
      mpz_set_double_int (offset, off, false);
      /* For MINUS_EXPR the offset enters with the opposite sign.  */
      if (negate)
	mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      off = tree_to_double_int (expr);
      mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
      break;

    default:
      break;
    }
}

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (tree type, tree var, mpz_t off,
		       mpz_t min, mpz_t max)
{
  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  get_type_static_bounds (type, min, max);
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
				    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
	  them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
	  expressions are var + X - M and var + Y, where M is the range of
	  the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Then the values of the
	  expressions are var + X and var + Y + M, and their difference
	  again is X - Y - M.
       Therefore, if the arithmetic in the type does not overflow, then the
       bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */
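  /* For instance, in an 8-bit unsigned type M = 256; with X = 10, Y = 3 and
     var = 250, var + X wraps around to 4 while var + Y is 253, so the
     computed difference is 4 - 253 = -249 = X - Y - M, matching the lower
     of the bounds (X - Y - M, X - Y) derived below.  */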

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  mpz_set_double_int (m, double_int_mask (TYPE_PRECISION (type)), true);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
	mpz_sub (bnds->below, bnds->below, m);
      else
	mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */
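/* For instance, the entry guard i_1 + 1 < n_2 (C0 = i_1 + 1, C1 = n_2) with
   VARX = i_1, OFFX = 0, VARY = n_2, OFFY = 0 lets us, in a type whose
   arithmetic does not wrap, lower BNDS->up to -2, i.e. i_1 - n_2 <= -2.  */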

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
			   tree vary, mpz_t offy,
			   tree c0, enum tree_code cmp, tree c1,
			   bounds *bnds)
{
  tree varc0, varc1, tmp, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
	 a guard is unlikely to appear, so we do not bother with handling
	 it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except for
	 the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same type
	 as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
	  && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
	{
	  cmp = GT_EXPR;
	  break;
	}
      if (TYPE_MAX_VALUE (type)
	  && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
	{
	  cmp = LT_EXPR;
	  break;
	}

      return;
    default:
      return;
    } 

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      tmp = varc0; varc0 = varc1; varc1 = tmp;
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      tmp = varx; varx = vary; vary = tmp;
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if
   
     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
	VARX + OFFC0 overflows, but VARX + OFFX does not.
	This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
	VARY + OFFC1 underflows and VARY + OFFY does not.
	This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
	      || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
	      || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
	mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
	{
	  mpz_neg (bnd, bnd);
	  if (mpz_cmp (bnds->below, bnd) < 0)
	    mpz_set (bnds->below, bnd);
	}
      else
	{
	  if (mpz_cmp (bnd, bnds->up) < 0)
	    mpz_set (bnds->up, bnd);
	}
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.
 
   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offset by an
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */
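/* For instance, for X = i_1 + 8 and Y = i_1, BNDS is [8, 8] when the
   arithmetic in the type cannot wrap, and [8 - M, 8] (M being 2^precision
   of the type) when it can.  */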

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree cond, c0, c1;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  Matters are a bit more complicated in case the
	 addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
	 estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (type, varx, offx, minx, maxx);
      determine_value_range (type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = COND_EXPR_COND (last_stmt (e->src));
      if (!COMPARISON_CLASS_P (cond))
	continue;
      c0 = TREE_OPERAND (cond, 0);
      cmp = TREE_CODE (cond);
      c1 = TREE_OPERAND (cond, 1);

      if (e->flags & EDGE_FALSE_VALUE)
	cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
				 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, double_int delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  mpz_set_double_int (mdelta, delta, false);

  mpz_init (max);
  mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */
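/* The inverse is found by exponentiation: X is odd here (the caller shifts
   out the trailing zero bits of the step first), and for odd X we have
   X^(2^(s-1)) == 1 (mod 2^s), so X^(2^(s-1) - 1) is the inverse of X.  The
   loops below accumulate X^(2^ctr - 1) by repeatedly multiplying the result
   by the current power of X and squaring that power.  */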

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
	{
	  irslt *= ix;
	  ix *= ix;
	}
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
	{
	  rslt = int_const_binop (MULT_EXPR, rslt, x, 0);
	  x = int_const_binop (MULT_EXPR, x, x, 0);
	}
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask, 0);
    }

  return rslt;
}

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C, assuming that the loop is not infinite.  If
   NO_OVERFLOW is true, then the control variable of the loop does not
   overflow.  If NO_OVERFLOW is true or BNDS.below >= 0, then BNDS.up
   contains the upper bound on the value of C.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
			     bounds *bnds)
{
  double_int max;
  mpz_t d;

  /* If the control variable does not overflow, the number of iterations is
     at most c / s.  Otherwise it is at most the period of the control
     variable.  */
  if (!no_overflow && !multiple_of_p (TREE_TYPE (c), c, s))
    {
      max = double_int_mask (TYPE_PRECISION (TREE_TYPE (c))
			     - tree_low_cst (num_ending_zeros (s), 1));
      mpz_set_double_int (bnd, max, true);
      return;
    }

  /* Determine the upper bound on C.  */
  if (no_overflow || mpz_sgn (bnds->below) >= 0)
    mpz_set (bnd, bnds->up);
  else if (TREE_CODE (c) == INTEGER_CST)
    mpz_set_double_int (bnd, tree_to_double_int (c), true);
  else
    mpz_set_double_int (bnd, double_int_mask (TYPE_PRECISION (TREE_TYPE (c))),
			true);

  mpz_init (d);
  mpz_set_double_int (d, tree_to_double_int (s), true);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  NEVER_INFINITE is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and possibly
   set NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
			 struct tree_niter_desc *niter, bool never_infinite,
			 bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
			fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv->base),
		       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, final),
		       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds);
  niter->max = mpz_get_double_int (niter_type, max, false);
  mpz_clear (max);

  /* First the trivial case -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let d = gcd (step, size of mode).  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
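  /* For instance, in an 8-bit unsigned type (size of mode = 256) with the
     exit test 6 * i != 30 and i counting from 0: d = gcd (6, 256) = 2,
     c/d = 15, s/d = 3, and the inverse of 3 modulo 128 is 43 (3 * 43 = 129);
     the number of iterations is (43 * 15) mod 128 = 645 mod 128 = 5, and
     indeed 6 * 5 == 30.  */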
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
			       (TYPE_PRECISION (niter_type)
				- tree_low_cst (bits, 1)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
			       build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!never_infinite)
    {
      /* If we cannot assume that the loop is not infinite, record the
	 assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
				assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
			       struct tree_niter_desc *niter,
			       tree *delta, tree step,
			       bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  mpz_set_double_int (mmod, tree_to_double_int (mod), true);
  mpz_neg (mmod, mmod);

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base <= iv1->base + MOD.  */
      if (!iv1->no_overflow && !integer_zerop (mod))
	{
	  bound = fold_build2 (MINUS_EXPR, type,
			       TYPE_MAX_VALUE (type1), tmod);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build2 (PLUS_EXPR, type1,
					   iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base - MOD <= iv1->base. */
      if (!iv0->no_overflow && !integer_zerop (mod))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type1), tmod);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build2 (MINUS_EXPR, type1,
					   iv0->base, tmod),
			      iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions,
				      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				      niter->may_be_zero,
				      noloop);
  bounds_add (bnds, tree_to_double_int (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */
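/* For example, for (i = 0; i < n; i += 4) in an 8-bit unsigned type with
   constant iv0->base = 0: the last value of i before the increment wraps
   is 252, so the assumption recorded below is iv1->base <= 252
   (i.e. MAX - (MAX - base) % step).  */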

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
	return true;

      /* If iv0->base is a constant, we can determine the last value before
	 overflow precisely; otherwise we conservatively assume
	 MAX - STEP + 1.  */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, TYPE_MAX_VALUE (type)),
			   fold_convert (niter_type, iv0->base));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
			   TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
				iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
	return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, iv1->base),
			   fold_convert (niter_type, TYPE_MIN_VALUE (type)));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
			   TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
				iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  double_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if 
     
     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
   
     where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision
     (without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables us
     to prove the lower bound.
     
     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */
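  /* For instance, for (i = 0; i < n; i += 4) computed in an 8-bit unsigned
     type has MAX - step + 1 = 252; if n were 255, that bound is violated and
     the loop indeed never exits (i only assumes values that are multiples
     of 4), so the formula (n - 0 + 3) / 4 would be meaningless.  */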

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = tree_to_double_int (iv0->step);
  else
    {
      dstep = double_int_sext (tree_to_double_int (iv1->step),
			       TYPE_PRECISION (type));
      dstep = double_int_neg (dstep);
    }

  mpz_init (mstep);
  mpz_set_double_int (mstep, dstep, true);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
		   /* For pointers, only values lying inside a single object
		      can be compared or manipulated by pointer arithmetics.
		      Gcc in general does not allow or handle objects larger
		      than half of the address space, hence the upper bound
		      is satisfied for pointers.  */
		   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
			  iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
	 the address 0 never belongs to any object, we can assume this for
	 pointers.  */
      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type), diff);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	}

      /* And then we can compute iv0->base - diff, and compare it with
	 iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
			  iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MAX_VALUE (type), diff);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	}

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
					niter->may_be_zero, mbz);
    }
}

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  */

static bool
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
			 struct tree_niter_desc *niter,
			 bool never_infinite ATTRIBUTE_UNUSED,
			 bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv1->base),
		       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

	 or

	 for (i = iv1->base; i > iv0->base; i--).

	 In both cases # of iterations is iv1->base - iv0->base, assuming that
	 iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
	 iv1->base - iv0->base, computed in full precision.  If the difference
	 is nonnegative, we are done, otherwise we must record the
	 condition.  */

      if (mpz_sgn (bnds->below) < 0)
	niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
					  iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = mpz_get_double_int (niter_type, bnds->up, false);
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
			 fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
				     bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
	 zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
		   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  mpz_set_double_int (mstep, tree_to_double_int (step), true);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = mpz_get_double_int (niter_type, tmp, false);
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  NEVER_INFINITE is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
			 struct tree_niter_desc *niter, bool never_infinite,
			 bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  */

  if (!never_infinite)
    {
      if (integer_nonzerop (iv0->step))
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv1->base, TYPE_MAX_VALUE (type1));
      else
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv0->base, TYPE_MIN_VALUE (type1));

      if (integer_zerop (assumption))
	return false;
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    iv1->base = fold_build2 (PLUS_EXPR, type1,
			     iv1->base, build_int_cst (type1, 1));
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
			     iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, double_int_one, type1);

  return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite, bnds);
}

/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (dump_file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on the left-hand side of the
   comparison is IV0, the one on the right-hand side is IV1.  Both induction
   variables must have type TYPE, which must be an integer or pointer type.
   The steps of the ivs must be constants (or NULL_TREE, which is interpreted
   as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
			   tree type, affine_iv *iv0, enum tree_code code,
			   affine_iv *iv1, struct tree_niter_desc *niter,
			   bool only_exit)
{
  bool never_infinite, ret;
  bounds bnds;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of the information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = double_int_zero;

  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      SWAP (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (!only_exit)
    {
      /* If this is not the only possible exit from the loop, the information
	 that the induction variables cannot overflow as derived from
	 signedness analysis cannot be relied upon.  We use them e.g. in the
	 following way:  given loop for (i = 0; i <= n; i++), if i is
	 signed, it cannot overflow, thus this loop is equivalent to
	 for (i = 0; i < n + 1; i++);  however, if n == MAX, but the loop
	 is exited in some other way before i overflows, this transformation
	 is incorrect (the new loop exits immediately).  */
      iv0->no_overflow = false;
      iv1->no_overflow = false;
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
	 to the same object.  If they do, the control variable cannot wrap
	 (as wrap around the bounds of memory will never return a pointer
	 that would be guaranteed to point to the same object, even if we
	 avoid undefined behavior by casting to size_t and back).  The
	 restrictions on pointer arithmetics and comparisons of pointers
	 ensure that using the no-overflow assumptions is correct in this
	 case even if ONLY_EXIT is false.  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow, the loop obviously
     cannot be infinite.  */
  if (!integer_zerop (iv0->step) && iv0->no_overflow)
    never_infinite = true;
  else if (!integer_zerop (iv1->step) && iv1->no_overflow)
    never_infinite = true;
  else
    never_infinite = false;

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
     practice, but it is simple enough to manage.  */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      if (code != NE_EXPR)
	return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, type,
					   iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = build_int_cst (type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
	return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
	return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  if (integer_zerop (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = double_int_zero;
      return true;
    }

  /* OK, now we know we have a sensible loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
	       "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
	       code == NE_EXPR ? "!="
	       : code == LT_EXPR ? "<"
	       : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
				     never_infinite, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (type, iv0, iv1, niter, never_infinite,
				     &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (type, iv0, iv1, niter, never_infinite,
				     &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
	{
	  fprintf (dump_file, "  result:\n");
	  if (!integer_nonzerop (niter->assumptions))
	    {
	      fprintf (dump_file, "    under assumptions ");
	      print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }

	  if (!integer_zerop (niter->may_be_zero))
	    {
	      fprintf (dump_file, "    zero if ");
	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }

	  fprintf (dump_file, "    # of iterations ");
	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
	  fprintf (dump_file, ", bounded by ");
	  dump_double_int (dump_file, niter->max, true);
	  fprintf (dump_file, "\n");
	}
      else
	fprintf (dump_file, "  failed\n\n");
    }
  return ret;
}

/* Substitute NEW_TREE for OLD in EXPR and fold the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr) && !GIMPLE_STMT_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
	continue;

      if (!ret)
	ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}

/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  */

tree
expand_simple_operations (tree expr)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, stmt;
  enum tree_code code;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
	{
	  e = TREE_OPERAND (expr, i);
	  ee = expand_simple_operations (e);
	  if (e == ee)
	    continue;

	  if (!ret)
	    ret = copy_node (expr);

	  TREE_OPERAND (ret, i) = ee;
	}

      if (!ret)
	return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  if (TREE_CODE (expr) != SSA_NAME)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (TREE_CODE (stmt) == PHI_NODE)
    {
      basic_block src, dest;

      if (PHI_NUM_ARGS (stmt) != 1)
	return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
	 could break loop-closed SSA form restrictions.  */
      dest = bb_for_stmt (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
	  && src->loop_father != dest->loop_father)
	return expr;

      return expand_simple_operations (e);
    }
  if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
    return expr;

  e = GIMPLE_STMT_OPERAND (stmt, 1);
  if (/* Casts are simple.  */
      TREE_CODE (e) != NOP_EXPR
      && TREE_CODE (e) != CONVERT_EXPR
      /* Copies are simple.  */
      && TREE_CODE (e) != SSA_NAME
      /* Assignments of invariants are simple.  */
      && !is_gimple_min_invariant (e)
      /* And increments and decrements by a constant are simple.  */
      && !((TREE_CODE (e) == PLUS_EXPR
	    || TREE_CODE (e) == MINUS_EXPR
	    || TREE_CODE (e) == POINTER_PLUS_EXPR)
	   && is_gimple_min_invariant (TREE_OPERAND (e, 1))))
    return expr;

  return expand_simple_operations (e);
}

/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, te, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
     propagation, and vice versa.  Fold does not handle this, since it is
     considered too expensive.  */
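  /* For instance, if COND is n_1 == 0, the expression n_1 + 1 > 5 is
     rewritten to 0 + 1 > 5, which folds to false.  */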
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we cannot simplify expr
	 using this fact.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return boolean_true_node;
    }

  te = expand_simple_operations (expr);

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
  if (e && integer_zerop (e))
    return e;

  return expr;
}

/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}

/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

static tree
simplify_using_initial_conditions (struct loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  tree cond;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  /* Limit walking the dominators to avoid quadratic behavior in
     the number of BBs times the number of loops in degenerate
     cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = COND_EXPR_COND (last_stmt (e->src));
      if (e->flags & EDGE_FALSE_VALUE)
	cond = invert_truthvalue (cond);
      expr = tree_simplify_using_condition (cond, expr);
      ++cnt;
    }

  return expr;
}

/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP.  Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible).  */

static tree
simplify_using_outer_evolutions (struct loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}

/* Returns true if EXIT is the only possible exit from LOOP.  */

static bool
loop_only_exit_p (const struct loop *loop, const_edge exit)
{
  basic_block *body;
  block_stmt_iterator bsi;
  unsigned i;
  tree call;

  if (exit != single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = bsi_start (body[i]); !bsi_end_p (bsi); bsi_next (&bsi))
	{
	  call = get_call_expr_in (bsi_stmt (bsi));
	  if (call && TREE_SIDE_EFFECTS (call))
	    {
	      free (body);
	      return false;
	    }
	}
    }

  free (body);
  return true;
}

/* Stores description of number of iterations of LOOP derived from
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some
   useful information could be derived (and fields of NITER have
   meaning described in comments at struct tree_niter_desc
   declaration), false otherwise.  If WARN is true and
   -Wunsafe-loop-optimizations was given, warn if the optimizer is
   going to use potentially unsafe assumptions.  */
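/* As an illustration (with made-up SSA names), for an exit guarded by
   "if (i_1 >= n_2) goto out" where i_1 is {0, +, 1} and n_2 is loop
   invariant, the condition for staying in the loop is i_1 < n_2, so iv0
   becomes {0, +, 1} and iv1 becomes {n_2, +, 0}; number_of_iterations_cond
   then produces a NITER expression symbolic in n_2, a MAY_BE_ZERO condition
   covering the case that the loop body is not entered at all, and
   ASSUMPTIONS ruling out overflow of the control variable.  */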

bool
number_of_iterations_exit (struct loop *loop, edge exit,
			   struct tree_niter_desc *niter,
			   bool warn)
{
  tree stmt, cond, type;
  tree op0, op1;
  enum tree_code code;
  affine_iv iv0, iv1;

  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
    return false;

  niter->assumptions = boolean_false_node;
  stmt = last_stmt (exit->src);
  if (!stmt || TREE_CODE (stmt) != COND_EXPR)
    return false;

  /* We want the condition for staying inside loop.  */
  cond = COND_EXPR_COND (stmt);
  if (exit->flags & EDGE_TRUE_VALUE)
    cond = invert_truthvalue (cond);

  code = TREE_CODE (cond);
  switch (code)
    {
    case GT_EXPR:
    case GE_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;

    default:
      return false;
    }
  
  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);
  type = TREE_TYPE (op0);

  if (TREE_CODE (type) != INTEGER_TYPE
      && !POINTER_TYPE_P (type))
    return false;
     
  if (!simple_iv (loop, stmt, op0, &iv0, false))
    return false;
  if (!simple_iv (loop, stmt, op1, &iv1, false))
    return false;

  /* We don't want to see undefined signed overflow warnings while
     computing the number of iterations.  */
  fold_defer_overflow_warnings ();

  iv0.base = expand_simple_operations (iv0.base);
  iv1.base = expand_simple_operations (iv1.base);
  if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
				  loop_only_exit_p (loop, exit)))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  if (optimize >= 3)
    {
      niter->assumptions = simplify_using_outer_evolutions (loop,
							    niter->assumptions);
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
							    niter->may_be_zero);
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
    }

  niter->assumptions
	  = simplify_using_initial_conditions (loop,
					       niter->assumptions);
  niter->may_be_zero
	  = simplify_using_initial_conditions (loop,
					       niter->may_be_zero);

  fold_undefer_and_ignore_overflow_warnings ();

  if (integer_onep (niter->assumptions))
    return true;

  /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
     But if we can prove that there is overflow or some other source of weird
     behavior, ignore the loop even with -funsafe-loop-optimizations.  */
  if (integer_zerop (niter->assumptions))
    return false;

  if (flag_unsafe_loop_optimizations)
    niter->assumptions = boolean_true_node;

  if (warn)
    {
      const char *wording;
      location_t loc = EXPR_LOCATION (stmt);
  
      /* We can provide a more specific warning if one of the operands is
	 constant and the other advances by +1 or -1.  */
      if (!integer_zerop (iv1.step)
	  ? (integer_zerop (iv0.step)
	     && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
	  : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
        wording =
          flag_unsafe_loop_optimizations
          ? N_("assuming that the loop is not infinite")
          : N_("cannot optimize possibly infinite loops");
      else
	wording = 
	  flag_unsafe_loop_optimizations
	  ? N_("assuming that the loop counter does not overflow")
	  : N_("cannot optimize loop, the loop counter may overflow");

      if (LOCATION_LINE (loc) > 0)
	warning (OPT_Wunsafe_loop_optimizations, "%H%s", &loc, gettext (wording));
      else
	warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
    }

  return flag_unsafe_loop_optimizations;
}

/* Try to determine the number of iterations of LOOP.  If we succeed,
   expression giving number of iterations is returned and *EXIT is
   set to the edge from that the information is obtained.  Otherwise
   chrec_dont_know is returned.  */

tree
find_loop_niter (struct loop *loop, edge *exit)
{
  unsigned i;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;
  struct tree_niter_desc desc;

  *exit = NULL;
  for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
    {
      if (!just_once_each_iteration_p (loop, ex->src))
	continue;

      if (!number_of_iterations_exit (loop, ex, &desc, false))
	continue;

      if (integer_nonzerop (desc.may_be_zero))
	{
	  /* We exit in the first iteration through this exit.
	     We won't find anything better.  */
	  niter = build_int_cst (unsigned_type_node, 0);
	  *exit = ex;
	  break;
	}

      if (!integer_zerop (desc.may_be_zero))
	continue;

      aniter = desc.niter;

      if (!niter)
	{
	  /* Nothing recorded yet.  */
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      /* Prefer constants, the lower the better.  */
      if (TREE_CODE (aniter) != INTEGER_CST)
	continue;

      if (TREE_CODE (niter) != INTEGER_CST)
	{
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      if (tree_int_cst_lt (aniter, niter))
	{
	  niter = aniter;
	  *exit = ex;
	  continue;
	}
    }
  VEC_free (edge, heap, exits);

  return niter ? niter : chrec_dont_know;
}

/*

   Analysis of a number of iterations of a loop by a brute-force evaluation.

*/

/* Bound on the number of iterations we try to evaluate.  */

#define MAX_ITERATIONS_TO_TRACK \
  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))

/* Returns the loop phi node of LOOP such that ssa name X is derived from its
   result by a chain of operations such that all but exactly one of their
   operands are constants.  */

static tree
chain_of_csts_start (struct loop *loop, tree x)
{
  tree stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = bb_for_stmt (stmt);

  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL_TREE;
  
  if (TREE_CODE (stmt) == PHI_NODE)
    {
      if (bb == loop->header)
	return stmt;

      return NULL_TREE;
    }

  if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
    return NULL_TREE;

  if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
    return NULL_TREE;
  if (SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF) == NULL_DEF_OPERAND_P)
    return NULL_TREE;

  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_USE_OPERAND_P)
    return NULL_TREE;

  return chain_of_csts_start (loop, use);
}

/* Determines whether the expression X is derived from a result of a phi node
   in header of LOOP such that

   * the derivation of X consists only of operations with constants
   * the initial value of the phi node is constant
   * the value of the phi node in the next iteration can be derived from the
     value in the current iteration by a chain of operations with constants.

   If such a phi node exists, it is returned.  If X is a constant, X is returned
   unchanged.  Otherwise NULL_TREE is returned.  */

static tree
get_base_for (struct loop *loop, tree x)
{
  tree phi, init, next;

  if (is_gimple_min_invariant (x))
    return x;

  phi = chain_of_csts_start (loop, x);
  if (!phi)
    return NULL_TREE;

  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  if (TREE_CODE (next) != SSA_NAME)
    return NULL_TREE;

  if (!is_gimple_min_invariant (init))
    return NULL_TREE;

  if (chain_of_csts_start (loop, next) != phi)
    return NULL_TREE;

  return phi;
}

/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * otherwise X is an SSA name, whose value in the considered loop is derived
     by a chain of operations with constants from a result of a phi node in
     the header of the loop.  Then we return the value of X when the value of
     the result of this phi node is given by the constant BASE.  */

static tree
get_val_for (tree x, tree base)
{
  tree stmt, nx, val;
  use_operand_p op;
  ssa_op_iter iter;

  gcc_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;

  stmt = SSA_NAME_DEF_STMT (x);
  if (TREE_CODE (stmt) == PHI_NODE)
    return base;

  FOR_EACH_SSA_USE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      nx = USE_FROM_PTR (op);
      val = get_val_for (nx, base);
      SET_USE (op, val);
      val = fold (GIMPLE_STMT_OPERAND (stmt, 1));
      SET_USE (op, nx);
      /* Only iterate the FOR_EACH loop once; STMT has a single ssa use.  */
      return val;
    }

  /* Should never reach here.  */
  gcc_unreachable ();
}

/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in the first few iterations of the loop (assuming that
   these values are constant) and determining the first iteration in which
   the condition is not satisfied.  Returns the constant giving the number
   of iterations of LOOP if successful, chrec_dont_know otherwise.  */
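/* For example, if the exit condition is "i_1 == 9" and i_1 takes the values
   0, 3, 6, 9 at the exit test, the condition for staying in the loop first
   fails in the fourth iteration and the function returns 3.  */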

tree
loop_niter_by_eval (struct loop *loop, edge exit)
{
  tree cond, cnd, acnd;
  tree op[2], val[2], next[2], aval[2], phi[2];
  unsigned i, j;
  enum tree_code cmp;

  cond = last_stmt (exit->src);
  if (!cond || TREE_CODE (cond) != COND_EXPR)
    return chrec_dont_know;

  cnd = COND_EXPR_COND (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cnd = invert_truthvalue (cnd);

  cmp = TREE_CODE (cnd);
  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      for (j = 0; j < 2; j++)
	op[j] = TREE_OPERAND (cnd, j);
      break;

    default:
      return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      phi[j] = get_base_for (loop, op[j]);
      if (!phi[j])
	return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      if (TREE_CODE (phi[j]) == PHI_NODE)
	{
	  val[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_preheader_edge (loop));
	  next[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_latch_edge (loop));
	}
      else
	{
	  val[j] = phi[j];
	  next[j] = NULL_TREE;
	  op[j] = NULL_TREE;
	}
    }

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
	aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && integer_zerop (acnd))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Proved that loop %d iterates %d times using brute force.\n",
		     loop->num, i);
	  return build_int_cst (unsigned_type_node, i);
	}

      for (j = 0; j < 2; j++)
	{
	  val[j] = get_val_for (next[j], val[j]);
	  if (!is_gimple_min_invariant (val[j]))
	    {
	      fold_undefer_and_ignore_overflow_warnings ();
	      return chrec_dont_know;
	    }
	}
    }

  fold_undefer_and_ignore_overflow_warnings ();

  return chrec_dont_know;
}

/* Finds the exit of the LOOP by that the loop exits after a constant
   number of iterations and stores the exit edge to *EXIT.  The constant
   giving the number of iterations of LOOP is returned.  The number of
   iterations is determined using loop_niter_by_eval (i.e. by brute force
   evaluation).  If we are unable to find the exit for that loop_niter_by_eval
   determines the number of iterations, chrec_dont_know is returned.  */

tree
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
  unsigned i;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;

  *exit = NULL;
  for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
    {
      if (!just_once_each_iteration_p (loop, ex->src))
	continue;

      aniter = loop_niter_by_eval (loop, ex);
      if (chrec_contains_undetermined (aniter))
	continue;

      if (niter
	  && !tree_int_cst_lt (aniter, niter))
	continue;

      niter = aniter;
      *exit = ex;
    }
  VEC_free (edge, heap, exits);

  return niter ? niter : chrec_dont_know;
}

/*

   Analysis of upper bounds on number of iterations of a loop.

*/

/* Returns a constant upper bound on the value of expression VAL.  VAL
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.  */
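/* For instance, assuming an unsigned VAL defined as (x_1 & 0xff) + 3, the
   BIT_AND_EXPR case below bounds x_1 & 0xff by 0xff and the PLUS_EXPR case
   then yields the constant upper bound 0x102.  */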
 
static double_int
derive_constant_upper_bound (const_tree val)
{
  tree type = TREE_TYPE (val);
  tree op0, op1, subtype, maxt;
  double_int bnd, max, mmax, cst;
  tree stmt;

  if (INTEGRAL_TYPE_P (type))
    maxt = TYPE_MAX_VALUE (type);
  else
    maxt = upper_bound_in_type (type, type);

  max = tree_to_double_int (maxt);

  switch (TREE_CODE (val))
    {
    case INTEGER_CST:
      return tree_to_double_int (val);

    case NOP_EXPR:
    case CONVERT_EXPR:
      op0 = TREE_OPERAND (val, 0);
      subtype = TREE_TYPE (op0);
      if (!TYPE_UNSIGNED (subtype)
	  /* If TYPE is also signed, the fact that VAL is nonnegative implies
	     that OP0 is nonnegative.  */
	  && TYPE_UNSIGNED (type)
	  && !tree_expr_nonnegative_p (op0))
	{
	  /* If we cannot prove that the casted expression is nonnegative,
	     we cannot establish more useful upper bound than the precision
	     of the type gives us.  */
	  return max;
	}

      /* We now know that op0 is a nonnegative value.  Try deriving an upper
	 bound for it.  */
      bnd = derive_constant_upper_bound (op0);

      /* If the bound does not fit in TYPE, max. value of TYPE could be
	 attained.  */
      if (double_int_ucmp (max, bnd) < 0)
	return max;

      return bnd;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      op0 = TREE_OPERAND (val, 0);
      op1 = TREE_OPERAND (val, 1);

      if (TREE_CODE (op1) != INTEGER_CST
	  || !tree_expr_nonnegative_p (op0))
	return max;

      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
	 choose the most logical way to treat this constant regardless
	 of the signedness of the type.  */
      cst = tree_to_double_int (op1);
      cst = double_int_sext (cst, TYPE_PRECISION (type));
      if (TREE_CODE (val) == PLUS_EXPR)
	cst = double_int_neg (cst);

      bnd = derive_constant_upper_bound (op0);

      if (double_int_negative_p (cst))
	{
	  cst = double_int_neg (cst);
	  /* Avoid CST == 0x80000...  */
	  if (double_int_negative_p (cst))
	    return max;

	  /* OP0 + CST.  We need to check that
	     BND <= MAX (type) - CST.  */

	  mmax = double_int_add (max, double_int_neg (cst));
	  if (double_int_ucmp (bnd, mmax) > 0)
	    return max;

	  return double_int_add (bnd, cst);
	}
      else
	{
	  /* OP0 - CST, where CST >= 0.

	     If TYPE is signed, we have already verified that OP0 >= 0, and we
	     know that the result is nonnegative.  This implies that
	     VAL <= BND - CST.

	     If TYPE is unsigned, we must additionally know that OP0 >= CST,
	     otherwise the operation underflows.
	   */

	  /* This should only happen if the type is unsigned; however, for
	     buggy programs that use overflowing signed arithmetics even with
	     -fno-wrapv, this condition may also be true for signed values.  */
	  if (double_int_ucmp (bnd, cst) < 0)
	    return max;

	  if (TYPE_UNSIGNED (type))
	    {
	      tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
				      double_int_to_tree (type, cst));
	      if (!tem || integer_nonzerop (tem))
		return max;
	    }

	  bnd = double_int_add (bnd, double_int_neg (cst));
	}

      return bnd;

    case FLOOR_DIV_EXPR:
    case EXACT_DIV_EXPR:
      op0 = TREE_OPERAND (val, 0);
      op1 = TREE_OPERAND (val, 1);
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;

      bnd = derive_constant_upper_bound (op0);
      return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);

    case BIT_AND_EXPR:
      op1 = TREE_OPERAND (val, 1);
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;
      return tree_to_double_int (op1);

    case SSA_NAME:
      stmt = SSA_NAME_DEF_STMT (val);
      if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
	  || GIMPLE_STMT_OPERAND (stmt, 0) != val)
	return max;
      return derive_constant_upper_bound (GIMPLE_STMT_OPERAND (stmt, 1));

    default: 
      return max;
    }
}

/* Records that every statement in LOOP is executed I_BOUND times.
   REALISTIC is true if I_BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   I_BOUND times.  */

static void
record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
		    bool upper)
{
  /* Update the bounds only when there is no previous estimation, or when the
     current estimation is smaller.  */
  if (upper
      && (!loop->any_upper_bound
	  || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = i_bound;
    }
  if (realistic
      && (!loop->any_estimate
	  || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
    {
      loop->any_estimate = true;
      loop->nb_iterations_estimate = i_bound;
    }
}

/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
   is true if the loop is exited immediately after STMT, and this exit
   is taken at the latest when STMT has been executed BOUND + 1 times.
   REALISTIC is true if BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   BOUND times.  I_BOUND is an unsigned double_int upper estimate on BOUND.  */

static void
record_estimate (struct loop *loop, tree bound, double_int i_bound,
		 tree at_stmt, bool is_exit, bool realistic, bool upper)
{
  double_int delta;
  edge exit;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
      print_generic_expr (dump_file, at_stmt, TDF_SLIM);
      fprintf (dump_file, " is %sexecuted at most ",
	       upper ? "" : "probably ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      dump_double_int (dump_file, i_bound, true);
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
    }

  /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
     real number of iterations.  */
  if (TREE_CODE (bound) != INTEGER_CST)
    realistic = false;
  if (!upper && !realistic)
    return;

  /* If we have a guaranteed upper bound, record it in the appropriate
     list.  */
  if (upper)
    {
      struct nb_iter_bound *elt = GGC_NEW (struct nb_iter_bound);

      elt->bound = i_bound;
      elt->stmt = at_stmt;
      elt->is_exit = is_exit;
      elt->next = loop->bounds;
      loop->bounds = elt;
    }

  /* Update the number of iteration estimates according to the bound.
     If at_stmt is an exit, then every statement in the loop is
     executed at most BOUND + 1 times.  If it is not an exit, then
     some of the statements before it could be executed BOUND + 2
     times, if an exit of LOOP is before stmt.  */
  exit = single_exit (loop);
  if (is_exit
      || (exit != NULL
	  && dominated_by_p (CDI_DOMINATORS,
			     exit->src, bb_for_stmt (at_stmt))))
    delta = double_int_one;
  else
    delta = double_int_two;
  i_bound = double_int_add (i_bound, delta);

  /* If an overflow occurred, ignore the result.  */
  if (double_int_ucmp (i_bound, delta) < 0)
    return;

  record_niter_bound (loop, i_bound, realistic, upper);
}

/* Record the estimate on number of iterations of LOOP based on the fact that
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
   estimated number of iterations is expected to be close to the real one.
   UPPER is true if we are sure the induction variable does not wrap.  */
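/* For example, with BASE 0, STEP 4 and the range <0, 40>, the computation
   below gives NITER_BOUND = 40 / 4 = 10, i.e. STMT is executed at most
   10 + 1 times before the induction variable would leave the range.  */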

static void
record_nonwrapping_iv (struct loop *loop, tree base, tree step, tree stmt,
		       tree low, tree high, bool realistic, bool upper)
{
  tree niter_bound, extreme, delta;
  tree type = TREE_TYPE (base), unsigned_type;
  double_int max;

  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Induction variable (");
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
      fprintf (dump_file, ") ");
      print_generic_expr (dump_file, base, TDF_SLIM);
      fprintf (dump_file, " + ");
      print_generic_expr (dump_file, step, TDF_SLIM);
      fprintf (dump_file, " * iteration does not wrap in statement ");
      print_generic_expr (dump_file, stmt, TDF_SLIM);
      fprintf (dump_file, " in loop %d.\n", loop->num);
    }

  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);
  step = fold_convert (unsigned_type, step);

  if (tree_int_cst_sign_bit (step))
    {
      extreme = fold_convert (unsigned_type, low);
      if (TREE_CODE (base) != INTEGER_CST)
	base = fold_convert (unsigned_type, high);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
    }
  else
    {
      extreme = fold_convert (unsigned_type, high);
      if (TREE_CODE (base) != INTEGER_CST)
	base = fold_convert (unsigned_type, low);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
    }

  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
     would get out of the range.  */
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
  max = derive_constant_upper_bound (niter_bound);
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}

/* Returns true if REF is a reference to an array at the end of a dynamically
   allocated structure.  If this is the case, the array may be allocated larger
   than its upper bound implies.  */
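/* For instance, given "struct s { int n; int a[1]; } *p_4;", a reference
   p_4->a[i_7] is recognized here: the base is an INDIRECT_REF and the
   COMPONENT_REF selects the last field of the structure, so the array may
   in fact have been allocated with more than one element.  */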

static bool
array_at_struct_end_p (tree ref)
{
  tree base = get_base_address (ref);
  tree parent, field;

  /* Unless the reference is through a pointer, the size of the array matches
     its declaration.  */
  if (!base || !INDIRECT_REF_P (base))
    return false;
  
  for (;handled_component_p (ref); ref = parent)
    {
      parent = TREE_OPERAND (ref, 0);

      if (TREE_CODE (ref) == COMPONENT_REF)
	{
	  /* All fields of a union are at its end.  */
	  if (TREE_CODE (TREE_TYPE (parent)) == UNION_TYPE)
	    continue;

	  /* Unless the field is at the end of the struct, we are done.  */
	  field = TREE_OPERAND (ref, 1);
	  if (TREE_CHAIN (field))
	    return false;
	}

      /* The other options are ARRAY_REF, ARRAY_RANGE_REF, VIEW_CONVERT_EXPR.
	 In all these cases, we might be accessing the last element, and
	 although in practice this will probably never happen, it is legal for
	 the indices of this last element to exceed the bounds of the array.
	 Therefore, continue checking.  */
    }

  gcc_assert (INDIRECT_REF_P (ref));
  return true;
}

/* Determine information about number of iterations of a LOOP from the index
   IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
   guaranteed to be executed in every iteration of LOOP.  Callback for
   for_each_index.  */
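/* For example, for an access a[i_7] where a has type int[100] and i_7
   evolves as {0, +, 1} in LOOP, the array bounds <0, 99> are used below to
   record that the access is executed at most 99 + 1 times (as a guaranteed
   upper bound only when the access is executed in every iteration and the
   array is not the trailing array of a structure).  */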

struct ilb_data
{
  struct loop *loop;
  tree stmt;
  bool reliable;
};

static bool
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
{
  struct ilb_data *data = (struct ilb_data *) dta;
  tree ev, init, step;
  tree low, high, type, next;
  bool sign, upper = data->reliable, at_end = false;
  struct loop *loop = data->loop;

  if (TREE_CODE (base) != ARRAY_REF)
    return true;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (base))
    {
      at_end = true;
      upper = false;
    }

  ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init
      || !step
      || TREE_CODE (step) != INTEGER_CST
      || integer_zerop (step)
      || tree_contains_chrecs (init, NULL)
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
    return true;

  low = array_ref_low_bound (base);
  high = array_ref_up_bound (base);
  
  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high
      || TREE_CODE (high) != INTEGER_CST)
    return true;
  sign = tree_int_cst_sign_bit (step);
  type = TREE_TYPE (step);

  /* The array of length 1 at the end of a structure most likely extends
     beyond its bounds.  */
  if (at_end
      && operand_equal_p (low, high, 0))
    return true;

  /* In case the relevant bound of the array does not fit in type, or
     it does, but bound + step (in type) still belongs to the range of the
     array, the index may wrap and still stay within the range of the array
     (consider e.g. if the array is indexed by the full range of
     unsigned char).

     To make things simpler, we require both bounds to fit into type, although
     there are cases where this would not be strictly necessary.  */
  if (!int_fits_type_p (high, type)
      || !int_fits_type_p (low, type))
    return true;
  low = fold_convert (type, low);
  high = fold_convert (type, high);

  if (sign)
    next = fold_binary (PLUS_EXPR, type, low, step);
  else
    next = fold_binary (PLUS_EXPR, type, high, step);
  
  if (tree_int_cst_compare (low, next) <= 0
      && tree_int_cst_compare (next, high) <= 0)
    return true;

  record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true, upper);
  return true;
}

/* Determine information about number of iterations of a LOOP from the bounds
   of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
   STMT is guaranteed to be executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_ref (struct loop *loop, tree stmt, tree ref,
			    bool reliable)
{
  struct ilb_data data;

  data.loop = loop;
  data.stmt = stmt;
  data.reliable = reliable;
  for_each_index (&ref, idx_infer_loop_bounds, &data);
}

/* Determine information about number of iterations of a LOOP from the way
   arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
   executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_array (struct loop *loop, tree stmt, bool reliable)
{
  tree call;

  if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
    {
      tree op0 = GIMPLE_STMT_OPERAND (stmt, 0);
      tree op1 = GIMPLE_STMT_OPERAND (stmt, 1);

      /* For each memory access, analyze its access function
	 and record a bound on the loop iteration domain.  */
      if (REFERENCE_CLASS_P (op0))
	infer_loop_bounds_from_ref (loop, stmt, op0, reliable);

      if (REFERENCE_CLASS_P (op1))
	infer_loop_bounds_from_ref (loop, stmt, op1, reliable);
    }
  
  
  call = get_call_expr_in (stmt);
  if (call)
    {
      tree arg;
      call_expr_arg_iterator iter;

      FOR_EACH_CALL_EXPR_ARG (arg, iter, call)
	if (REFERENCE_CLASS_P (arg))
2649
	  infer_loop_bounds_from_ref (loop, stmt, arg, reliable);
    }
}

/* Determine information about number of iterations of a LOOP from the fact
   that signed arithmetics in STMT does not overflow.  */
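/* For example, if STMT is "i_6 = i_3 + 1" with i of signed int type and
   {1, +, 1} as the evolution of i_6 in LOOP, the undefinedness of signed
   overflow implies the values stay within [INT_MIN, INT_MAX], which the
   code below turns into an upper bound on the number of iterations.  */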

static void
infer_loop_bounds_from_signedness (struct loop *loop, tree stmt)
{
  tree def, base, step, scev, type, low, high;

  if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
    return;

  def = GIMPLE_STMT_OPERAND (stmt, 0);

  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_OVERFLOW_UNDEFINED (type))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}

/* The following analyzers extract information on the bounds of LOOP from
   the following undefined behaviors:

   - data references should not access elements over the statically
     allocated size,

   - signed variables should not overflow when flag_wrapv is not set.
*/

static void
infer_loop_bounds_from_undefined (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;
  block_stmt_iterator bsi;
  basic_block bb;
  bool reliable;
  
  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      /* If BB is not executed in each iteration of the loop, we cannot
	 use the operations in it to infer a reliable upper bound on the
	 # of iterations of the loop.  However, we can use it as a guess.  */
      reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	{
	  tree stmt = bsi_stmt (bsi);

	  infer_loop_bounds_from_array (loop, stmt, reliable);

	  if (reliable)
	    infer_loop_bounds_from_signedness (loop, stmt);
  	}

    }

  free (bbs);
}

/* Converts VAL to double_int.  */

static double_int
gcov_type_to_double_int (gcov_type val)
{
  double_int ret;

  ret.low = (unsigned HOST_WIDE_INT) val;
  /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
     the size of type.  */
  val >>= HOST_BITS_PER_WIDE_INT - 1;
  val >>= 1;
  ret.high = (unsigned HOST_WIDE_INT) val;

  return ret;
}

/* Records estimates on numbers of iterations of LOOP.  */

void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
  VEC (edge, heap) *exits;
  tree niter, type;
  unsigned i;
  struct tree_niter_desc niter_desc;
  edge ex;
  double_int bound;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimate_state != EST_NOT_COMPUTED)
    return;
  loop->estimate_state = EST_AVAILABLE;
  loop->any_upper_bound = false;
  loop->any_estimate = false;

  exits = get_loop_exit_edges (loop);
  for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
    {
      if (!number_of_iterations_exit (loop, ex, &niter_desc, false))
	continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
	niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
			build_int_cst (type, 0),
			niter);
      record_estimate (loop, niter, niter_desc.max,
		       last_stmt (ex->src),
		       true, true, true);
    }
  VEC_free (edge, heap, exits);
  
  infer_loop_bounds_from_undefined (loop);

  /* If we have a measured profile, use it to estimate the number of
     iterations.  */
  if (loop->header->count != 0)
    {
      gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
      bound = gcov_type_to_double_int (nit);
      record_niter_bound (loop, bound, true, false);
    }

  /* If an upper bound is smaller than the realistic estimate of the
     number of iterations, use the upper bound instead.  */
  if (loop->any_upper_bound
      && loop->any_estimate
      && double_int_ucmp (loop->nb_iterations_upper_bound,
			  loop->nb_iterations_estimate) < 0)
    loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
}

/* Records estimates on numbers of iterations of loops.  */

void
estimate_numbers_of_iterations (void)
{
  loop_iterator li;
  struct loop *loop;

  /* We don't want to issue signed overflow warnings while getting
     loop iteration estimates.  */
  fold_defer_overflow_warnings ();

  FOR_EACH_LOOP (li, loop, 0)
    {
      estimate_numbers_of_iterations_loop (loop);
    }

  fold_undefer_and_ignore_overflow_warnings ();
}

/* Returns true if statement S1 dominates statement S2.  */

bool
stmt_dominates_stmt_p (tree s1, tree s2)
{
  basic_block bb1 = bb_for_stmt (s1), bb2 = bb_for_stmt (s2);

  if (!bb1
      || s1 == s2)
    return true;

  if (bb1 == bb2)
    {
      block_stmt_iterator bsi;

      for (bsi = bsi_start (bb1); bsi_stmt (bsi) != s2; bsi_next (&bsi))
	if (bsi_stmt (bsi) == s1)
	  return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}

/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND.  If STMT is NULL, we must prove this bound for all
   statements in the loop.  */

static bool
n_of_executions_at_most (tree stmt,
			 struct nb_iter_bound *niter_bound, 
			 tree niter)
{
  double_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!double_int_fits_to_tree_p (nit_type, bound))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:
     
     -- if NITER_BOUND->is_exit is true, then everything before
        NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
	times, and everything after it at most NITER_BOUND->bound times.

     -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
	is executed, then NITER_BOUND->stmt is executed as well in the same
	iteration (we conclude that if both statements belong to the same
	basic block, or if STMT is after NITER_BOUND->stmt), then STMT
	is executed at most NITER_BOUND->bound + 1 times.  Otherwise STMT is
	executed at most NITER_BOUND->bound + 2 times.  */

  if (niter_bound->is_exit)
    {
      if (stmt
	  && stmt != niter_bound->stmt
	  && stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	cmp = GE_EXPR;
      else
	cmp = GT_EXPR;
    }
  else
    {
      if (!stmt
	  || (bb_for_stmt (stmt) != bb_for_stmt (niter_bound->stmt)
	      && !stmt_dominates_stmt_p (niter_bound->stmt, stmt)))
	{
	  bound = double_int_add (bound, double_int_one);
	  if (double_int_zero_p (bound)
	      || !double_int_fits_to_tree_p (nit_type, bound))
	    return false;
	}
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
		   niter, double_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}

/* Returns true if the arithmetics in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}

/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPEs bounds.  Return true when the
   iv is known to overflow or when the property is not computable.
 
   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetics in C does not overflow).  */
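/* As a rough illustration: an unsigned char iv {250, +, 4} can wrap already
   after two increments, while {0, +, 4} in a 32-bit unsigned type needs
   about a billion iterations to wrap; the code below derives such a count
   (VALID_NITER) and checks it against the recorded bounds on the number of
   executions of AT_STMT.  */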

bool
scev_probably_wraps_p (tree base, tree step, 
		       tree at_stmt, struct loop *loop,
		       bool use_overflow_semantics)
{
  struct nb_iter_bound *bound;
  tree delta, step_abs;
  tree unsigned_type, valid_niter;
  tree type = TREE_TYPE (step);

  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetics:
	 
       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step)
      || TREE_CODE (step) != INTEGER_CST)
    return true;

  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetics does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (type))
    return false;

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Otherwise, compute the number of iterations before we reach the
     bound of the type, and verify that the loop is exited before this
     occurs.  */
  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      tree extreme = fold_convert (unsigned_type,
				   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
			      fold_convert (unsigned_type, step));
    }
  else
    {
      tree extreme = fold_convert (unsigned_type,
				   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
      step_abs = fold_convert (unsigned_type, step);
    }

  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);

  estimate_numbers_of_iterations_loop (loop);
  for (bound = loop->bounds; bound; bound = bound->next)
    {
      if (n_of_executions_at_most (at_stmt, bound, valid_niter))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}
    }

  fold_undefer_and_ignore_overflow_warnings ();

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}

/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates_loop (struct loop *loop)
{
  struct nb_iter_bound *bound, *next;

  loop->nb_iterations = NULL;
  loop->estimate_state = EST_NOT_COMPUTED;
  for (bound = loop->bounds; bound; bound = next)
    {
      next = bound->next;
      ggc_free (bound);
    }

  loop->bounds = NULL;
}

/* Frees the information on upper bounds on numbers of iterations of loops.  */

void
free_numbers_of_iterations_estimates (void)
{
  loop_iterator li;
  struct loop *loop;

  FOR_EACH_LOOP (li, loop, 0)
    {
      free_numbers_of_iterations_estimates_loop (loop);
    }
}

/* Substitute value VAL for ssa name NAME inside expressions held
   at LOOP.  */

void
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}