/*  Loop transformation code generation
    Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
    Contributed by Daniel Berlin <dberlin@dberlin.org>

    This file is part of GCC.
    
    GCC is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License as published by the Free
    Software Foundation; either version 2, or (at your option) any later
    version.
    
    GCC is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.
    
    You should have received a copy of the GNU General Public License
    along with GCC; see the file COPYING.  If not, write to the Free
    Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
    02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "rtl.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-pass.h"
#include "tree-scalar-evolution.h"
#include "vec.h"
#include "lambda.h"

/* This loop nest code generation is based on non-singular matrix
   math.
 
 A little terminology and a general sketch of the algorithm.  See "A singular
 loop transformation framework based on non-singular matrices" by Wei Li and
 Keshav Pingali for formal proofs that the various statements below are
 correct. 

 A loop iteration space represents the points traversed by the loop.  A point
 in the iteration space can be represented by a vector of size <loop depth>.
 You can therefore represent the iteration space as an integral combination
 of a set of basis vectors. 

 A loop iteration space is dense if every integer point between the loop
 bounds is a point in the iteration space.  Every loop with a step of 1
 therefore has a dense iteration space.

 for i = 1 to 3, step 1 is a dense iteration space.
   
 A loop iteration space is sparse if it is not dense.  That is, the iteration
 space skips integer points that are within the loop bounds.  

 for i = 1 to 3, step 2 is a sparse iteration space, because the integer point
 2 is skipped.

 Dense source spaces are easy to transform, because they don't skip any
 points to begin with.  Thus we can compute the exact bounds of the target
 space using min/max and floor/ceil.

 For a dense source space, we take the transformation matrix, decompose it
 into a lower triangular part (H) and a unimodular part (U). 
 We then compute the auxiliary space from the unimodular part (source loop
 nest . U = auxiliary space), which has two important properties:

  1. It traverses the iterations in the same lexicographic order as the source
  space.
  2. It is a dense space when the source is a dense space (even if the target
  space is going to be sparse).
 
 Given the auxiliary space, we use the lower triangular part to compute the
 bounds in the target space by simple matrix multiplication.
 The gaps in the target space (IE the new loop step sizes) will be the
 diagonals of the H matrix.

 Sparse source spaces require another step, because you can't directly compute
 the exact bounds of the auxiliary and target space from the sparse space.
 Rather than try to come up with a separate algorithm to handle sparse source
 spaces directly, we just find a legal transformation matrix that gives you
 the sparse source space, from a dense space, and then transform the dense
 space.

 For a regular sparse space, you can represent the source space as an integer
 lattice, and the base space of that lattice will always be dense.  Thus, we
 effectively use the lattice to figure out the transformation from the lattice
 base space, to the sparse iteration space (IE what transform was applied to
 the dense space to make it sparse).  We then compose this transform with the
 transformation matrix specified by the user (since our matrix transformations
 are closed under composition, this is okay).  We can then use the base space
 (which is dense) plus the composed transformation matrix, to compute the rest
 of the transform using the dense space algorithm above.
 
 In other words, our sparse source space (B) is decomposed into a dense base
 space (A), and a matrix (L) that transforms A into B, such that A.L = B.
 We then compute the composition of L and the user transformation matrix (T),
 so that T is now a transform from A to the result, instead of from B to the
 result. 
 IE A.(LT) = result instead of B.T = result
 Since A is now a dense source space, we can use the dense source space
 algorithm above to compute the result of applying transform (LT) to A.

 Fourier-Motzkin elimination is used to compute the bounds of the base space
 of the lattice.  */

DEF_VEC_I(int);
DEF_VEC_ALLOC_I(int,heap);
static bool perfect_nestify (struct loops *, 
			     struct loop *, VEC(tree,heap) *, 
			     VEC(tree,heap) *, VEC(int,heap) *,
			     VEC(tree,heap) *);
/* Lattice stuff that is internal to the code generation algorithm.  */

typedef struct
{
  /* Lattice base matrix.  */
  lambda_matrix base;
  /* Lattice dimension.  */
  int dimension;
  /* Origin vector for the coefficients.  */
  lambda_vector origin;
  /* Origin matrix for the invariants.  */
  lambda_matrix origin_invariants;
  /* Number of invariants.  */
  int invariants;
} *lambda_lattice;

/* Field accessors for a lambda_lattice.  */
#define LATTICE_BASE(T) ((T)->base)
#define LATTICE_DIMENSION(T) ((T)->dimension)
#define LATTICE_ORIGIN(T) ((T)->origin)
#define LATTICE_ORIGIN_INVARIANTS(T) ((T)->origin_invariants)
#define LATTICE_INVARIANTS(T) ((T)->invariants)

/* Forward declarations of static helpers defined later in this file.  */
static bool lle_equal (lambda_linear_expression, lambda_linear_expression,
		       int, int);
static lambda_lattice lambda_lattice_new (int, int);
static lambda_lattice lambda_lattice_compute_base (lambda_loopnest);

static tree find_induction_var_from_exit_cond (struct loop *);

/* Allocate and initialize a lambda body vector with SIZE coefficient
   slots.  The denominator starts out as 1.  */

lambda_body_vector
lambda_body_vector_new (int size)
{
  lambda_body_vector vec = ggc_alloc (sizeof (*vec));

  LBV_SIZE (vec) = size;
  LBV_DENOMINATOR (vec) = 1;
  LBV_COEFFICIENTS (vec) = lambda_vector_new (size);
  return vec;
}

/* Compute the new coefficients for the vector VECT based on the
   *inverse* of the transformation matrix TRANSFORM.  Returns a freshly
   allocated body vector; VECT is not modified.  */

lambda_body_vector
lambda_body_vector_compute_new (lambda_trans_matrix transform,
				lambda_body_vector vect)
{
  lambda_body_vector result;
  int depth;

  /* The transformation matrix must be square.  */
  gcc_assert (LTM_ROWSIZE (transform) == LTM_COLSIZE (transform));
  depth = LTM_ROWSIZE (transform);

  result = lambda_body_vector_new (depth);
  LBV_SIZE (result) = LBV_SIZE (vect);
  LBV_DENOMINATOR (result)
    = LBV_DENOMINATOR (vect) * LTM_DENOMINATOR (transform);
  lambda_vector_matrix_mult (LBV_COEFFICIENTS (vect), depth,
			     LTM_MATRIX (transform), depth,
			     LBV_COEFFICIENTS (result));
  return result;
}

/* Print the coefficients of the lambda body vector BODY to OUTFILE.  */

void
print_lambda_body_vector (FILE * outfile, lambda_body_vector body)
{
  print_lambda_vector (outfile, LBV_COEFFICIENTS (body), LBV_SIZE (body));
}

/* Return TRUE if the two linear expressions LLE1 and LLE2 are equal:
   same constant, same denominator, and element-wise equal coefficient
   and invariant-coefficient vectors (of lengths DEPTH and INVARIANTS
   respectively).  If either argument is NULL, return FALSE.  */

static bool
lle_equal (lambda_linear_expression lle1, lambda_linear_expression lle2,
	   int depth, int invariants)
{
  int idx;

  if (lle1 == NULL || lle2 == NULL)
    return false;

  if (LLE_CONSTANT (lle1) != LLE_CONSTANT (lle2)
      || LLE_DENOMINATOR (lle1) != LLE_DENOMINATOR (lle2))
    return false;

  for (idx = 0; idx < depth; idx++)
    if (LLE_COEFFICIENTS (lle1)[idx] != LLE_COEFFICIENTS (lle2)[idx])
      return false;

  for (idx = 0; idx < invariants; idx++)
    if (LLE_INVARIANT_COEFFICIENTS (lle1)[idx]
	!= LLE_INVARIANT_COEFFICIENTS (lle2)[idx])
      return false;

  return true;
}

/* Create a new linear expression with dimension DIM and INVARIANTS
   invariant-coefficient slots.  The expression starts out with constant
   0, denominator 1, and no successor in the expression chain.  */

lambda_linear_expression
lambda_linear_expression_new (int dim, int invariants)
{
  lambda_linear_expression expr = ggc_alloc_cleared (sizeof (*expr));

  LLE_CONSTANT (expr) = 0;
  LLE_DENOMINATOR (expr) = 1;
  LLE_NEXT (expr) = NULL;
  LLE_COEFFICIENTS (expr) = lambda_vector_new (dim);
  LLE_INVARIANT_COEFFICIENTS (expr) = lambda_vector_new (invariants);

  return expr;
}

/* Print the linear expression EXPR, with SIZE coefficients, to OUTFILE.
   Variables are named with consecutive letters beginning at START;
   zero coefficients are skipped and unit coefficients print only the
   variable name.  */

static void
print_linear_expression (FILE * outfile, lambda_vector expr, int size,
			 char start)
{
  int i;
  bool seen_term = false;

  for (i = 0; i < size; i++)
    {
      int coeff = expr[i];

      if (coeff == 0)
	continue;

      /* The first term only prints a leading minus sign; later terms
	 are joined with " + " or " - ".  */
      if (!seen_term)
	{
	  if (coeff < 0)
	    fprintf (outfile, "-");
	  seen_term = true;
	}
      else if (coeff > 0)
	fprintf (outfile, " + ");
      else
	fprintf (outfile, " - ");

      if (abs (coeff) == 1)
	fprintf (outfile, "%c", start + i);
      else
	fprintf (outfile, "%d%c", abs (coeff), start + i);
    }
}

/* Print out a lambda linear expression structure, EXPR, to OUTFILE. The
   depth/number of coefficients is given by DEPTH, the number of invariants is
   given by INVARIANTS, and the character to start variable names with is given
   by START.  */

void
print_lambda_linear_expression (FILE * outfile,
				lambda_linear_expression expr,
				int depth, int invariants, char start)
{
  fprintf (outfile, "\tLinear expression: ");
  print_linear_expression (outfile, LLE_COEFFICIENTS (expr), depth, start);
  fprintf (outfile, " constant: %d ", LLE_CONSTANT (expr));
  fprintf (outfile, "  invariants: ");
  /* Invariant names always start at 'A', independent of START.  */
  print_linear_expression (outfile, LLE_INVARIANT_COEFFICIENTS (expr),
			   invariants, 'A');
  fprintf (outfile, "  denominator: %d\n", LLE_DENOMINATOR (expr));
}

/* Print a lambda loop structure LOOP to OUTFILE.  The depth/number of
   coefficients is given by DEPTH, the number of invariants is 
   given by INVARIANTS, and the character to start variable names with is given
296
   by START.  */
Daniel Berlin committed
297 298 299 300 301 302 303 304

void
print_lambda_loop (FILE * outfile, lambda_loop loop, int depth,
		   int invariants, char start)
{
  int step;
  lambda_linear_expression expr;

305
  gcc_assert (loop);
Daniel Berlin committed
306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358

  expr = LL_LINEAR_OFFSET (loop);
  step = LL_STEP (loop);
  fprintf (outfile, "  step size = %d \n", step);

  if (expr)
    {
      fprintf (outfile, "  linear offset: \n");
      print_lambda_linear_expression (outfile, expr, depth, invariants,
				      start);
    }

  fprintf (outfile, "  lower bound: \n");
  for (expr = LL_LOWER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
    print_lambda_linear_expression (outfile, expr, depth, invariants, start);
  fprintf (outfile, "  upper bound: \n");
  for (expr = LL_UPPER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
    print_lambda_linear_expression (outfile, expr, depth, invariants, start);
}

/* Allocate a loop nest structure holding DEPTH loops and recording
   INVARIANTS as the number of invariants.  The loop slots start out
   cleared (all NULL).  */

lambda_loopnest
lambda_loopnest_new (int depth, int invariants)
{
  lambda_loopnest nest = ggc_alloc (sizeof (*nest));

  LN_DEPTH (nest) = depth;
  LN_INVARIANTS (nest) = invariants;
  LN_LOOPS (nest) = ggc_alloc_cleared (depth * sizeof (lambda_loop));

  return nest;
}

/* Print every loop of the lambda loopnest NEST to OUTFILE, naming the
   loops with consecutive letters beginning at START.  */

void
print_lambda_loopnest (FILE * outfile, lambda_loopnest nest, char start)
{
  int idx;

  for (idx = 0; idx < LN_DEPTH (nest); idx++)
    {
      fprintf (outfile, "Loop %c\n", start + idx);
      print_lambda_loop (outfile, LN_LOOPS (nest)[idx], LN_DEPTH (nest),
			 LN_INVARIANTS (nest), 'i');
      fprintf (outfile, "\n");
    }
}

/* Allocate a new lattice structure of DEPTH x DEPTH, with INVARIANTS number
359
   of invariants.  */
Daniel Berlin committed
360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399

static lambda_lattice
lambda_lattice_new (int depth, int invariants)
{
  lambda_lattice ret;
  ret = ggc_alloc (sizeof (*ret));
  LATTICE_BASE (ret) = lambda_matrix_new (depth, depth);
  LATTICE_ORIGIN (ret) = lambda_vector_new (depth);
  LATTICE_ORIGIN_INVARIANTS (ret) = lambda_matrix_new (depth, invariants);
  LATTICE_DIMENSION (ret) = depth;
  LATTICE_INVARIANTS (ret) = invariants;
  return ret;
}

/* Compute the lattice base for NEST.  The lattice base is essentially a
   non-singular transform from a dense base space to a sparse iteration
   space.  We use it so that we don't have to specially handle the case
   of a sparse iteration space in other parts of the algorithm.  As a
   result, this routine only does something interesting (IE produce a
   matrix that isn't the identity matrix) if NEST is a sparse space.  */

static lambda_lattice
lambda_lattice_compute_base (lambda_loopnest nest)
{
  lambda_lattice lattice;
  lambda_matrix base;
  int depth = LN_DEPTH (nest);
  int invariants = LN_INVARIANTS (nest);
  int row, col;

  lattice = lambda_lattice_new (depth, invariants);
  base = LATTICE_BASE (lattice);

  for (row = 0; row < depth; row++)
    {
      lambda_loop loop = LN_LOOPS (nest)[row];
      int step;

      gcc_assert (loop);
      step = LL_STEP (loop);

      if (step == 1)
	{
	  /* A step of 1 contributes a unit row: the base row is the
	     identity row, and the origin and invariant coefficients
	     are 0.  */
	  for (col = 0; col < depth; col++)
	    base[row][col] = 0;
	  base[row][row] = 1;
	  LATTICE_ORIGIN (lattice)[row] = 0;
	  for (col = 0; col < invariants; col++)
	    LATTICE_ORIGIN_INVARIANTS (lattice)[row][col] = 0;
	}
      else
	{
	  /* Otherwise, we need the lower bound expression (which must
	     be an affine function) to determine the base.  */
	  lambda_linear_expression lbound = LL_LOWER_BOUND (loop);

	  gcc_assert (lbound && !LLE_NEXT (lbound)
		      && LLE_DENOMINATOR (lbound) == 1);

	  /* The lower triangular portion of the base is the lower bound
	     coefficient times the step of the corresponding outer loop.  */
	  for (col = 0; col < row; col++)
	    base[row][col] = LLE_COEFFICIENTS (lbound)[col]
	      * LL_STEP (LN_LOOPS (nest)[col]);
	  base[row][row] = step;
	  for (col = row + 1; col < depth; col++)
	    base[row][col] = 0;

	  /* Origin for this loop is the constant of the lower bound
	     expression.  */
	  LATTICE_ORIGIN (lattice)[row] = LLE_CONSTANT (lbound);

	  /* Coefficients for the invariants are equal to the invariant
	     coefficients in the expression.  */
	  for (col = 0; col < invariants; col++)
	    LATTICE_ORIGIN_INVARIANTS (lattice)[row][col]
	      = LLE_INVARIANT_COEFFICIENTS (lbound)[col];
	}
    }

  return lattice;
}

/* Compute the greatest common divisor of A and B using Euclid's
   algorithm.  The result is always non-negative; gcd (0, 0) is 0.  */

static int
gcd (int a, int b)
{
  int x = abs (a);
  int y = abs (b);

  while (x != 0)
    {
      int r = y % x;
      y = x;
      x = r;
    }

  return y;
}

/* Compute the greatest common divisor of the SIZE numbers in VECTOR.
   For an empty vector the result is 0; for a single-element vector the
   result is that element (possibly negative), matching gcd's folding
   behavior thereafter.  */

static int
gcd_vector (lambda_vector vector, int size)
{
  int i;
  int result;

  if (size <= 0)
    return 0;

  result = vector[0];
  for (i = 1; i < size; i++)
    result = gcd (result, vector[i]);
  return result;
}

/* Compute the least common multiple of two numbers A and B.  The
   result is non-negative; lcm involving 0 is defined as 0 (this also
   avoids dividing by gcd (0, 0) == 0).  */

static int
lcm (int a, int b)
{
  int g = gcd (a, b);

  /* gcd (0, 0) is 0; dividing by it would be undefined behavior.  */
  if (g == 0)
    return 0;

  /* Divide before multiplying to reduce the chance of overflow.  */
  return abs (a) / g * abs (b);
}

491
/* Perform Fourier-Motzkin elimination to calculate the bounds of the
492
   auxiliary nest.
493
   Fourier-Motzkin is a way of reducing systems of linear inequalities so that
494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576
   it is easy to calculate the answer and bounds.
   A sketch of how it works:
   Given a system of linear inequalities, ai * xj >= bk, you can always
   rewrite the constraints so they are all of the form
   a <= x, or x <= b, or x >= constant for some x in x1 ... xj (and some b
   in b1 ... bk, and some a in a1...ai)
   You can then eliminate this x from the non-constant inequalities by
   rewriting these as a <= b, x >= constant, and delete the x variable.
   You can then repeat this for any remaining x variables, and then we have
   an easy to use variable <= constant (or no variables at all) form that we
   can construct our bounds from. 
   
   In our case, each time we eliminate, we construct part of the bound from
   the ith variable, then delete the ith variable. 
   
   Remember the constant are in our vector a, our coefficient matrix is A,
   and our invariant coefficient matrix is B.
   
   SIZE is the size of the matrices being passed.
   DEPTH is the loop nest depth.
   INVARIANTS is the number of loop invariants.
   A, B, and a are the coefficient matrix, invariant coefficient, and a
   vector of constants, respectively.  */

static lambda_loopnest 
compute_nest_using_fourier_motzkin (int size,
				    int depth, 
				    int invariants,
				    lambda_matrix A,
				    lambda_matrix B,
				    lambda_vector a)
{

  int multiple, f1, f2;
  int i, j, k;
  lambda_linear_expression expression;
  lambda_loop loop;
  lambda_loopnest auxillary_nest;
  lambda_matrix swapmatrix, A1, B1;
  lambda_vector swapvector, a1;
  int newsize;

  A1 = lambda_matrix_new (128, depth);
  B1 = lambda_matrix_new (128, invariants);
  a1 = lambda_vector_new (128);

  auxillary_nest = lambda_loopnest_new (depth, invariants);

  for (i = depth - 1; i >= 0; i--)
    {
      loop = lambda_loop_new ();
      LN_LOOPS (auxillary_nest)[i] = loop;
      LL_STEP (loop) = 1;

      for (j = 0; j < size; j++)
	{
	  if (A[j][i] < 0)
	    {
	      /* Any linear expression in the matrix with a coefficient less
		 than 0 becomes part of the new lower bound.  */ 
	      expression = lambda_linear_expression_new (depth, invariants);

	      for (k = 0; k < i; k++)
		LLE_COEFFICIENTS (expression)[k] = A[j][k];

	      for (k = 0; k < invariants; k++)
		LLE_INVARIANT_COEFFICIENTS (expression)[k] = -1 * B[j][k];

	      LLE_DENOMINATOR (expression) = -1 * A[j][i];
	      LLE_CONSTANT (expression) = -1 * a[j];

	      /* Ignore if identical to the existing lower bound.  */
	      if (!lle_equal (LL_LOWER_BOUND (loop),
			      expression, depth, invariants))
		{
		  LLE_NEXT (expression) = LL_LOWER_BOUND (loop);
		  LL_LOWER_BOUND (loop) = expression;
		}

	    }
	  else if (A[j][i] > 0)
	    {
	      /* Any linear expression with a coefficient greater than 0
577
		 becomes part of the new upper bound.  */ 
578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652
	      expression = lambda_linear_expression_new (depth, invariants);
	      for (k = 0; k < i; k++)
		LLE_COEFFICIENTS (expression)[k] = -1 * A[j][k];

	      for (k = 0; k < invariants; k++)
		LLE_INVARIANT_COEFFICIENTS (expression)[k] = B[j][k];

	      LLE_DENOMINATOR (expression) = A[j][i];
	      LLE_CONSTANT (expression) = a[j];

	      /* Ignore if identical to the existing upper bound.  */
	      if (!lle_equal (LL_UPPER_BOUND (loop),
			      expression, depth, invariants))
		{
		  LLE_NEXT (expression) = LL_UPPER_BOUND (loop);
		  LL_UPPER_BOUND (loop) = expression;
		}

	    }
	}

      /* This portion creates a new system of linear inequalities by deleting
	 the i'th variable, reducing the system by one variable.  */
      newsize = 0;
      for (j = 0; j < size; j++)
	{
	  /* If the coefficient for the i'th variable is 0, then we can just
	     eliminate the variable straightaway.  Otherwise, we have to
	     multiply through by the coefficients we are eliminating.  */
	  if (A[j][i] == 0)
	    {
	      lambda_vector_copy (A[j], A1[newsize], depth);
	      lambda_vector_copy (B[j], B1[newsize], invariants);
	      a1[newsize] = a[j];
	      newsize++;
	    }
	  else if (A[j][i] > 0)
	    {
	      for (k = 0; k < size; k++)
		{
		  if (A[k][i] < 0)
		    {
		      multiple = lcm (A[j][i], A[k][i]);
		      f1 = multiple / A[j][i];
		      f2 = -1 * multiple / A[k][i];

		      lambda_vector_add_mc (A[j], f1, A[k], f2,
					    A1[newsize], depth);
		      lambda_vector_add_mc (B[j], f1, B[k], f2,
					    B1[newsize], invariants);
		      a1[newsize] = f1 * a[j] + f2 * a[k];
		      newsize++;
		    }
		}
	    }
	}

      swapmatrix = A;
      A = A1;
      A1 = swapmatrix;

      swapmatrix = B;
      B = B1;
      B1 = swapmatrix;

      swapvector = a;
      a = a1;
      a1 = swapvector;

      size = newsize;
    }

  return auxillary_nest;
}

Daniel Berlin committed
653
/* Compute the loop bounds for the auxiliary space NEST.
654 655 656 657 658 659 660 661 662 663 664 665 666
   Input system used is Ax <= b.  TRANS is the unimodular transformation.  
   Given the original nest, this function will 
   1. Convert the nest into matrix form, which consists of a matrix for the
   coefficients, a matrix for the 
   invariant coefficients, and a vector for the constants.  
   2. Use the matrix form to calculate the lattice base for the nest (which is
   a dense space) 
   3. Compose the dense space transform with the user specified transform, to 
   get a transform we can easily calculate transformed bounds for.
   4. Multiply the composed transformation matrix times the matrix form of the
   loop.
   5. Transform the newly created matrix (from step 4) back into a loop nest
   using fourier motzkin elimination to figure out the bounds.  */
Daniel Berlin committed
667 668 669 670 671

static lambda_loopnest
lambda_compute_auxillary_space (lambda_loopnest nest,
				lambda_trans_matrix trans)
{
672 673
  lambda_matrix A, B, A1, B1;
  lambda_vector a, a1;
Daniel Berlin committed
674
  lambda_matrix invertedtrans;
675
  int depth, invariants, size;
676
  int i, j;
Daniel Berlin committed
677 678 679 680 681 682 683 684 685
  lambda_loop loop;
  lambda_linear_expression expression;
  lambda_lattice lattice;

  depth = LN_DEPTH (nest);
  invariants = LN_INVARIANTS (nest);

  /* Unfortunately, we can't know the number of constraints we'll have
     ahead of time, but this should be enough even in ridiculous loop nest
686
     cases. We must not go over this limit.  */
Daniel Berlin committed
687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731
  A = lambda_matrix_new (128, depth);
  B = lambda_matrix_new (128, invariants);
  a = lambda_vector_new (128);

  A1 = lambda_matrix_new (128, depth);
  B1 = lambda_matrix_new (128, invariants);
  a1 = lambda_vector_new (128);

  /* Store the bounds in the equation matrix A, constant vector a, and
     invariant matrix B, so that we have Ax <= a + B.
     This requires a little equation rearranging so that everything is on the
     correct side of the inequality.  */
  size = 0;
  for (i = 0; i < depth; i++)
    {
      loop = LN_LOOPS (nest)[i];

      /* First we do the lower bound.  */
      if (LL_STEP (loop) > 0)
	expression = LL_LOWER_BOUND (loop);
      else
	expression = LL_UPPER_BOUND (loop);

      for (; expression != NULL; expression = LLE_NEXT (expression))
	{
	  /* Fill in the coefficient.  */
	  for (j = 0; j < i; j++)
	    A[size][j] = LLE_COEFFICIENTS (expression)[j];

	  /* And the invariant coefficient.  */
	  for (j = 0; j < invariants; j++)
	    B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];

	  /* And the constant.  */
	  a[size] = LLE_CONSTANT (expression);

	  /* Convert (2x+3y+2+b)/4 <= z to 2x+3y-4z <= -2-b.  IE put all
	     constants and single variables on   */
	  A[size][i] = -1 * LLE_DENOMINATOR (expression);
	  a[size] *= -1;
	  for (j = 0; j < invariants; j++)
	    B[size][j] *= -1;

	  size++;
	  /* Need to increase matrix sizes above.  */
732 733
	  gcc_assert (size <= 127);
	  
Daniel Berlin committed
734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760
	}

      /* Then do the exact same thing for the upper bounds.  */
      if (LL_STEP (loop) > 0)
	expression = LL_UPPER_BOUND (loop);
      else
	expression = LL_LOWER_BOUND (loop);

      for (; expression != NULL; expression = LLE_NEXT (expression))
	{
	  /* Fill in the coefficient.  */
	  for (j = 0; j < i; j++)
	    A[size][j] = LLE_COEFFICIENTS (expression)[j];

	  /* And the invariant coefficient.  */
	  for (j = 0; j < invariants; j++)
	    B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];

	  /* And the constant.  */
	  a[size] = LLE_CONSTANT (expression);

	  /* Convert z <= (2x+3y+2+b)/4 to -2x-3y+4z <= 2+b.  */
	  for (j = 0; j < i; j++)
	    A[size][j] *= -1;
	  A[size][i] = LLE_DENOMINATOR (expression);
	  size++;
	  /* Need to increase matrix sizes above.  */
761 762
	  gcc_assert (size <= 127);

Daniel Berlin committed
763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789
	}
    }

  /* Compute the lattice base x = base * y + origin, where y is the
     base space.  */
  lattice = lambda_lattice_compute_base (nest);

  /* Ax <= a + B then becomes ALy <= a+B - A*origin.  L is the lattice base  */

  /* A1 = A * L */
  lambda_matrix_mult (A, LATTICE_BASE (lattice), A1, size, depth, depth);

  /* a1 = a - A * origin constant.  */
  lambda_matrix_vector_mult (A, size, depth, LATTICE_ORIGIN (lattice), a1);
  lambda_vector_add_mc (a, 1, a1, -1, a1, size);

  /* B1 = B - A * origin invariant.  */
  lambda_matrix_mult (A, LATTICE_ORIGIN_INVARIANTS (lattice), B1, size, depth,
		      invariants);
  lambda_matrix_add_mc (B, 1, B1, -1, B1, size, invariants);

  /* Now compute the auxiliary space bounds by first inverting U, multiplying
     it by A1, then performing fourier motzkin.  */

  invertedtrans = lambda_matrix_new (depth, depth);

  /* Compute the inverse of U.  */
790 791
  lambda_matrix_inverse (LTM_MATRIX (trans),
			 invertedtrans, depth);
Daniel Berlin committed
792 793 794 795

  /* A = A1 inv(U).  */
  lambda_matrix_mult (A1, invertedtrans, A, size, depth, depth);

796 797
  return compute_nest_using_fourier_motzkin (size, depth, invariants,
					     A, B1, a1);
Daniel Berlin committed
798 799 800
}

/* Compute the loop bounds for the target space, using the bounds of
801 802
   the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.  
   The target space loop bounds are computed by multiplying the triangular
803
   matrix H by the auxiliary nest, to get the new loop bounds.  The sign of
804 805
   the loop steps (positive or negative) is then used to swap the bounds if
   the loop counts downwards.
Daniel Berlin committed
806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852
   Return the target loopnest.  */

static lambda_loopnest
lambda_compute_target_space (lambda_loopnest auxillary_nest,
			     lambda_trans_matrix H, lambda_vector stepsigns)
{
  lambda_matrix inverse, H1;
  int determinant, i, j;
  int gcd1, gcd2;
  int factor;

  lambda_loopnest target_nest;
  int depth, invariants;
  lambda_matrix target;

  lambda_loop auxillary_loop, target_loop;
  lambda_linear_expression expression, auxillary_expr, target_expr, tmp_expr;

  depth = LN_DEPTH (auxillary_nest);
  invariants = LN_INVARIANTS (auxillary_nest);

  inverse = lambda_matrix_new (depth, depth);
  determinant = lambda_matrix_inverse (LTM_MATRIX (H), inverse, depth);

  /* H1 is H excluding its diagonal.  */
  H1 = lambda_matrix_new (depth, depth);
  lambda_matrix_copy (LTM_MATRIX (H), H1, depth, depth);

  for (i = 0; i < depth; i++)
    H1[i][i] = 0;

  /* Computes the linear offsets of the loop bounds.  */
  target = lambda_matrix_new (depth, depth);
  lambda_matrix_mult (H1, inverse, target, depth, depth, depth);

  target_nest = lambda_loopnest_new (depth, invariants);

  for (i = 0; i < depth; i++)
    {

      /* Get a new loop structure.  */
      target_loop = lambda_loop_new ();
      LN_LOOPS (target_nest)[i] = target_loop;

      /* Computes the gcd of the coefficients of the linear part.  */
      gcd1 = gcd_vector (target[i], i);

853
      /* Include the denominator in the GCD.  */
Daniel Berlin committed
854 855
      gcd1 = gcd (gcd1, determinant);

856
      /* Now divide through by the gcd.  */
Daniel Berlin committed
857 858 859 860 861 862 863 864 865 866 867 868
      for (j = 0; j < i; j++)
	target[i][j] = target[i][j] / gcd1;

      expression = lambda_linear_expression_new (depth, invariants);
      lambda_vector_copy (target[i], LLE_COEFFICIENTS (expression), depth);
      LLE_DENOMINATOR (expression) = determinant / gcd1;
      LLE_CONSTANT (expression) = 0;
      lambda_vector_clear (LLE_INVARIANT_COEFFICIENTS (expression),
			   invariants);
      LL_LINEAR_OFFSET (target_loop) = expression;
    }

869
  /* For each loop, compute the new bounds from H.  */
Daniel Berlin committed
870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059
  for (i = 0; i < depth; i++)
    {
      auxillary_loop = LN_LOOPS (auxillary_nest)[i];
      target_loop = LN_LOOPS (target_nest)[i];
      LL_STEP (target_loop) = LTM_MATRIX (H)[i][i];
      factor = LTM_MATRIX (H)[i][i];

      /* First we do the lower bound.  */
      auxillary_expr = LL_LOWER_BOUND (auxillary_loop);

      for (; auxillary_expr != NULL;
	   auxillary_expr = LLE_NEXT (auxillary_expr))
	{
	  target_expr = lambda_linear_expression_new (depth, invariants);
	  lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
				     depth, inverse, depth,
				     LLE_COEFFICIENTS (target_expr));
	  lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
				    LLE_COEFFICIENTS (target_expr), depth,
				    factor);

	  LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
	  lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
			      LLE_INVARIANT_COEFFICIENTS (target_expr),
			      invariants);
	  lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
				    LLE_INVARIANT_COEFFICIENTS (target_expr),
				    invariants, factor);
	  LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);

	  if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
	    {
	      LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
		* determinant;
	      lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
					(target_expr),
					LLE_INVARIANT_COEFFICIENTS
					(target_expr), invariants,
					determinant);
	      LLE_DENOMINATOR (target_expr) =
		LLE_DENOMINATOR (target_expr) * determinant;
	    }
	  /* Find the gcd and divide by it here, rather than doing it
	     at the tree level.  */
	  gcd1 = gcd_vector (LLE_COEFFICIENTS (target_expr), depth);
	  gcd2 = gcd_vector (LLE_INVARIANT_COEFFICIENTS (target_expr),
			     invariants);
	  gcd1 = gcd (gcd1, gcd2);
	  gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
	  gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
	  for (j = 0; j < depth; j++)
	    LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
	  for (j = 0; j < invariants; j++)
	    LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
	  LLE_CONSTANT (target_expr) /= gcd1;
	  LLE_DENOMINATOR (target_expr) /= gcd1;
	  /* Ignore if identical to existing bound.  */
	  if (!lle_equal (LL_LOWER_BOUND (target_loop), target_expr, depth,
			  invariants))
	    {
	      LLE_NEXT (target_expr) = LL_LOWER_BOUND (target_loop);
	      LL_LOWER_BOUND (target_loop) = target_expr;
	    }
	}
      /* Now do the upper bound.  */
      auxillary_expr = LL_UPPER_BOUND (auxillary_loop);

      for (; auxillary_expr != NULL;
	   auxillary_expr = LLE_NEXT (auxillary_expr))
	{
	  target_expr = lambda_linear_expression_new (depth, invariants);
	  lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
				     depth, inverse, depth,
				     LLE_COEFFICIENTS (target_expr));
	  lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
				    LLE_COEFFICIENTS (target_expr), depth,
				    factor);
	  LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
	  lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
			      LLE_INVARIANT_COEFFICIENTS (target_expr),
			      invariants);
	  lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
				    LLE_INVARIANT_COEFFICIENTS (target_expr),
				    invariants, factor);
	  LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);

	  if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
	    {
	      LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
		* determinant;
	      lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
					(target_expr),
					LLE_INVARIANT_COEFFICIENTS
					(target_expr), invariants,
					determinant);
	      LLE_DENOMINATOR (target_expr) =
		LLE_DENOMINATOR (target_expr) * determinant;
	    }
	  /* Find the gcd and divide by it here, instead of at the
	     tree level.  */
	  gcd1 = gcd_vector (LLE_COEFFICIENTS (target_expr), depth);
	  gcd2 = gcd_vector (LLE_INVARIANT_COEFFICIENTS (target_expr),
			     invariants);
	  gcd1 = gcd (gcd1, gcd2);
	  gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
	  gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
	  for (j = 0; j < depth; j++)
	    LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
	  for (j = 0; j < invariants; j++)
	    LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
	  LLE_CONSTANT (target_expr) /= gcd1;
	  LLE_DENOMINATOR (target_expr) /= gcd1;
	  /* Ignore if equal to existing bound.  */
	  if (!lle_equal (LL_UPPER_BOUND (target_loop), target_expr, depth,
			  invariants))
	    {
	      LLE_NEXT (target_expr) = LL_UPPER_BOUND (target_loop);
	      LL_UPPER_BOUND (target_loop) = target_expr;
	    }
	}
    }
  for (i = 0; i < depth; i++)
    {
      target_loop = LN_LOOPS (target_nest)[i];
      /* If necessary, exchange the upper and lower bounds and negate
         the step size.  */
      if (stepsigns[i] < 0)
	{
	  LL_STEP (target_loop) *= -1;
	  tmp_expr = LL_LOWER_BOUND (target_loop);
	  LL_LOWER_BOUND (target_loop) = LL_UPPER_BOUND (target_loop);
	  LL_UPPER_BOUND (target_loop) = tmp_expr;
	}
    }
  return target_nest;
}

/* Compute the step signs of TRANS, using TRANS and stepsigns.  Return the new
   result.  */

static lambda_vector
lambda_compute_step_signs (lambda_trans_matrix trans, lambda_vector stepsigns)
{
  lambda_matrix work;
  lambda_vector signs;
  int n, col, k, mincol, quotient, swaptmp;

  n = LTM_ROWSIZE (trans);

  /* Operate on a scratch copy so the matrix inside TRANS is not
     modified.  */
  work = lambda_matrix_new (n, n);
  lambda_matrix_copy (LTM_MATRIX (trans), work, n, n);

  signs = lambda_vector_new (n);
  lambda_vector_copy (stepsigns, signs, n);

  for (col = 0; col < n; col++)
    {
      lambda_vector row = work[col];

      /* Negate any column whose entry in this row, from the diagonal
	 onward, is negative.  */
      for (k = col; k < n; k++)
	if (row[k] < 0)
	  lambda_matrix_col_negate (work, n, k);

      /* Eliminate the entries to the right of the diagonal by repeated
	 column exchanges and column additions, swapping the recorded
	 step signs along with the columns.  */
      while (lambda_vector_first_nz (row, n, col + 1) < n)
	{
	  mincol = lambda_vector_min_nz (row, n, col);
	  lambda_matrix_col_exchange (work, n, col, mincol);

	  swaptmp = signs[col];
	  signs[col] = signs[mincol];
	  signs[mincol] = swaptmp;

	  for (k = col + 1; k < n; k++)
	    {
	      quotient = row[k] / row[col];
	      lambda_matrix_col_add (work, n, col, k, -1 * quotient);
	    }
	}
    }
  return signs;
}

/* Transform NEST according to TRANS, and return the new loopnest.
   This involves
   1. Computing a lattice base for the transformation
   2. Composing the dense base with the specified transformation (TRANS)
   3. Decomposing the combined transformation into a lower triangular portion,
   and a unimodular portion.
   4. Computing the auxiliary nest using the unimodular portion.
   5. Computing the target nest using the auxiliary nest and the lower
   triangular portion.  */ 

lambda_loopnest
lambda_loopnest_transform (lambda_loopnest nest, lambda_trans_matrix trans)
{
  lambda_loopnest auxillary_nest, target_nest;
  int depth, invariants;
  int i, j;
  lambda_lattice lattice;
  lambda_trans_matrix composed, H, U;
  lambda_loop loop;
  lambda_linear_expression offset;
  lambda_vector origin;
  lambda_matrix origin_invariants;
  lambda_vector stepsigns;
  int denom;

  depth = LN_DEPTH (nest);
  invariants = LN_INVARIANTS (nest);

  /* Record the sign of each original loop step.  */
  stepsigns = lambda_vector_new (depth);
  for (i = 0; i < depth; i++)
    stepsigns[i] = LL_STEP (LN_LOOPS (nest)[i]) > 0 ? 1 : -1;

  /* Compute the lattice base.  */
  lattice = lambda_lattice_compute_base (nest);

  /* Compose the requested transformation with the lattice base.  */
  composed = lambda_trans_matrix_new (depth, depth);
  lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_BASE (lattice),
		      LTM_MATRIX (composed), depth, depth, depth);

  /* Decompose the composed transformation into its Hermite normal form H
     and a unimodular factor U.  */
  H = lambda_trans_matrix_new (depth, depth);
  U = lambda_trans_matrix_new (depth, depth);
  lambda_matrix_hermite (LTM_MATRIX (composed), depth, LTM_MATRIX (H),
			 LTM_MATRIX (U));

  /* Compute the auxiliary loop nest's space from the unimodular
     portion.  */
  auxillary_nest = lambda_compute_auxillary_space (nest, U);

  /* Compute the loop step signs from the old step signs and the
     transformation matrix.  */
  stepsigns = lambda_compute_step_signs (composed, stepsigns);

  /* Compute the target loop nest space from the auxiliary nest and
     the lower triangular matrix H.  */
  target_nest = lambda_compute_target_space (auxillary_nest, H, stepsigns);

  /* Fold the transformed lattice origin into each loop's linear
     offset.  */
  origin = lambda_vector_new (depth);
  origin_invariants = lambda_matrix_new (depth, invariants);
  lambda_matrix_vector_mult (LTM_MATRIX (trans), depth, depth,
			     LATTICE_ORIGIN (lattice), origin);
  lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_ORIGIN_INVARIANTS (lattice),
		      origin_invariants, depth, depth, invariants);

  for (i = 0; i < depth; i++)
    {
      loop = LN_LOOPS (target_nest)[i];
      offset = LL_LINEAR_OFFSET (loop);

      /* A zero linear part means the denominator is irrelevant; scale by
	 1 in that case.  */
      denom = (lambda_vector_zerop (LLE_COEFFICIENTS (offset), depth)
	       ? 1 : LLE_DENOMINATOR (offset));

      LLE_CONSTANT (offset) += denom * origin[i];

      for (j = 0; j < invariants; j++)
	LLE_INVARIANT_COEFFICIENTS (offset)[j]
	  += denom * origin_invariants[i][j];
    }

  return target_nest;
}

/* Convert a gcc tree expression EXPR to a lambda linear expression, and
   return the new expression.  DEPTH is the depth of the loopnest.
   OUTERINDUCTIONVARS is an array of the induction variables for outer loops
   in this nest.  INVARIANTS is the array of invariants for the loop.  EXTRA
   is the amount we have to add/subtract from the expression because of the
   type of comparison it is used in.  Returns NULL if EXPR is not a form we
   can convert (only INTEGER_CST and SSA_NAME are handled).  */

static lambda_linear_expression
gcc_tree_to_linear_expression (int depth, tree expr,
			       VEC(tree,heap) *outerinductionvars,
			       VEC(tree,heap) *invariants, int extra)
{
  lambda_linear_expression lle = NULL;
  switch (TREE_CODE (expr))
    {
    case INTEGER_CST:
      {
	/* A constant becomes a pure constant term, adjusted by EXTRA.  */
	lle = lambda_linear_expression_new (depth, 2 * depth);
	LLE_CONSTANT (lle) = TREE_INT_CST_LOW (expr);
	if (extra != 0)
	  LLE_CONSTANT (lle) += extra;

	LLE_DENOMINATOR (lle) = 1;
      }
      break;
    case SSA_NAME:
      {
	tree iv, invar;
	size_t i;
	/* An SSA name matching an outer induction variable (by underlying
	   variable, so any SSA version matches) gets coefficient 1 for
	   that loop.  NOTE(review): neither scan stops at the first
	   match, and a match in the invariants loop below allocates a
	   fresh expression that discards one built here -- confirm the
	   invariant-wins behavior is intended.  */
	for (i = 0; VEC_iterate (tree, outerinductionvars, i, iv); i++)
	  if (iv != NULL)
	    {
	      if (SSA_NAME_VAR (iv) == SSA_NAME_VAR (expr))
		{
		  lle = lambda_linear_expression_new (depth, 2 * depth);
		  LLE_COEFFICIENTS (lle)[i] = 1;
		  if (extra != 0)
		    LLE_CONSTANT (lle) = extra;

		  LLE_DENOMINATOR (lle) = 1;
		}
	    }
	/* Likewise, a name matching a loop invariant gets coefficient 1
	   in the invariant part of the expression.  */
	for (i = 0; VEC_iterate (tree, invariants, i, invar); i++)
	  if (invar != NULL)
	    {
	      if (SSA_NAME_VAR (invar) == SSA_NAME_VAR (expr))
		{
		  lle = lambda_linear_expression_new (depth, 2 * depth);
		  LLE_INVARIANT_COEFFICIENTS (lle)[i] = 1;
		  if (extra != 0)
		    LLE_CONSTANT (lle) = extra;
		  LLE_DENOMINATOR (lle) = 1;
		}
	    }
      }
      break;
    default:
      /* Not linear in any form we recognize.  */
      return NULL;
    }

  return lle;
}

1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223
/* Return the depth (number of nested loops, counting NEST itself) of
   the loopnest NEST by walking the chain of inner loops.  */

static int 
depth_of_nest (struct loop *nest)
{
  /* Use an int accumulator to match the return type; the previous
     size_t local was implicitly narrowed on return.  */
  int depth = 0;
  while (nest)
    {
      depth++;
      nest = nest->inner;
    }
  return depth;
}


Daniel Berlin committed
1224 1225 1226
/* Return true if OP is invariant in LOOP and all outer loops.  */

static bool
1227
invariant_in_loop_and_outer_loops (struct loop *loop, tree op)
Daniel Berlin committed
1228
{
Daniel Berlin committed
1229 1230
  if (is_gimple_min_invariant (op))
    return true;
Daniel Berlin committed
1231 1232
  if (loop->depth == 0)
    return true;
1233 1234 1235 1236 1237 1238
  if (!expr_invariant_in_loop_p (loop, op))
    return false;
  if (loop->outer 
      && !invariant_in_loop_and_outer_loops (loop->outer, op))
    return false;
  return true;
Daniel Berlin committed
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250
}

/* Generate a lambda loop from a gcc loop LOOP.  Return the new lambda loop,
   or NULL if it could not be converted.
   DEPTH is the depth of the loop.
   INVARIANTS is a pointer to the array of loop invariants.
   The induction variable for this loop should be stored in the parameter
   OURINDUCTIONVAR.
   OUTERINDUCTIONVARS is an array of induction variables for outer loops.
   LBOUNDVARS, UBOUNDVARS and STEPS collect, per converted loop, the lower
   bound tree, the adjusted upper bound tree, and the integer step.  */

static lambda_loop
gcc_loop_to_lambda_loop (struct loop *loop, int depth,
			 VEC(tree,heap) ** invariants,
			 tree * ourinductionvar,
			 VEC(tree,heap) * outerinductionvars,
			 VEC(tree,heap) ** lboundvars,
			 VEC(tree,heap) ** uboundvars,
			 VEC(int,heap) ** steps)
{
  tree phi;
  tree exit_cond;
  tree access_fn, inductionvar;
  tree step;
  lambda_loop lloop = NULL;
  lambda_linear_expression lbound, ubound;
  tree test;
  int stepint;
  int extra = 0;
  tree lboundvar, uboundvar, uboundresult;

  /* Find out induction var and exit condition.  */
  inductionvar = find_induction_var_from_exit_cond (loop);
  exit_cond = get_loop_exit_condition (loop);

  if (inductionvar == NULL || exit_cond == NULL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: Cannot determine exit condition or induction variable for loop.\n");
      return NULL;
    }

  test = TREE_OPERAND (exit_cond, 0);

  if (SSA_NAME_DEF_STMT (inductionvar) == NULL_TREE)
    {

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: Cannot find PHI node for induction variable\n");

      return NULL;
    }

  phi = SSA_NAME_DEF_STMT (inductionvar);
  if (TREE_CODE (phi) != PHI_NODE)
    {
      /* The defining statement is not itself a PHI; follow its single
	 use operand back one step to find the real induction PHI.  */
      phi = SINGLE_SSA_TREE_OPERAND (phi, SSA_OP_USE);
      if (!phi)
	{

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Unable to convert loop: Cannot find PHI node for induction variable\n");

	  return NULL;
	}

      phi = SSA_NAME_DEF_STMT (phi);
      if (TREE_CODE (phi) != PHI_NODE)
	{

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Unable to convert loop: Cannot find PHI node for induction variable\n");
	  return NULL;
	}

    }

  /* The induction variable name/version we want to put in the array is the
     result of the induction variable phi node.  */
  *ourinductionvar = PHI_RESULT (phi);
  access_fn = instantiate_parameters
    (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
  if (access_fn == chrec_dont_know)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: Access function for induction variable phi is unknown\n");

      return NULL;
    }

  /* The loop must have a known, constant integer step.  */
  step = evolution_part_in_loop_num (access_fn, loop->num);
  if (!step || step == chrec_dont_know)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: Cannot determine step of loop.\n");

      return NULL;
    }
  if (TREE_CODE (step) != INTEGER_CST)
    {

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: Step of loop is not integer.\n");
      return NULL;
    }

  stepint = TREE_INT_CST_LOW (step);

  /* Only want phis for induction vars, which will have two
     arguments.  */
  if (PHI_NUM_ARGS (phi) != 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: PHI node for induction variable has >2 arguments\n");
      return NULL;
    }

  /* Another induction variable check. One argument's source should be
     in the loop, one outside the loop.  */
  if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src)
      && flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 1)->src))
    {

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: PHI edges both inside loop, or both outside loop.\n");

      return NULL;
    }

  /* The PHI argument coming from outside the loop is the initial (lower
     bound) value of the induction variable.  */
  if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src))
    {
      lboundvar = PHI_ARG_DEF (phi, 1);
      lbound = gcc_tree_to_linear_expression (depth, lboundvar,
					      outerinductionvars, *invariants,
					      0);
    }
  else
    {
      lboundvar = PHI_ARG_DEF (phi, 0);
      lbound = gcc_tree_to_linear_expression (depth, lboundvar,
					      outerinductionvars, *invariants,
					      0);
    }
  
  if (!lbound)
    {

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: Cannot convert lower bound to linear expression\n");

      return NULL;
    }
  /* One part of the test may be a loop invariant tree.  */
  VEC_reserve (tree, heap, *invariants, 1);
  if (TREE_CODE (TREE_OPERAND (test, 1)) == SSA_NAME
      && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 1)))
    VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 1));
  else if (TREE_CODE (TREE_OPERAND (test, 0)) == SSA_NAME
	   && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 0)))
    VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 0));
  
  /* The non-induction variable part of the test is the upper bound variable.
   */
  if (TREE_OPERAND (test, 0) == inductionvar)
    uboundvar = TREE_OPERAND (test, 1);
  else
    uboundvar = TREE_OPERAND (test, 0);
    

  /* We only size the vectors assuming we have, at max, 2 times as many
     invariants as we do loops (one for each bound).
     This is just an arbitrary number, but it has to be matched against the
     code below.  */
  gcc_assert (VEC_length (tree, *invariants) <= (unsigned int) (2 * depth));
  

  /* We might have some leftover.  EXTRA converts the exit comparison
     into an inclusive upper bound: e.g. i < N with step 1 becomes
     bound N - 1.  NOTE(review): LE_EXPR/GE_EXPR fall through with
     extra == 0, i.e. they are taken as already inclusive.  */
  if (TREE_CODE (test) == LT_EXPR)
    extra = -1 * stepint;
  else if (TREE_CODE (test) == NE_EXPR)
    extra = -1 * stepint;
  else if (TREE_CODE (test) == GT_EXPR)
    extra = -1 * stepint;
  else if (TREE_CODE (test) == EQ_EXPR)
    extra = 1 * stepint;
  
  ubound = gcc_tree_to_linear_expression (depth, uboundvar,
					  outerinductionvars,
					  *invariants, extra);
  /* Record uboundvar + extra as the tree form of the inclusive upper
     bound, along with the lower bound and step, for perfect nestification.  */
  uboundresult = build2 (PLUS_EXPR, TREE_TYPE (uboundvar), uboundvar,
			 build_int_cst (TREE_TYPE (uboundvar), extra));
  VEC_safe_push (tree, heap, *uboundvars, uboundresult);
  VEC_safe_push (tree, heap, *lboundvars, lboundvar);
  VEC_safe_push (int, heap, *steps, stepint);
  if (!ubound)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unable to convert loop: Cannot convert upper bound to linear expression\n");
      return NULL;
    }

  lloop = lambda_loop_new ();
  LL_STEP (lloop) = stepint;
  LL_LOWER_BOUND (lloop) = lbound;
  LL_UPPER_BOUND (lloop) = ubound;
  return lloop;
}

/* Given a LOOP, find the induction variable it is testing against in the exit
   condition.  Return the induction variable if found, NULL otherwise.  */

static tree
find_induction_var_from_exit_cond (struct loop *loop)
{
  tree expr = get_loop_exit_condition (loop);
1464
  tree ivarop;
Daniel Berlin committed
1465 1466 1467 1468 1469 1470
  tree test;
  if (expr == NULL_TREE)
    return NULL_TREE;
  if (TREE_CODE (expr) != COND_EXPR)
    return NULL_TREE;
  test = TREE_OPERAND (expr, 0);
1471
  if (!COMPARISON_CLASS_P (test))
Daniel Berlin committed
1472
    return NULL_TREE;
1473 1474 1475 1476 1477

  /* Find the side that is invariant in this loop. The ivar must be the other
     side.  */
  
  if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 0)))
1478
      ivarop = TREE_OPERAND (test, 1);
1479 1480 1481 1482 1483
  else if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 1)))
      ivarop = TREE_OPERAND (test, 0);
  else
    return NULL_TREE;

1484
  if (TREE_CODE (ivarop) != SSA_NAME)
Daniel Berlin committed
1485
    return NULL_TREE;
1486
  return ivarop;
Daniel Berlin committed
1487 1488
}

1489
DEF_VEC_P(lambda_loop);
1490
DEF_VEC_ALLOC_P(lambda_loop,heap);
1491

Daniel Berlin committed
1492 1493 1494 1495 1496 1497 1498 1499
/* Generate a lambda loopnest from a gcc loopnest LOOP_NEST.
   Return the new loop nest.  
   INDUCTIONVARS is a pointer to an array of induction variables for the
   loopnest that will be filled in during this process.
   INVARIANTS is a pointer to an array of invariants that will be filled in
   during this process.  */

lambda_loopnest
Daniel Berlin committed
1500 1501
gcc_loopnest_to_lambda_loopnest (struct loops *currloops,
				 struct loop * loop_nest,
1502 1503
				 VEC(tree,heap) **inductionvars,
				 VEC(tree,heap) **invariants,
Daniel Berlin committed
1504
				 bool need_perfect_nest)
Daniel Berlin committed
1505
{
1506
  lambda_loopnest ret = NULL;
Daniel Berlin committed
1507 1508 1509
  struct loop *temp;
  int depth = 0;
  size_t i;
1510 1511 1512 1513
  VEC(lambda_loop,heap) *loops = NULL;
  VEC(tree,heap) *uboundvars = NULL;
  VEC(tree,heap) *lboundvars  = NULL;
  VEC(int,heap) *steps = NULL;
Daniel Berlin committed
1514 1515
  lambda_loop newloop;
  tree inductionvar = NULL;
1516 1517
  
  depth = depth_of_nest (loop_nest);
Daniel Berlin committed
1518 1519 1520 1521
  temp = loop_nest;
  while (temp)
    {
      newloop = gcc_loop_to_lambda_loop (temp, depth, invariants,
Daniel Berlin committed
1522 1523 1524
					 &inductionvar, *inductionvars,
					 &lboundvars, &uboundvars,
					 &steps);
Daniel Berlin committed
1525 1526
      if (!newloop)
	return NULL;
1527 1528
      VEC_safe_push (tree, heap, *inductionvars, inductionvar);
      VEC_safe_push (lambda_loop, heap, loops, newloop);
Daniel Berlin committed
1529 1530
      temp = temp->inner;
    }
1531
  if (need_perfect_nest)
Daniel Berlin committed
1532
    {
1533 1534 1535 1536
      if (!perfect_nestify (currloops, loop_nest, 
			    lboundvars, uboundvars, steps, *inductionvars))
	{
	  if (dump_file)
1537 1538 1539
	    fprintf (dump_file,
		     "Not a perfect loop nest and couldn't convert to one.\n");    
	  goto fail;
1540 1541
	}
      else if (dump_file)
1542 1543
	fprintf (dump_file,
		 "Successfully converted loop nest to perfect loop nest.\n");
Daniel Berlin committed
1544
    }
Daniel Berlin committed
1545 1546 1547
  ret = lambda_loopnest_new (depth, 2 * depth);
  for (i = 0; VEC_iterate (lambda_loop, loops, i, newloop); i++)
    LN_LOOPS (ret)[i] = newloop;
1548 1549 1550 1551 1552 1553
 fail:
  VEC_free (lambda_loop, heap, loops);
  VEC_free (tree, heap, uboundvars);
  VEC_free (tree, heap, lboundvars);
  VEC_free (int, heap, steps);
  
Daniel Berlin committed
1554 1555 1556 1557 1558 1559
  return ret;
}

/* Convert a lambda body vector LBV to a gcc tree, and return the new tree. 
   STMTS_TO_INSERT is a pointer to a tree where the statements we need to be
   inserted for us are stored.  INDUCTION_VARS is the array of induction
   variables for the loop this LBV is from.  TYPE is the tree type to use for
   the variables and trees involved.  Returns the SSA name holding the final
   value of the expression.  */

static tree
lbv_to_gcc_expression (lambda_body_vector lbv, 
		       tree type, VEC(tree,heap) *induction_vars, 
		       tree *stmts_to_insert)
{
  tree stmts, stmt, resvar, name;
  tree iv;
  size_t i;
  tree_stmt_iterator tsi;

  /* Create a statement list and a linear expression temporary.  */
  stmts = alloc_stmt_list ();
  resvar = create_tmp_var (type, "lbvtmp");
  add_referenced_tmp_var (resvar);

  /* Start at 0.  */
  stmt = build2 (MODIFY_EXPR, void_type_node, resvar, integer_zero_node);
  name = make_ssa_name (resvar, stmt);
  TREE_OPERAND (stmt, 0) = name;
  tsi = tsi_last (stmts);
  tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);

  /* Accumulate coefficient * induction-variable terms, skipping zero
     coefficients.  */
  for (i = 0; VEC_iterate (tree, induction_vars, i, iv); i++)
    {
      if (LBV_COEFFICIENTS (lbv)[i] != 0)
	{
	  tree newname;
	  tree coeffmult;
	  
	  /* newname = coefficient * induction_variable */
	  coeffmult = build_int_cst (type, LBV_COEFFICIENTS (lbv)[i]);
	  stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
			 fold_build2 (MULT_EXPR, type, iv, coeffmult));

	  newname = make_ssa_name (resvar, stmt);
	  TREE_OPERAND (stmt, 0) = newname;
	  fold_stmt (&stmt);
	  tsi = tsi_last (stmts);
	  tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);

	  /* name = name + newname */
	  stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
			 build2 (PLUS_EXPR, type, name, newname));
	  name = make_ssa_name (resvar, stmt);
	  TREE_OPERAND (stmt, 0) = name;
	  fold_stmt (&stmt);
	  tsi = tsi_last (stmts);
	  tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);

	}
    }

  /* Handle any denominator that occurs.  */
  if (LBV_DENOMINATOR (lbv) != 1)
    {
      tree denominator = build_int_cst (type, LBV_DENOMINATOR (lbv));
      stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
		     build2 (CEIL_DIV_EXPR, type, name, denominator));
      name = make_ssa_name (resvar, stmt);
      TREE_OPERAND (stmt, 0) = name;
      fold_stmt (&stmt);
      tsi = tsi_last (stmts);
      tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
    }
  *stmts_to_insert = stmts;
  return name;
}

/* Convert a linear expression from coefficient and constant form to a
   gcc tree.
   Return the tree that represents the final value of the expression.
   LLE is the linear expression to convert.
   OFFSET is the linear offset to apply to the expression.
   TYPE is the tree type to use for the variables and math.
   INDUCTION_VARS is a vector of induction variables for the loops.
   INVARIANTS is a vector of the loop nest invariants.
   WRAP specifies what tree code to wrap the results in, if there is more than
   one (it is either MAX_EXPR, or MIN_EXPR).
   STMTS_TO_INSERT Is a pointer to the statement list we fill in with
   statements that need to be inserted for the linear expression.

   Each lambda_linear_expression in the LLE chain produces one SSA value;
   when the chain has two entries the two values are combined with WRAP
   (e.g. a lower bound that is the MAX of two expressions).  */

static tree
lle_to_gcc_expression (lambda_linear_expression lle,
		       lambda_linear_expression offset,
		       tree type,
		       VEC(tree,heap) *induction_vars,
		       VEC(tree,heap) *invariants,
		       enum tree_code wrap, tree *stmts_to_insert)
{
  tree stmts, stmt, resvar, name;
  size_t i;
  tree_stmt_iterator tsi;
  tree iv, invar;
  VEC(tree,heap) *results = NULL;

  gcc_assert (wrap == MAX_EXPR || wrap == MIN_EXPR);
  name = NULL_TREE;
  /* Create a statement list and a linear expression temporary.  */
  stmts = alloc_stmt_list ();
  resvar = create_tmp_var (type, "lletmp");
  add_referenced_tmp_var (resvar);

  /* Build up the linear expressions, and put the variable representing the
     result in the results array.  */
  for (; lle != NULL; lle = LLE_NEXT (lle))
    {
      /* Start at name = 0.  */
      stmt = build2 (MODIFY_EXPR, void_type_node, resvar, integer_zero_node);
      name = make_ssa_name (resvar, stmt);
      TREE_OPERAND (stmt, 0) = name;
      fold_stmt (&stmt);
      tsi = tsi_last (stmts);
      tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);

      /* First do the induction variables.
         At the end, name = name + all the induction variables added
         together.  */
      for (i = 0; VEC_iterate (tree, induction_vars, i, iv); i++)
	{
	  if (LLE_COEFFICIENTS (lle)[i] != 0)
	    {
	      tree newname;
	      tree mult;
	      tree coeff;

	      /* mult = induction variable * coefficient.  A coefficient
		 of 1 needs no multiply; just use the iv directly.  */
	      if (LLE_COEFFICIENTS (lle)[i] == 1)
		{
		  mult = VEC_index (tree, induction_vars, i);
		}
	      else
		{
		  coeff = build_int_cst (type,
					 LLE_COEFFICIENTS (lle)[i]);
		  mult = fold_build2 (MULT_EXPR, type, iv, coeff);
		}

	      /* newname = mult */
	      stmt = build2 (MODIFY_EXPR, void_type_node, resvar, mult);
	      newname = make_ssa_name (resvar, stmt);
	      TREE_OPERAND (stmt, 0) = newname;
	      fold_stmt (&stmt);
	      tsi = tsi_last (stmts);
	      tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);

	      /* name = name + newname */
	      stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
			     build2 (PLUS_EXPR, type, name, newname));
	      name = make_ssa_name (resvar, stmt);
	      TREE_OPERAND (stmt, 0) = name;
	      fold_stmt (&stmt);
	      tsi = tsi_last (stmts);
	      tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
	    }
	}

      /* Handle our invariants.
         At the end, we have name = name + result of adding all multiplied
         invariants.  */
      for (i = 0; VEC_iterate (tree, invariants, i, invar); i++)
	{
	  if (LLE_INVARIANT_COEFFICIENTS (lle)[i] != 0)
	    {
	      tree newname;
	      tree mult;
	      tree coeff;
	      int invcoeff = LLE_INVARIANT_COEFFICIENTS (lle)[i];
	      /* mult = invariant * coefficient  */
	      if (invcoeff == 1)
		{
		  mult = invar;
		}
	      else
		{
		  coeff = build_int_cst (type, invcoeff);
		  mult = fold_build2 (MULT_EXPR, type, invar, coeff);
		}

	      /* newname = mult */
	      stmt = build2 (MODIFY_EXPR, void_type_node, resvar, mult);
	      newname = make_ssa_name (resvar, stmt);
	      TREE_OPERAND (stmt, 0) = newname;
	      fold_stmt (&stmt);
	      tsi = tsi_last (stmts);
	      tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);

	      /* name = name + newname */
	      stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
			     build2 (PLUS_EXPR, type, name, newname));
	      name = make_ssa_name (resvar, stmt);
	      TREE_OPERAND (stmt, 0) = name;
	      fold_stmt (&stmt);
	      tsi = tsi_last (stmts);
	      tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
	    }
	}

      /* Now handle the constant.
         name = name + constant.  */
      if (LLE_CONSTANT (lle) != 0)
	{
	  stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
			 build2 (PLUS_EXPR, type, name, 
			         build_int_cst (type, LLE_CONSTANT (lle))));
	  name = make_ssa_name (resvar, stmt);
	  TREE_OPERAND (stmt, 0) = name;
	  fold_stmt (&stmt);
	  tsi = tsi_last (stmts);
	  tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
	}

      /* Now handle the offset.
         name = name + linear offset.  */
      if (LLE_CONSTANT (offset) != 0)
	{
	  stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
			 build2 (PLUS_EXPR, type, name, 
			         build_int_cst (type, LLE_CONSTANT (offset))));
	  name = make_ssa_name (resvar, stmt);
	  TREE_OPERAND (stmt, 0) = name;
	  fold_stmt (&stmt);
	  tsi = tsi_last (stmts);
	  tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
	}

      /* Handle any denominator that occurs.  CEIL_DIV for a MAX_EXPR
	 bound (lower bound), FLOOR_DIV for a MIN_EXPR bound (upper
	 bound), so the integer division rounds towards the inside of
	 the iteration space.  */
      if (LLE_DENOMINATOR (lle) != 1)
	{
	  stmt = build_int_cst (type, LLE_DENOMINATOR (lle));
	  stmt = build2 (wrap == MAX_EXPR ? CEIL_DIV_EXPR : FLOOR_DIV_EXPR,
			 type, name, stmt);
	  stmt = build2 (MODIFY_EXPR, void_type_node, resvar, stmt);

	  /* name = {ceil, floor}(name/denominator) */
	  name = make_ssa_name (resvar, stmt);
	  TREE_OPERAND (stmt, 0) = name;
	  tsi = tsi_last (stmts);
	  tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
	}
      VEC_safe_push (tree, heap, results, name);
    }

  /* Again, out of laziness, we don't handle this case yet.  It's not
     hard, it just hasn't occurred.  */
  gcc_assert (VEC_length (tree, results) <= 2);
  
  /* We may need to wrap the results in a MAX_EXPR or MIN_EXPR.  */
  if (VEC_length (tree, results) > 1)
    {
      tree op1 = VEC_index (tree, results, 0);
      tree op2 = VEC_index (tree, results, 1);
      stmt = build2 (MODIFY_EXPR, void_type_node, resvar,
		     build2 (wrap, type, op1, op2));
      name = make_ssa_name (resvar, stmt);
      TREE_OPERAND (stmt, 0) = name;
      tsi = tsi_last (stmts);
      tsi_link_after (&tsi, stmt, TSI_CONTINUE_LINKING);
    }

  VEC_free (tree, heap, results);
  
  *stmts_to_insert = stmts;
  return name;
}

/* Transform a lambda loopnest NEW_LOOPNEST, which had TRANSFORM applied to
   it, back into gcc code.  This changes the
   loops, their induction variables, and their bodies, so that they
   match the transformed loopnest.  
   OLD_LOOPNEST is the loopnest before we've replaced it with the new
   loopnest.
   OLD_IVS is a vector of induction variables from the old loopnest.
   INVARIANTS is a vector of loop invariants from the old loopnest.
   NEW_LOOPNEST is the new lambda loopnest to replace OLD_LOOPNEST with.
   TRANSFORM is the matrix transform that was applied to OLD_LOOPNEST to get 
   NEW_LOOPNEST.  */
1839

Daniel Berlin committed
1840 1841
void
lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest,
1842 1843
				 VEC(tree,heap) *old_ivs,
				 VEC(tree,heap) *invariants,
Daniel Berlin committed
1844 1845 1846 1847 1848 1849
				 lambda_loopnest new_loopnest,
				 lambda_trans_matrix transform)
{
  struct loop *temp;
  size_t i = 0;
  size_t depth = 0;
1850
  VEC(tree,heap) *new_ivs = NULL;
1851 1852
  tree oldiv;
  
Daniel Berlin committed
1853 1854 1855 1856 1857 1858 1859 1860
  block_stmt_iterator bsi;

  if (dump_file)
    {
      transform = lambda_trans_matrix_inverse (transform);
      fprintf (dump_file, "Inverse of transformation matrix:\n");
      print_lambda_trans_matrix (dump_file, transform);
    }
1861
  depth = depth_of_nest (old_loopnest);
Daniel Berlin committed
1862 1863 1864 1865 1866 1867
  temp = old_loopnest;

  while (temp)
    {
      lambda_loop newloop;
      basic_block bb;
1868
      edge exit;
Daniel Berlin committed
1869 1870 1871 1872
      tree ivvar, ivvarinced, exitcond, stmts;
      enum tree_code testtype;
      tree newupperbound, newlowerbound;
      lambda_linear_expression offset;
1873
      tree type;
1874
      bool insert_after;
1875
      tree inc_stmt;
1876 1877 1878 1879

      oldiv = VEC_index (tree, old_ivs, i);
      type = TREE_TYPE (oldiv);

Daniel Berlin committed
1880 1881
      /* First, build the new induction variable temporary  */

1882
      ivvar = create_tmp_var (type, "lnivtmp");
Daniel Berlin committed
1883 1884
      add_referenced_tmp_var (ivvar);

1885
      VEC_safe_push (tree, heap, new_ivs, ivvar);
Daniel Berlin committed
1886 1887 1888 1889

      newloop = LN_LOOPS (new_loopnest)[i];

      /* Linear offset is a bit tricky to handle.  Punt on the unhandled
1890
         cases for now.  */
Daniel Berlin committed
1891
      offset = LL_LINEAR_OFFSET (newloop);
1892
      
1893 1894
      gcc_assert (LLE_DENOMINATOR (offset) == 1 &&
		  lambda_vector_zerop (LLE_COEFFICIENTS (offset), depth));
1895
	    
Daniel Berlin committed
1896
      /* Now build the  new lower bounds, and insert the statements
1897
         necessary to generate it on the loop preheader.  */
Daniel Berlin committed
1898 1899
      newlowerbound = lle_to_gcc_expression (LL_LOWER_BOUND (newloop),
					     LL_LINEAR_OFFSET (newloop),
1900
					     type,
Daniel Berlin committed
1901 1902 1903
					     new_ivs,
					     invariants, MAX_EXPR, &stmts);
      bsi_insert_on_edge (loop_preheader_edge (temp), stmts);
1904
      bsi_commit_edge_inserts ();
Daniel Berlin committed
1905 1906 1907 1908
      /* Build the new upper bound and insert its statements in the
         basic block of the exit condition */
      newupperbound = lle_to_gcc_expression (LL_UPPER_BOUND (newloop),
					     LL_LINEAR_OFFSET (newloop),
1909
					     type,
Daniel Berlin committed
1910 1911
					     new_ivs,
					     invariants, MIN_EXPR, &stmts);
1912
      exit = temp->single_exit;
Daniel Berlin committed
1913 1914 1915 1916 1917
      exitcond = get_loop_exit_condition (temp);
      bb = bb_for_stmt (exitcond);
      bsi = bsi_start (bb);
      bsi_insert_after (&bsi, stmts, BSI_NEW_STMT);

1918
      /* Create the new iv.  */
Daniel Berlin committed
1919

1920
      standard_iv_increment_position (temp, &bsi, &insert_after);
Daniel Berlin committed
1921
      create_iv (newlowerbound,
1922
		 build_int_cst (type, LL_STEP (newloop)),
1923
		 ivvar, temp, &bsi, insert_after, &ivvar,
1924 1925 1926 1927 1928 1929
		 NULL);

      /* Unfortunately, the incremented ivvar that create_iv inserted may not
	 dominate the block containing the exit condition.
	 So we simply create our own incremented iv to use in the new exit
	 test,  and let redundancy elimination sort it out.  */
1930 1931 1932 1933
      inc_stmt = build2 (PLUS_EXPR, type, 
			 ivvar, build_int_cst (type, LL_STEP (newloop)));
      inc_stmt = build2 (MODIFY_EXPR, void_type_node, SSA_NAME_VAR (ivvar),
			 inc_stmt);
1934 1935 1936 1937
      ivvarinced = make_ssa_name (SSA_NAME_VAR (ivvar), inc_stmt);
      TREE_OPERAND (inc_stmt, 0) = ivvarinced;
      bsi = bsi_for_stmt (exitcond);
      bsi_insert_before (&bsi, inc_stmt, BSI_SAME_STMT);
Daniel Berlin committed
1938 1939 1940

      /* Replace the exit condition with the new upper bound
         comparison.  */
1941
      
Daniel Berlin committed
1942
      testtype = LL_STEP (newloop) >= 0 ? LE_EXPR : GE_EXPR;
1943
      
1944 1945 1946 1947 1948
      /* We want to build a conditional where true means exit the loop, and
	 false means continue the loop.
	 So swap the testtype if this isn't the way things are.*/

      if (exit->flags & EDGE_FALSE_VALUE)
1949
	testtype = swap_tree_comparison (testtype);
1950

1951 1952 1953
      COND_EXPR_COND (exitcond) = build2 (testtype,
					  boolean_type_node,
					  newupperbound, ivvarinced);
1954
      update_stmt (exitcond);
Daniel Berlin committed
1955 1956 1957 1958 1959
      VEC_replace (tree, new_ivs, i, ivvar);

      i++;
      temp = temp->inner;
    }
1960

Daniel Berlin committed
1961 1962
  /* Rewrite uses of the old ivs so that they are now specified in terms of
     the new ivs.  */
1963 1964

  for (i = 0; VEC_iterate (tree, old_ivs, i, oldiv); i++)
Daniel Berlin committed
1965
    {
1966 1967 1968 1969 1970 1971
      imm_use_iterator imm_iter;
      use_operand_p imm_use;
      tree oldiv_def;
      tree oldiv_stmt = SSA_NAME_DEF_STMT (oldiv);

      if (TREE_CODE (oldiv_stmt) == PHI_NODE)
1972
        oldiv_def = PHI_RESULT (oldiv_stmt);
1973
      else
1974 1975
	oldiv_def = SINGLE_SSA_TREE_OPERAND (oldiv_stmt, SSA_OP_DEF);
      gcc_assert (oldiv_def != NULL_TREE);
1976 1977

      FOR_EACH_IMM_USE_SAFE (imm_use, imm_iter, oldiv_def)
Daniel Berlin committed
1978
	{
1979
	  tree stmt = USE_STMT (imm_use);
1980 1981
	  use_operand_p use_p;
	  ssa_op_iter iter;
1982
	  gcc_assert (TREE_CODE (stmt) != PHI_NODE);
1983
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
Daniel Berlin committed
1984
	    {
1985
	      if (USE_FROM_PTR (use_p) == oldiv)
Daniel Berlin committed
1986 1987
		{
		  tree newiv, stmts;
1988
		  lambda_body_vector lbv, newlbv;
Daniel Berlin committed
1989 1990 1991 1992 1993
		  /* Compute the new expression for the induction
		     variable.  */
		  depth = VEC_length (tree, new_ivs);
		  lbv = lambda_body_vector_new (depth);
		  LBV_COEFFICIENTS (lbv)[i] = 1;
1994 1995 1996 1997 1998
		  
		  newlbv = lambda_body_vector_compute_new (transform, lbv);

		  newiv = lbv_to_gcc_expression (newlbv, TREE_TYPE (oldiv),
						 new_ivs, &stmts);
1999
		  bsi = bsi_for_stmt (stmt);
Daniel Berlin committed
2000 2001 2002
		  /* Insert the statements to build that
		     expression.  */
		  bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
2003
		  propagate_value (use_p, newiv);
2004
		  update_stmt (stmt);
Daniel Berlin committed
2005 2006 2007 2008 2009
		  
		}
	    }
	}
    }
2010
  VEC_free (tree, heap, new_ivs);
Daniel Berlin committed
2011 2012
}

Daniel Berlin committed
2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045
/* Return TRUE if STMT is not an interesting statement from the
   perspective of determining whether we have a perfect loop nest.  */

static bool
not_interesting_stmt (tree stmt)
{
  enum tree_code code = TREE_CODE (stmt);

  /* COND_EXPRs aren't interesting here: if one were exiting the loop,
     we would have already failed the number-of-exits tests.  */
  return (code == LABEL_EXPR
	  || code == GOTO_EXPR
	  || code == COND_EXPR);
}

/* Return TRUE if PHI uses DEF as the argument coming in over an edge
   from inside LOOP.  */

static bool
phi_loop_edge_uses_def (struct loop *loop, tree phi, tree def)
{
  int ix;
  int nargs = PHI_NUM_ARGS (phi);

  for (ix = 0; ix < nargs; ix++)
    {
      /* Only arguments flowing in from a block inside LOOP count.  */
      if (PHI_ARG_DEF (phi, ix) == def
	  && flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, ix)->src))
	return true;
    }
  return false;
}

/* Return TRUE if STMT's single SSA use operand is PHI_RESULT.  */

static bool
stmt_uses_phi_result (tree stmt, tree phi_result)
{
  tree single_use;

  /* This is conservatively true, because we only want SIMPLE bumpers
     of the form x +- constant for our pass, and such a bumper has
     exactly one real use.  */
  single_use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  return single_use == phi_result;
}

/* STMT is a bumper stmt for LOOP if the version it defines is used in the
   in-loop-edge in a phi node, and the operand it uses is the result of that
   phi node. 
   I.E. i_29 = i_3 + 1
        i_3 = PHI (0, i_29);  */

static bool
stmt_is_bumper_for_loop (struct loop *loop, tree stmt)
{
  tree def;
  imm_use_iterator iter;
  use_operand_p use_p;

  /* A bumper must define exactly one SSA name.  */
  def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF);
  if (!def)
    return false;

  /* Look for a phi that consumes DEF on the in-loop edge and whose
     result feeds back into STMT.  */
  FOR_EACH_IMM_USE_FAST (use_p, iter, def)
    {
      tree use_stmt = USE_STMT (use_p);

      if (TREE_CODE (use_stmt) == PHI_NODE
	  && phi_loop_edge_uses_def (loop, use_stmt, def)
	  && stmt_uses_phi_result (stmt, PHI_RESULT (use_stmt)))
	return true;
    }
  return false;
}
2083 2084


Daniel Berlin committed
2085 2086 2087 2088 2089
/* Return true if LOOP is a perfect loop nest.
   Perfect loop nests are those loop nests where all code occurs in the
   innermost loop body.

   i.e. 
   DO I = 1, 20
       S1
       DO J = 1, 20
       ...
       END DO
   END DO
   is not a perfect loop nest because of S1.
   
   DO I = 1, 20
      DO J = 1, 20
        S1
	...
      END DO
   END DO 
   is a perfect loop nest.  

   Since we don't have high level loops anymore, we basically have to walk our
   statements and ignore those that are there because the loop needs them (IE
   the induction variable increment, and jump back to the top of the loop).  */

bool
perfect_nest_p (struct loop *loop)
{
  basic_block *body;
  size_t ix;
  tree exit_cond;

  /* An innermost loop is trivially perfect.  */
  if (!loop->inner)
    return true;

  body = get_loop_body (loop);
  exit_cond = get_loop_exit_condition (loop);
  for (ix = 0; ix < loop->num_nodes; ix++)
    {
      block_stmt_iterator bsi;

      /* Only blocks belonging directly to LOOP (not to an inner loop)
	 can disqualify the nest.  */
      if (body[ix]->loop_father != loop)
	continue;

      for (bsi = bsi_start (body[ix]); !bsi_end_p (bsi); bsi_next (&bsi))
	{
	  tree stmt = bsi_stmt (bsi);

	  /* Loop-maintenance statements are fine; anything else means
	     code lives outside the innermost body.  */
	  if (stmt == exit_cond
	      || not_interesting_stmt (stmt)
	      || stmt_is_bumper_for_loop (loop, stmt))
	    continue;
	  free (body);
	  return false;
	}
    }
  free (body);
  /* See if the inner loops are perfectly nested as well.  */
  return perfect_nest_p (loop->inner);
}

2146
/* Replace the USES of X in STMT, or uses with the same step as X  with Y.  */
Daniel Berlin committed
2147 2148

static void
2149 2150
replace_uses_equiv_to_x_with_y (struct loop *loop, tree stmt, tree x, 
				int xstep, tree y)
Daniel Berlin committed
2151
{
2152 2153 2154 2155
  ssa_op_iter iter;
  use_operand_p use_p;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
Daniel Berlin committed
2156
    {
2157 2158 2159 2160 2161 2162 2163
      tree use = USE_FROM_PTR (use_p);
      tree step = NULL_TREE;
      tree access_fn = NULL_TREE;
      
      
      access_fn = instantiate_parameters
	(loop, analyze_scalar_evolution (loop, use));
2164
      if (access_fn != NULL_TREE && access_fn != chrec_dont_know)
2165
	step = evolution_part_in_loop_num (access_fn, loop->num);
2166 2167 2168
      if ((step && step != chrec_dont_know 
	   && TREE_CODE (step) == INTEGER_CST
	   && int_cst_value (step) == xstep)
2169
	  || USE_FROM_PTR (use_p) == x)
2170
	SET_USE (use_p, y);
Daniel Berlin committed
2171 2172 2173
    }
}

2174
/* Return TRUE if STMT uses tree OP in it's uses.  */
Daniel Berlin committed
2175 2176 2177 2178

static bool
stmt_uses_op (tree stmt, tree op)
{
2179 2180 2181 2182
  ssa_op_iter iter;
  tree use;

  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
Daniel Berlin committed
2183
    {
2184
      if (use == op)
Daniel Berlin committed
2185 2186 2187 2188 2189
	return true;
    }
  return false;
}

2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239
/* Return true if STMT is an exit PHI for LOOP: a single-argument phi
   node sitting in the destination block of LOOP's single exit.  */

static bool
exit_phi_for_loop_p (struct loop *loop, tree stmt)
{
  return (TREE_CODE (stmt) == PHI_NODE
	  && PHI_NUM_ARGS (stmt) == 1
	  && bb_for_stmt (stmt) == loop->single_exit->dest);
}

/* Return true if STMT can be put back into INNER, a loop, by moving it to
   the beginning of that loop.  */

static bool
can_put_in_inner_loop (struct loop *inner, tree stmt)
{
  imm_use_iterator iter;
  use_operand_p use_p;
  basic_block common_bb = NULL;

  gcc_assert (TREE_CODE (stmt) == MODIFY_EXPR);

  /* STMT must have no virtual operands and its RHS must be invariant in
     INNER, or moving it would change what it computes.  */
  if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
    return false;
  if (!expr_invariant_in_loop_p (inner, TREE_OPERAND (stmt, 1)))
    return false;

  /* We require that the basic block of all uses be the same, or the use
     be an exit phi.  */
  FOR_EACH_IMM_USE_FAST (use_p, iter, TREE_OPERAND (stmt, 0))
    {
      basic_block use_bb;

      if (exit_phi_for_loop_p (inner, USE_STMT (use_p)))
	continue;

      use_bb = bb_for_stmt (USE_STMT (use_p));
      if (!flow_bb_inside_loop_p (inner, use_bb))
	return false;
      if (common_bb == NULL)
	common_bb = use_bb;
      else if (use_bb != common_bb)
	return false;
    }
  return true;
}


Daniel Berlin committed
2240 2241 2242 2243 2244 2245 2246
/* Return TRUE if LOOP is an imperfect nest that we can convert to a perfect
   one.  LOOPIVS is a vector of induction variables, one per loop.  
   ATM, we only handle imperfect nests of depth 2, where all of the statements
   occur after the inner loop.  */

static bool
can_convert_to_perfect_nest (struct loop *loop,
			     VEC(tree,heap) *loopivs)
{
  basic_block *bbs;
  tree exit_condition, phi;
  size_t i;
  block_stmt_iterator bsi;
  basic_block exitdest;

  /* Can't handle triply nested+ loops yet.  */
  if (!loop->inner || loop->inner->inner)
    return false;
  
  bbs = get_loop_body (loop);
  exit_condition = get_loop_exit_condition (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      /* Only statements in blocks belonging directly to LOOP (not the
	 inner loop) can make the nest imperfect.  */
      if (bbs[i]->loop_father == loop)
	{
	  for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
	    { 
	      size_t j;
	      tree stmt = bsi_stmt (bsi);
	      tree iv;
	      
	      /* Loop-maintenance statements don't count.  */
	      if (stmt == exit_condition
		  || not_interesting_stmt (stmt)
		  || stmt_is_bumper_for_loop (loop, stmt))
		continue;
	      /* If the statement uses inner loop ivs, we can't handle
		 it.  Start at index 1 so the outer loop's own iv is
		 still allowed.  */
	      for (j = 1; VEC_iterate (tree, loopivs, j, iv); j++)
		if (stmt_uses_op (stmt, iv))
		  goto fail;
	      
	      /* If this is a simple operation like a cast that is invariant
		 in the inner loop, only used there, and we can place it
		 there, then it's not going to hurt us.
		 This means that we will propagate casts and other cheap
		 invariant operations *back*
		 into the inner loop if we can interchange the loop, on the
		 theory that we are going to gain a lot more by interchanging
		 the loop than we are by leaving some invariant code there for
		 some other pass to clean up.  */
	      if (TREE_CODE (stmt) == MODIFY_EXPR
		  && is_gimple_cast (TREE_OPERAND (stmt, 1))
		  && can_put_in_inner_loop (loop->inner, stmt))
		continue;

	      /* Otherwise, if the bb of a statement we care about isn't
		 dominated by the header of the inner loop, then we can't
		 handle this case right now.  This test ensures that the
		 statement comes completely *after* the inner loop.  */
	      if (!dominated_by_p (CDI_DOMINATORS,
				   bb_for_stmt (stmt), 
				   loop->inner->header))
		goto fail;
	    }
	}
    }

  /* We also need to make sure the loop exit only has simple copy phis in it,
     otherwise we don't know how to transform it into a perfect nest right
     now.  */
  exitdest = loop->single_exit->dest;
  
  for (phi = phi_nodes (exitdest); phi; phi = PHI_CHAIN (phi))
    if (PHI_NUM_ARGS (phi) != 1)
      goto fail;
  
  free (bbs);
  return true;
  
 fail:
  /* Shared cleanup: release the basic-block array on any failure.  */
  free (bbs);
  return false;
}

/* Transform the loop nest into a perfect nest, if possible.
   LOOPS is the current struct loops *
   LOOP is the loop nest to transform into a perfect nest
   LBOUNDS are the lower bounds for the loops to transform
   UBOUNDS are the upper bounds for the loops to transform
   STEPS is the STEPS for the loops to transform.
   LOOPIVS is the induction variables for the loops to transform.
   
   Basically, for the case of

   FOR (i = 0; i < 50; i++)
    {
     FOR (j = 0; j < 50; j++)
     {
        <whatever>
     }
     <some code>
    }

   This function will transform it into a perfect loop nest by splitting the
   outer loop into two loops, like so:

   FOR (i = 0; i < 50; i++)
   {
     FOR (j = 0; j < 50; j++)
     {
         <whatever>
     }
   }
   
   FOR (i = 0; i < 50; i++)
   {
    <some code>
   }

   Return FALSE if we can't make this loop into a perfect nest.  */

2360 2361 2362
static bool
perfect_nestify (struct loops *loops,
		 struct loop *loop,
2363 2364 2365 2366
		 VEC(tree,heap) *lbounds,
		 VEC(tree,heap) *ubounds,
		 VEC(int,heap) *steps,
		 VEC(tree,heap) *loopivs)
Daniel Berlin committed
2367 2368 2369 2370 2371
{
  basic_block *bbs;
  tree exit_condition;
  tree then_label, else_label, cond_stmt;
  basic_block preheaderbb, headerbb, bodybb, latchbb, olddest;
2372
  int i;
Daniel Berlin committed
2373
  block_stmt_iterator bsi;
2374
  bool insert_after;
Daniel Berlin committed
2375 2376 2377 2378 2379
  edge e;
  struct loop *newloop;
  tree phi;
  tree uboundvar;
  tree stmt;
2380
  tree oldivvar, ivvar, ivvarinced;
2381
  VEC(tree,heap) *phis = NULL;
Daniel Berlin committed
2382 2383 2384 2385 2386 2387 2388 2389 2390 2391

  if (!can_convert_to_perfect_nest (loop, loopivs))
    return false;

  /* Create the new loop */

  olddest = loop->single_exit->dest;
  preheaderbb =  loop_split_edge_with (loop->single_exit, NULL);
  headerbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
  
2392
  /* Push the exit phi nodes that we are moving.  */
Daniel Berlin committed
2393 2394
  for (phi = phi_nodes (olddest); phi; phi = PHI_CHAIN (phi))
    {
2395
      VEC_reserve (tree, heap, phis, 2);
2396 2397
      VEC_quick_push (tree, phis, PHI_RESULT (phi));
      VEC_quick_push (tree, phis, PHI_ARG_DEF (phi, 0));
Daniel Berlin committed
2398
    }
2399
  e = redirect_edge_and_branch (single_succ_edge (preheaderbb), headerbb);
2400

2401 2402
  /* Remove the exit phis from the old basic block.  Make sure to set
     PHI_RESULT to null so it doesn't get released.  */
2403
  while (phi_nodes (olddest) != NULL)
2404 2405
    {
      SET_PHI_RESULT (phi_nodes (olddest), NULL);
2406
      remove_phi_node (phi_nodes (olddest), NULL);
2407
    }      
2408

2409
  /* and add them back to the new basic block.  */
Daniel Berlin committed
2410 2411 2412 2413 2414
  while (VEC_length (tree, phis) != 0)
    {
      tree def;
      tree phiname;
      def = VEC_pop (tree, phis);
2415
      phiname = VEC_pop (tree, phis);      
Daniel Berlin committed
2416
      phi = create_phi_node (phiname, preheaderbb);
2417
      add_phi_arg (phi, def, single_pred_edge (preheaderbb));
2418
    }
2419
  flush_pending_stmts (e);
2420
  VEC_free (tree, heap, phis);
2421

Daniel Berlin committed
2422 2423 2424 2425 2426
  bodybb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
  latchbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
  make_edge (headerbb, bodybb, EDGE_FALLTHRU); 
  then_label = build1 (GOTO_EXPR, void_type_node, tree_block_label (latchbb));
  else_label = build1 (GOTO_EXPR, void_type_node, tree_block_label (olddest));
2427 2428 2429 2430 2431
  cond_stmt = build3 (COND_EXPR, void_type_node,
		      build2 (NE_EXPR, boolean_type_node, 
			      integer_one_node, 
			      integer_zero_node), 
		      then_label, else_label);
Daniel Berlin committed
2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452
  bsi = bsi_start (bodybb);
  bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
  e = make_edge (bodybb, olddest, EDGE_FALSE_VALUE);
  make_edge (bodybb, latchbb, EDGE_TRUE_VALUE);
  make_edge (latchbb, headerbb, EDGE_FALLTHRU);

  /* Update the loop structures.  */
  newloop = duplicate_loop (loops, loop, olddest->loop_father);  
  newloop->header = headerbb;
  newloop->latch = latchbb;
  newloop->single_exit = e;
  add_bb_to_loop (latchbb, newloop);
  add_bb_to_loop (bodybb, newloop);
  add_bb_to_loop (headerbb, newloop);
  set_immediate_dominator (CDI_DOMINATORS, bodybb, headerbb);
  set_immediate_dominator (CDI_DOMINATORS, headerbb, preheaderbb);
  set_immediate_dominator (CDI_DOMINATORS, preheaderbb, 
			   loop->single_exit->src);
  set_immediate_dominator (CDI_DOMINATORS, latchbb, bodybb);
  set_immediate_dominator (CDI_DOMINATORS, olddest, bodybb);
  /* Create the new iv.  */
2453 2454
  oldivvar = VEC_index (tree, loopivs, 0);
  ivvar = create_tmp_var (TREE_TYPE (oldivvar), "perfectiv");
Daniel Berlin committed
2455
  add_referenced_tmp_var (ivvar);
2456
  standard_iv_increment_position (newloop, &bsi, &insert_after);
Daniel Berlin committed
2457
  create_iv (VEC_index (tree, lbounds, 0),
2458
	     build_int_cst (TREE_TYPE (oldivvar), VEC_index (int, steps, 0)),
2459
	     ivvar, newloop, &bsi, insert_after, &ivvar, &ivvarinced);	     
Daniel Berlin committed
2460 2461 2462 2463 2464 2465 2466

  /* Create the new upper bound.  This may be not just a variable, so we copy
     it to one just in case.  */

  exit_condition = get_loop_exit_condition (newloop);
  uboundvar = create_tmp_var (integer_type_node, "uboundvar");
  add_referenced_tmp_var (uboundvar);
2467 2468
  stmt = build2 (MODIFY_EXPR, void_type_node, uboundvar, 
		 VEC_index (tree, ubounds, 0));
Daniel Berlin committed
2469 2470
  uboundvar = make_ssa_name (uboundvar, stmt);
  TREE_OPERAND (stmt, 0) = uboundvar;
2471 2472 2473 2474 2475

  if (insert_after)
    bsi_insert_after (&bsi, stmt, BSI_SAME_STMT);
  else
    bsi_insert_before (&bsi, stmt, BSI_SAME_STMT);
2476
  update_stmt (stmt);
2477 2478 2479 2480
  COND_EXPR_COND (exit_condition) = build2 (GE_EXPR, 
					    boolean_type_node,
					    uboundvar,
					    ivvarinced);
2481
  update_stmt (exit_condition);
2482 2483 2484
  bbs = get_loop_body_in_dom_order (loop); 
  /* Now move the statements, and replace the induction variable in the moved
     statements with the correct loop induction variable.  */
2485
  oldivvar = VEC_index (tree, loopivs, 0);
2486
  for (i = loop->num_nodes - 1; i >= 0 ; i--)
Daniel Berlin committed
2487 2488 2489 2490
    {
      block_stmt_iterator tobsi = bsi_last (bodybb);
      if (bbs[i]->loop_father == loop)
	{
2491 2492
	  /* If this is true, we are *before* the inner loop.
	     If this isn't true, we are *after* it.
Diego Novillo committed
2493

2494 2495 2496
	     The only time can_convert_to_perfect_nest returns true when we
	     have statements before the inner loop is if they can be moved
	     into the inner loop. 
Diego Novillo committed
2497

2498 2499 2500
	     The only time can_convert_to_perfect_nest returns true when we
	     have statements after the inner loop is if they can be moved into
	     the new split loop.  */
Diego Novillo committed
2501

2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566
	  if (dominated_by_p (CDI_DOMINATORS, loop->inner->header, bbs[i]))
	    {
	      for (bsi = bsi_last (bbs[i]); !bsi_end_p (bsi);)
		{ 
		  use_operand_p use_p;
		  imm_use_iterator imm_iter;
		  tree stmt = bsi_stmt (bsi);

		  if (stmt == exit_condition
		      || not_interesting_stmt (stmt)
		      || stmt_is_bumper_for_loop (loop, stmt))
		    {
		      if (!bsi_end_p (bsi))
			bsi_prev (&bsi);
		      continue;
		    }
		  /* Move this statement back into the inner loop.
		     This looks a bit confusing, but we are really just
		     finding the first non-exit phi use and moving the
		     statement to the beginning of that use's basic
		     block.  */
		  FOR_EACH_IMM_USE_SAFE (use_p, imm_iter, 
					 TREE_OPERAND (stmt, 0))
		    {
		      tree imm_stmt = USE_STMT (use_p);
		      if (!exit_phi_for_loop_p (loop->inner, imm_stmt))
			{
			  block_stmt_iterator tobsi = bsi_after_labels (bb_for_stmt (imm_stmt));
			  bsi_move_after (&bsi, &tobsi);
			  update_stmt (stmt);
			  BREAK_FROM_SAFE_IMM_USE (imm_iter);
			} 
		    }
		}
	    }
	  else
	    { 
	      /* Note that the bsi only needs to be explicitly incremented
		 when we don't move something, since it is automatically
		 incremented when we do.  */
	      for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi);)
		{ 
		  ssa_op_iter i;
		  tree n, stmt = bsi_stmt (bsi);
		  
		  if (stmt == exit_condition
		      || not_interesting_stmt (stmt)
		      || stmt_is_bumper_for_loop (loop, stmt))
		    {
		      bsi_next (&bsi);
		      continue;
		    }
		  
		  replace_uses_equiv_to_x_with_y (loop, stmt, 
						  oldivvar,  
						  VEC_index (int, steps, 0),
						  ivvar);
		  bsi_move_before (&bsi, &tobsi);
		  
		  /* If the statement has any virtual operands, they may
		     need to be rewired because the original loop may
		     still reference them.  */
		  FOR_EACH_SSA_TREE_OPERAND (n, stmt, i, SSA_OP_ALL_VIRTUALS)
		    mark_sym_for_renaming (SSA_NAME_VAR (n));
		}
Daniel Berlin committed
2567
	    }
2568
	  
Daniel Berlin committed
2569 2570
	}
    }
Diego Novillo committed
2571

Daniel Berlin committed
2572 2573 2574 2575
  free (bbs);
  return perfect_nest_p (loop);
}

Daniel Berlin committed
2576 2577 2578 2579 2580 2581 2582 2583
/* Return true if TRANS is a legal transformation matrix that respects
   the dependence vectors in DISTS and DIRS.  The conservative answer
   is false.

   "Wolfe proves that a unimodular transformation represented by the
   matrix T is legal when applied to a loop nest with a set of
   lexicographically non-negative distance vectors RDG if and only if
   for each vector d in RDG, (T.d >= 0) is lexicographically positive.
2584
   i.e.: if and only if it transforms the lexicographically positive
Daniel Berlin committed
2585 2586 2587 2588 2589
   distance vectors to lexicographically positive vectors.  Note that
   a unimodular matrix must transform the zero vector (and only it) to
   the zero vector." S.Muchnick.  */

bool
Daniel Berlin committed
2590 2591 2592
lambda_transform_legal_p (lambda_trans_matrix trans, 
			  int nb_loops,
			  varray_type dependence_relations)
Daniel Berlin committed
2593
{
2594
  unsigned int i, j;
Daniel Berlin committed
2595 2596 2597
  lambda_vector distres;
  struct data_dependence_relation *ddr;

2598 2599
  gcc_assert (LTM_COLSIZE (trans) == nb_loops
	      && LTM_ROWSIZE (trans) == nb_loops);
Daniel Berlin committed
2600 2601 2602

  /* When there is an unknown relation in the dependence_relations, we
     know that it is no worth looking at this loop nest: give up.  */
Daniel Berlin committed
2603
  ddr = (struct data_dependence_relation *) 
Daniel Berlin committed
2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614
    VARRAY_GENERIC_PTR (dependence_relations, 0);
  if (ddr == NULL)
    return true;
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return false;

  distres = lambda_vector_new (nb_loops);

  /* For each distance vector in the dependence graph.  */
  for (i = 0; i < VARRAY_ACTIVE_SIZE (dependence_relations); i++)
    {
Daniel Berlin committed
2615
      ddr = (struct data_dependence_relation *) 
2616
	VARRAY_GENERIC_PTR (dependence_relations, i);     
Daniel Berlin committed
2617

Daniel Berlin committed
2618
      /* Don't care about relations for which we know that there is no
Daniel Berlin committed
2619 2620
	 dependence, nor about read-read (aka. output-dependences):
	 these data accesses can happen in any order.  */
Daniel Berlin committed
2621 2622 2623
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known
	  || (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
	continue;
2624

Daniel Berlin committed
2625 2626 2627
      /* Conservatively answer: "this transformation is not valid".  */
      if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	return false;
2628 2629 2630
	  
      /* If the dependence could not be captured by a distance vector,
	 conservatively answer that the transform is not valid.  */
2631
      if (DDR_NUM_DIST_VECTS (ddr) == 0)
2632
	return false;
Daniel Berlin committed
2633 2634

      /* Compute trans.dist_vect */
2635 2636 2637 2638
      for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
	{
	  lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops, 
				     DDR_DIST_VECT (ddr, j), distres);
Daniel Berlin committed
2639

2640 2641 2642
	  if (!lambda_vector_lexico_pos (distres, nb_loops))
	    return false;
	}
Daniel Berlin committed
2643 2644 2645
    }
  return true;
}