/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"

extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static bool reg_pending_barrier;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmaps for the true dependency cache are
   allocated, then the other two caches are allocated as well.  */
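/* For illustration: once add_dependence records a true dependence of
   INSN on ELEM, bit INSN_LUID (elem) is set in
   true_dependency_cache[INSN_LUID (insn)], so a duplicate request can
   be answered without walking LOG_LINKS (insn).  */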
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;

/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif

static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));

/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
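
/* Note how deps_may_trap_p is used in sched_analyze_2: a read that
   cannot trap need not depend on a preceding conditional jump, while
   one that may trap must not be moved above it.  */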

/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  /* A conditional jump has the form (set (pc) (if_then_else COND
     (label_ref LABEL) (pc))), possibly with the arms interchanged.  */
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
	   && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
			   XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}

/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
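
/* For example, (eq (reg 100) (const_int 0)) and
   (ne (reg 100) (const_int 0)) can never both be true, so two insns
   predicated on these conditions need no dependence between them,
   provided neither insn modifies the other's condition; add_dependence
   checks exactly that before eliding the link.  */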

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link;
  int present_p;
  rtx cond1, cond2;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be dependent.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, elem)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn))
	return;
    }

  present_p = 1;
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.   Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
	abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
	/* Do nothing (present_dep_type is already 0).  */
	;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_OUTPUT;
      else
	present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
	return;
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
	{
#ifdef INSN_SCHEDULING
	  /* Clear the corresponding cache entry, because the type of
	     the link may be changed.  */
	  if (true_dependency_cache != NULL)
	    {
	      if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
		       && output_dependency_cache)
		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else
		abort ();
	    }
#endif

	  /* If this is a more restrictive type of dependence than the existing
	     one, then change the existing dependence to this type.  */
	  if ((int) dep_type < (int) REG_NOTE_KIND (link))
	    PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
	  /* If we are adding a dependency to INSN's LOG_LINKs, then
	     note that in the bitmap caches of dependency information.  */
	  if (true_dependency_cache != NULL)
	    {
	      if ((int) REG_NOTE_KIND (link) == 0)
		SET_BIT (true_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
		SET_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	    }
#endif
	  return;
	}
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
	SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
	SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
	SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}

/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (insn, list, dep_type)
     rtx insn, list;
     enum reg_note dep_type;
{
  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
}

/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (insn, listp, dep_type)
     rtx insn;
     rtx *listp;
     enum reg_note dep_type;
{
  rtx list, next;
  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}

/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx prev;

  SCHED_GROUP_P (insn) = 1;

  prev = prev_nonnote_insn (insn);
  add_dependence (insn, prev, REG_DEP_ANTI);
}
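
/* This is how cc0 setter/user pairs (see the CC0 case in
   sched_analyze_2) and libcall blocks are kept contiguous: the grouped
   insn gains an anti-dependence on its predecessor, and the scheduler
   keeps SCHED_GROUP_P insns together with that predecessor.  */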

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
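
/* For example, given the sequence

	insn 1:  r1 <- [mem]
	insn 2:  [mem] <- r2

   insn 2 must not be moved above insn 1; this is case (2), an anti
   dependence.  Had insn 1 also written [mem] it would be case (3), an
   output dependence, and had insn 2 instead read the value stored by a
   previous write it would be case (1), a true dependence.  */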

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
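
/* For example, sched_analyze_1 records a pending store with

	add_insn_mem_dependence (deps, &deps->pending_write_insns,
				 &deps->pending_write_mems, insn, dest);

   after which later memory references can be tested against the saved
   MEM with anti_dependence, output_dependence or true_dependence.  */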

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (deps, insn, for_read, for_write)
     struct deps *deps;
     rtx insn;
     int for_read, for_write;
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
				    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}

/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int regno;
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (GET_CODE (x) == SET)
	sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
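      /* For example, on a typical 32-bit target a DImode value in
	 hard reg 2 also occupies hard reg 3, and both registers must
	 be marked as set or clobbered.  */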
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  if (code == SET)
	    {
	      while (--i >= 0)
		SET_REGNO_REG_SET (reg_pending_sets, regno + i);
	    }
	  else
	    {
	      while (--i >= 0)
		SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
	    }
	}
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	 it does not reload.  Ignore these as they have served their
	 purpose already.  */
      else if (regno >= deps->max_reg)
	{
	  if (GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER)
	    abort ();
	}
      else
	{
	  if (code == SET)
	    SET_REGNO_REG_SET (reg_pending_sets, regno);
	  else
	    SET_REGNO_REG_SET (reg_pending_clobbers, regno);

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (!reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (REG_N_CALLS_CROSSED (regno) == 0)
	    add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
	{
	  t = shallow_copy_rtx (dest);
	  cselib_lookup (XEXP (t, 0), Pmode, 1);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush,
			       REG_DEP_ANTI);

	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
				   &deps->pending_write_mems, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif

    case REG:
      {
	int regno = REGNO (x);
	if (regno < FIRST_PSEUDO_REGISTER)
	  {
	    int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
	    while (--i >= 0)
	      SET_REGNO_REG_SET (reg_pending_uses, regno + i);
	  }
	/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	   it does not reload.  Ignore these as they have served their
	   purpose already.  */
	else if (regno >= deps->max_reg)
	  {
	    if (GET_CODE (PATTERN (insn)) != USE
		&& GET_CODE (PATTERN (insn)) != CLOBBER)
	      abort ();
	  }
	else
	  {
	    SET_REGNO_REG_SET (reg_pending_uses, regno);

	    /* Pseudos that are REG_EQUIV to something may be replaced
	       by that during reloading.  We need only add dependencies for
	       the address in the REG_EQUIV note.  */
	    if (!reload_completed
		&& reg_known_equiv_p[regno]
		&& GET_CODE (reg_known_value[regno]) == MEM)
	      sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	    /* If the register does not already cross any calls, then add this
	       insn to the sched_before_next_call list so that it will still
	       not cross calls after scheduling.  */
	    if (REG_N_CALLS_CROSSED (regno) == 0)
	      deps->sched_before_next_call
		= alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  }
	return;
      }

    case MEM:
      {
	/* Reading memory.  */
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (current_sched_info->use_cselib)
	  {
	    t = shallow_copy_rtx (t);
	    cselib_lookup (XEXP (t, 0), Pmode, 1);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }
	pending = deps->pending_read_insns;
	pending_mem = deps->pending_read_mems;
	while (pending)
	  {
	    if (read_dependence (XEXP (pending_mem, 0), t))
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = deps->pending_write_insns;
	pending_mem = deps->pending_write_mems;
	while (pending)
	  {
	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				 t, rtx_varies_p))
	      add_dependence (insn, XEXP (pending, 0), 0);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	  if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
	      || deps_may_trap_p (x))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (deps, &deps->pending_read_insns,
				 &deps->pending_read_mems, insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);
	return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	  reg_pending_barrier = true;

	/* For all ASM_OPERANDS, we must traverse the vector of input
	   operands.  We cannot just fall through here since then we
	   would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
	   which does not indicate a traditional asm as it does in its
	   normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
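      /* For example, (post_inc (reg X)) uses the old value of X as an
	 address and then sets X, so X must appear both as a use and as
	 a set.  */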
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}

/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
	 false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
	 and others know that a value is dead.  Depend on the last call
	 instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
	add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  rtx sub = XVECEXP (x, 0, i);
	  code = GET_CODE (sub);

	  if (code == COND_EXEC)
	    {
	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
	      sub = COND_EXEC_CODE (sub);
	      code = GET_CODE (sub);
	    }
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (deps, sub, insn);
	  else
	    sched_analyze_2 (deps, sub, insn);
	}
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
	    sched_analyze_1 (deps, XEXP (link, 0), insn);
	  else
	    sched_analyze_2 (deps, XEXP (link, 0), insn);
	}
      if (find_reg_note (insn, REG_SETJMP, NULL))
	reg_pending_barrier = true;
    }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
	reg_pending_barrier = true;
      else
	{
	  rtx pending, pending_mem;
	  regset_head tmp;
	  INIT_REG_SET (&tmp);

	  (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
	  /* Make latency of jump equal to 0 by using anti-dependence.  */
	  EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
	      reg_last->uses_length++;
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	    });
	  CLEAR_REG_SET (&tmp);

	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump only
	     if the result is needed by the register use mask computed
	     above.  */

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush,
			       REG_DEP_ANTI);
	}
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      rtx link;

      /* Update loop_notes with any notes from this insn.  Also determine
	 if any of the notes on the list correspond to instruction scheduling
	 barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
	{
	  if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
	    reg_pending_barrier = true;

	  link = XEXP (link, 1);
	}
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = true;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the dependencies added here
         are not real, so we use anti-dependences.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
	    });
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (insn, &reg_last->uses,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (insn, &reg_last->sets,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (insn, &reg_last->clobbers,
					    REG_DEP_ANTI);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	    });
	}

      for (i = 0; i < deps->max_reg; i++)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	}

      flush_pending_lists (deps, insn, true, true);
      reg_pending_barrier = false;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
	 of the lists.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0);
	      add_dependence_list (insn, reg_last->clobbers, 0);
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	      reg_last->uses_length++;
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	      reg_last->clobbers_length++;
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	    });
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0);
	      add_dependence_list (insn, reg_last->clobbers, 0);
	      reg_last->uses_length++;
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
		{
		  add_dependence_list_and_free (insn, &reg_last->sets,
					        REG_DEP_OUTPUT);
		  add_dependence_list_and_free (insn, &reg_last->uses,
						REG_DEP_ANTI);
		  add_dependence_list_and_free (insn, &reg_last->clobbers,
						REG_DEP_OUTPUT);
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  reg_last->clobbers_length = 0;
		  reg_last->uses_length = 0;
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
		  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
		}
	      reg_last->clobbers_length++;
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (insn, &reg_last->sets,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->clobbers,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->uses,
					    REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	    });
	}

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }
  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If we are currently inside a libcall scheduling group, then mark
     the current insn as being in the group and note that it cannot be
     moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      set_sched_group_p (insn);
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
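  /* For example, before reload the insn just after a call is often

	(set (reg:SI 123) (reg:SI 0))

     copying the hard return-value register into a pseudo; the group
     keeps such moves glued to the call.  (The register numbers here
     are only illustrative.)  */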

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  set_sched_group_p (insn);
	  CANT_MOVE (insn) = 1;
	}
      else
	{
	end_call_group:
	  deps->in_post_call_group_p = false;
	}
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  rtx insn;
  rtx loop_notes = 0;

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
	  if (GET_CODE (insn) == JUMP_INSN)
	    {
	      /* Keep the list a reasonable size.  */
	      if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
		flush_pending_lists (deps, insn, true, true);
	      else
		deps->last_pending_memory_flush
		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	    }
	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  int i;

	  CANT_MOVE (insn) = 1;

	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  if (find_reg_note (insn, REG_SETJMP, NULL))
	    {
	      /* This is a setjmp.  Assume that all registers, not just
		 hard registers, may be clobbered by this call.  */
	      reg_pending_barrier = true;
	    }
	  else
	    {
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		/* A call may read and modify global register variables.  */
		if (global_regs[i])
		  {
		    SET_REGNO_REG_SET (reg_pending_sets, i);
		    SET_REGNO_REG_SET (reg_pending_uses, i);
		  }
		/* Other call-clobbered hard regs may be clobbered.
		   Since we only have a choice between 'might be clobbered'
		   and 'definitely not clobbered', we must include all
		   partly call-clobbered registers here.  */
		else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
			 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
		  SET_REGNO_REG_SET (reg_pending_clobbers, i);
		/* We don't know what set of fixed registers might be used
		   by the function, but it is certain that the stack pointer
		   is among them, so be conservative.  */
		else if (fixed_regs[i])
		  SET_REGNO_REG_SET (reg_pending_uses, i);
		/* The frame pointer is normally not used by the function
		   itself, but by the debugger.  */
		/* ??? MIPS o32 is an exception.  It uses the frame pointer
		   in the macro expansion of jal but does not represent this
		   fact in the call_insn rtl.  */
		else if (i == FRAME_POINTER_REGNUM
			 || (i == HARD_FRAME_POINTER_REGNUM
			     && (! reload_completed || frame_pointer_needed)))
		  SET_REGNO_REG_SET (reg_pending_uses, i);
	    }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  add_dependence_list_and_free (insn, &deps->sched_before_next_call,
					REG_DEP_ANTI);

	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;

	  /* In the absence of interprocedural alias analysis, we must flush
	     all pending reads and writes, and start new dependencies starting
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
	  flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = true;
	}

      /* See comments on reemit_notes as to why we do this.
	 ??? Actually, the reemit_notes just say what is done, not why.  */

      if (GET_CODE (insn) == NOTE
	  && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
	{
	  rtx rtx_region;

	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
	    rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
	  else
	    rtx_region = GEN_INT (0);

	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					rtx_region,
					loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	  CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
	}

      if (current_sched_info->use_cselib)
	cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
	 a CLOBBER beginning a libcall block.   If it is, record the
	 end of the libcall sequence. 

	 We want to schedule libcall blocks as a unit before reload.  While
	 this restricts scheduling, it preserves the meaning of a libcall
	 block.

	 As a side effect, we may get better code due to decreased register
	 pressure as well as less chance of a foreign insn appearing in
	 a libcall block.  */
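      /* Schematically, such a block looks like

	   (clobber (reg X))		;; REG_LIBCALL note -> tail insn
	   ... insns computing the result into (reg X) ...
	   (set (reg X) (reg X))	;; REG_RETVAL and REG_EQUAL notes

	 which is what the conditions below check for.  */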
      if (!reload_completed
	  /* Note we may have nested libcall sequences.  We only care about
	     the outermost libcall sequence.  */ 
	  && deps->libcall_block_tail_insn == 0
	  /* The sequence must start with a clobber of a register.  */
	  && GET_CODE (insn) == INSN
	  && GET_CODE (PATTERN (insn)) == CLOBBER
          && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
	  && GET_CODE (XEXP (PATTERN (insn), 0)) == REG
	  /* The CLOBBER must also have a REG_LIBCALL note attached.  */
	  && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
	  && (end_seq = XEXP (link, 0)) != 0
	  /* The insn referenced by the REG_LIBCALL note must be a
	     simple nop copy with the same destination as the register
	     mentioned in the clobber.  */
	  && (set = single_set (end_seq)) != 0
	  && SET_DEST (set) == r0 && SET_SRC (set) == r0
	  /* And finally the insn referenced by the REG_LIBCALL must
	     also contain a REG_EQUAL note and a REG_RETVAL note.  */
	  && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
	  && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
	deps->libcall_block_tail_insn = XEXP (link, 0);

      /* If we have reached the end of a libcall block, then close the
	 block.  */
      if (deps->libcall_block_tail_insn == insn)
	deps->libcall_block_tail_insn = 0;

      if (insn == tail)
	{
	  if (current_sched_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  abort ();
}

/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	{
	  rtx x = XEXP (link, 0);
	  rtx new_link;

#ifdef ENABLE_CHECKING
	  /* If add_dependence is working properly there should never
	     be notes, deleted insns or duplicates in the backward
	     links.  Thus we need not check for them here.

	     However, if we have enabled checking we might as well go
	     ahead and verify that add_dependence worked properly.  */
	  if (GET_CODE (x) == NOTE
	      || INSN_DELETED_P (x)
	      || (forward_dependency_cache != NULL
		  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
			       INSN_LUID (insn)))
	      || (forward_dependency_cache == NULL
		  && find_insn_list (insn, INSN_DEPEND (x))))
	    abort ();
	  if (forward_dependency_cache != NULL)
	    SET_BIT (forward_dependency_cache[INSN_LUID (x)],
		     INSN_LUID (insn));
#endif

	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

	  dep_type = REG_NOTE_KIND (link);
	  PUT_REG_NOTE_KIND (new_link, dep_type);

	  INSN_DEPEND (x) = new_link;
	  INSN_DEP_COUNT (insn) += 1;
	}
    }
}

/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = false;
  deps->libcall_block_tail_insn = 0;
}

/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
}

/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
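  /* That is, allocate the caches only when the average basic block
     contains more than 100 * 5 = 500 insns, 100X the 5-insn average
     block size assumed above.  */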
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}

/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
  reg_pending_barrier = false;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}