/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "toplev.h"

#include "hard-reg-set.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "ira.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "function.h"
#include "expr.h"
#include "except.h"
#include "ggc.h"
#include "params.h"
#include "cselib.h"
#include "intl.h"
#include "obstack.h"
#include "tree-pass.h"
#include "hash-table.h"
#include "df.h"
#include "dbgcnt.h"
#include "target.h"
#include "gcse.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes are even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
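
/* As an illustrative example (a sketch, not taken from the papers above):
   in

     if (cond)
       x = a + b;
     y = a + b;

   the second evaluation of a + b is partially redundant: it is redundant
   only on the path where COND holds.  PRE inserts the expression on the
   path where it was missing and copies its result to a new pseudo-reg,
   which makes the second evaluation fully redundant so it can be deleted:

     if (cond)
       { t = a + b; x = t; }
     else
       t = a + b;
     y = t;

   The copies created this way are what the second CPROP pass cleans up.  */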

/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except themselves,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance. (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */
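
/* For example (an illustrative sketch): if location M is accessed only by
   simple loads and stores, each store "M = x" can be rewritten as
   "R = x; M = R" for a new pseudo R (the reaching register), and a later
   load "y = M" becomes "y = R", so the load turns into a fully redundant
   computation that PRE can delete.  */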

struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : typed_noop_remove <ls_expr>
{
  typedef ls_expr value_type;
  typedef value_type compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Hashtable helpers.  */
inline hashval_t
pre_ldst_expr_hasher::hash (const value_type *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const value_type *ptr1,
			     const compare_type *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static vec<rtx> *modify_mem_list;
static bitmap modify_mem_list_set;

typedef struct modify_pair_s
{
  rtx dest;			/* A MEM.  */
  rtx dest_addr;		/* The canonical address of `dest'.  */
} modify_pair;


/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs */
static sbitmap *ae_kill;

/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];


static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
				  struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static void compute_transp (const_rtx, int, sbitmap *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct expr *, basic_block,
				     sbitmap, int, int *, enum reg_class,
				     int *, bitmap, rtx);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static bool is_too_expensive (const char *);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}

/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
				    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}

/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and expression would contain the same value if the computation
   was moved to the beginning of the block.
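
   For example (an illustrative sketch): in a block containing

     r1 = r2 + r3
     r2 = ...
     r4 = r2 + r3

   the expression (plus r2 r3) is locally anticipatable (its first
   occurrence precedes any modification of its operands) and locally
   available (no operand is modified after its last occurrence), but it
   is not transparent, because r2 is set within the block.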

   We call this routine for PRE and code hoisting.  They both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}

/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constant leads to increased register pressure.

     RA can deal with this by rematerialing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  int max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x);
    }
}

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
	    && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				     x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx> list = modify_mem_list[bb->index];
  rtx setter;
  unsigned ix;

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if X is equivalent to Y.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, int max_distance, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.
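
	 For example (an illustrative sketch): a symbol address built as

	   (set (reg A) (high (symbol_ref X)))
	   (set (reg B) (lo_sum (reg A) (symbol_ref X)))

	 usually carries a REG_EQUAL note (symbol_ref X) on the second insn,
	 which lets GCSE treat the two-insn address calculation as the single
	 expression (symbol_ref X).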

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point, only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In the case of a store we want to consider the memory value as available in
     the REG stored in that memory. This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
      {
        unsigned int regno = REGNO (src);
	int max_distance = 0;

	/* Only record sets of pseudo-regs in the hash table.  */
        if (regno >= FIRST_PSEUDO_REGISTER
	   /* Don't GCSE something if we can't do a reg/reg copy.  */
	   && can_copy_p (GET_MODE (src))
	   /* GCSE commonly inserts instruction after the insn.  We can't
	      do that easily for EH edges so disable GCSE on these for now.  */
	   && !can_throw_internal (insn)
	   /* Is SET_DEST something we want to gcse?  */
	   && want_to_gcse_p (dest, &max_distance)
	   /* Don't CSE a nop.  */
	   && ! set_noop_p (set)
	   /* Don't GCSE if it has an attached REG_EQUIV note.
	      At this point, only function parameters should have
	      REG_EQUIV notes and if the argument slot is used somewhere
	      explicitly, it means address of parameter has been taken,
	      so we should not extend the lifetime of the pseudo.  */
	   && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	       || ! MEM_P (XEXP (note, 0))))
             {
               /* Stores are never anticipatable.  */
               int antic_p = 0;
	       /* An expression is not available if its operands are
	          subsequently modified, including this insn.  It's also not
	          available if this is a branch, because we can't insert
	          a set after the branch.  */
               int avail_p = oprs_available_p (dest, insn)
			     && ! JUMP_P (insn);

	       /* Record the memory expression (DEST) in the hash table.  */
	       insert_expr_in_table (dest, GET_MODE (dest), insn,
				     antic_p, avail_p, max_distance, table);
             }
      }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;
  modify_pair pair;

  while (GET_CODE (dest) == SUBREG
      || GET_CODE (dest) == ZERO_EXTRACT
      || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_FOR_INSN (insn)->index;

  pair.dest = dest;
  pair.dest_addr = dest_addr;
  canon_modify_mem_list[bb].safe_push (pair);
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set (consider
   a CALL_INSN).  We merely need to record which insns modify memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb;

  if (! flag_gcse_lm)
    return;

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  bb = BLOCK_FOR_INSN (insn)->index;
  modify_mem_list[bb].safe_push (insn);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    bitmap_set_bit (blocks_with_calls, bb);
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;

  /* re-Cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
1562
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1563

1564
  for (i = 0; i < max_reg_num (); ++i)
1565
    reg_avail_info[i].last_bb = NULL;
1566

1567
  FOR_EACH_BB_FN (current_bb, cfun)
1568 1569
    {
      rtx insn;
1570
      unsigned int regno;
1571 1572

      /* First pass over the instructions records information used to
1573
	 determine when registers and memory are first and last set.  */
1574
      FOR_BB_INSNS (current_bb, insn)
1575
	{
1576
	  if (!NONDEBUG_INSN_P (insn))
1577 1578
	    continue;

1579
	  if (CALL_P (insn))
1580
	    {
1581 1582 1583 1584
	      hard_reg_set_iterator hrsi;
	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
					      0, regno, hrsi)
		record_last_reg_set_info (insn, regno);
1585

1586 1587
	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
1588 1589
	    }

1590
	  note_stores (PATTERN (insn), record_last_set_info, insn);
1591 1592 1593
	}

      /* The next pass builds the hash table.  */
1594
      FOR_BB_INSNS (current_bb, insn)
1595
	if (NONDEBUG_INSN_P (insn))
Steven Bosscher committed
1596
	  hash_scan_insn (insn, table);
1597 1598
    }

1599 1600
  free (reg_avail_info);
  reg_avail_info = NULL;
1601 1602
}

1603
/* Allocate space for the set/expr hash TABLE.
1604
   It is used to determine the number of buckets to use.  */
1605 1606

static void
1607
alloc_hash_table (struct hash_table_d *table)
1608 1609 1610
{
  int n;

1611 1612 1613
  n = get_max_insn_count ();

  table->size = n / 4;
1614 1615
  if (table->size < 11)
    table->size = 11;
1616

1617 1618 1619
  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
1620 1621
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
1622
  table->table = GNEWVAR (struct expr *, n);
1623 1624
}

1625
/* Free things allocated by alloc_hash_table.  */
1626 1627

static void
1628
free_hash_table (struct hash_table_d *table)
1629
{
1630
  free (table->table);
1631 1632
}

1633
/* Compute the expression hash table TABLE.  */
1634 1635

static void
1636
compute_hash_table (struct hash_table_d *table)
1637 1638
{
  /* Initialize count of number of entries in hash table.  */
1639
  table->n_elems = 0;
1640
  memset (table->table, 0, table->size * sizeof (struct expr *));
1641

1642
  compute_hash_table_work (table);
1643 1644 1645 1646
}

/* Expression tracking support.  */

1647 1648 1649
/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
1650
{
1651 1652
  unsigned i;
  bitmap_iterator bi;
1653

1654
  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1655
    {
1656 1657
      modify_mem_list[i].release ();
      canon_modify_mem_list[i].release ();
1658
    }
1659 1660
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
1661 1662
}

1663
/* Release memory used by modify_mem_list_set.  */
1664

1665 1666
static void
free_modify_mem_tables (void)
1667
{
1668 1669 1670 1671 1672
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
1673
}

/* For each block, compute whether X is transparent.  X is either an
   expression or an assignment [though we don't care which, for this context
   an assignment is treated as an expression].  For each block where an
   element of X is modified, reset the INDX bit in BMAP.  */
1679

1680 1681
static void
compute_transp (const_rtx x, int indx, sbitmap *bmap)
1682
{
1683 1684 1685
  int i, j;
  enum rtx_code code;
  const char *fmt;
1686

1687 1688 1689
  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:
1690

1691 1692
  if (x == 0)
    return;
1693

1694 1695
  code = GET_CODE (x);
  switch (code)
1696
    {
1697
    case REG:
	{
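	  /* A register is not transparent in any block that contains
	     a definition of it: walk its DF def chain and clear INDX's
	     bit in each defining block.  */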
	  df_ref def;
	  for (def = DF_REG_DEF_CHAIN (REGNO (x));
	       def;
	       def = DF_REF_NEXT_REG (def))
1703
	    bitmap_clear_bit (bmap[DF_REF_BB (def)->index], indx);
1704
	}
1705

1706
      return;
1707

1708 1709
    case MEM:
      if (! MEM_READONLY_P (x))
1710
	{
1711 1712
	  bitmap_iterator bi;
	  unsigned bb_index;
1713 1714 1715 1716
	  rtx x_addr;

	  x_addr = get_addr (XEXP (x, 0));
	  x_addr = canon_rtx (x_addr);
1717

1718 1719 1720 1721
	  /* First handle all the blocks with calls.  We don't need to
	     do any list walking for them.  */
	  EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
	    {
1722
	      bitmap_clear_bit (bmap[bb_index], indx);
1723
	    }
1724

	  /* Now iterate over the blocks which have memory modifications
	     but which do not have any calls.  */
	  EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
					  blocks_with_calls,
					  0, bb_index, bi)
	    {
	      vec<modify_pair> list
		= canon_modify_mem_list[bb_index];
	      modify_pair *pair;
	      unsigned ix;

	      FOR_EACH_VEC_ELT_REVERSE (list, ix, pair)
		{
		  rtx dest = pair->dest;
		  rtx dest_addr = pair->dest_addr;

		  if (canon_true_dependence (dest, GET_MODE (dest),
					     dest_addr, x, x_addr))
1743 1744 1745 1746
		    {
		      bitmap_clear_bit (bmap[bb_index], indx);
		      break;
		    }
1747 1748
	        }
	    }
1749 1750
	}

1751 1752
      x = XEXP (x, 0);
      goto repeat;
1753

1754 1755 1756
    case PC:
    case CC0: /*FIXME*/
    case CONST:
1757
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;
1763

1764 1765 1766
    default:
      break;
    }
1767

1768
  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1769
    {
1770
      if (fmt[i] == 'e')
1771
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, i);
	      goto repeat;
	    }

	  compute_transp (XEXP (x, i), indx, bmap);
1782
	}
1783 1784 1785
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  compute_transp (XVECEXP (x, i, j), indx, bmap);
1786 1787 1788
    }
}

1789
/* Compute PRE+LCM working variables.  */
1790 1791

/* Local properties of expressions.  */
1792

1793
/* Nonzero for expressions that are transparent in the block.  */
1794
static sbitmap *transp;
1795

1796 1797
/* Nonzero for expressions that are computed (available) in the block.  */
static sbitmap *comp;
1798

1799 1800
/* Nonzero for expressions that are locally anticipatable in the block.  */
static sbitmap *antloc;
1801

1802 1803 1804
/* Nonzero for expressions where this block is an optimal computation
   point.  */
static sbitmap *pre_optimal;
1805

1806 1807
/* Nonzero for expressions which are redundant in a particular block.  */
static sbitmap *pre_redundant;
1808

/* Nonzero for expressions which should be inserted on a specific edge.  */
static sbitmap *pre_insert_map;

/* Nonzero for expressions which should be deleted in a specific block.  */
static sbitmap *pre_delete_map;

1815
/* Allocate vars used for PRE analysis.  */
1816 1817

static void
1818
alloc_pre_mem (int n_blocks, int n_exprs)
1819
{
1820 1821 1822
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1823

  pre_optimal = NULL;
  pre_redundant = NULL;
  pre_insert_map = NULL;
  pre_delete_map = NULL;
  ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1829

1830
  /* pre_insert and pre_delete are allocated later.  */
1831 1832
}

1833
/* Free vars used for PRE analysis.  */
1834 1835

static void
1836
free_pre_mem (void)
1837
{
1838 1839
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
1840 1841

  /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */
1842

1843
  if (pre_optimal)
1844
    sbitmap_vector_free (pre_optimal);
1845
  if (pre_redundant)
1846
    sbitmap_vector_free (pre_redundant);
1847
  if (pre_insert_map)
1848
    sbitmap_vector_free (pre_insert_map);
1849
  if (pre_delete_map)
1850
    sbitmap_vector_free (pre_delete_map);
1851

1852
  transp = comp = NULL;
1853
  pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1854 1855
}

/* Remove certain expressions from anticipatable and transparent
   sets of basic blocks that have an incoming abnormal edge.
   For PRE remove potentially trapping expressions to avoid placing
   them on abnormal edges.  For hoisting remove memory references that
   can be clobbered by calls.  */
1861 1862

static void
1863
prune_expressions (bool pre_p)
1864
{
1865
  sbitmap prune_exprs;
1866
  struct expr *expr;
1867
  unsigned int ui;
1868
  basic_block bb;
1869

1870
  prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
1871
  bitmap_clear (prune_exprs);
1872
  for (ui = 0; ui < expr_hash_table.size; ui++)
1873
    {
1874
      for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1875 1876
	{
	  /* Note potentially trapping expressions.  */
1877
	  if (may_trap_p (expr->expr))
1878
	    {
1879
	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1880 1881
	      continue;
	    }
1882

1883
	  if (!pre_p && MEM_P (expr->expr))
	    /* Note memory references that can be clobbered by a call.
	       We do not split abnormal edges in hoisting, so if a memory
	       reference were hoisted along an abnormal edge it would be
	       placed /before/ the call.  Therefore, only constant memory
	       references can be hoisted along abnormal edges.  */
	    {
1891 1892
	      if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
		  && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
1893
		continue;
1894

1895 1896 1897
	      if (MEM_READONLY_P (expr->expr)
		  && !MEM_VOLATILE_P (expr->expr)
		  && MEM_NOTRAP_P (expr->expr))
		/* Constant memory reference, e.g., a PIC address.  */
		continue;

	      /* ??? Optimally, we would use interprocedural alias
		 analysis to determine if this mem is actually killed
		 by this call.  */

1905
	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1906 1907 1908
	    }
	}
    }
1909

1910
  FOR_EACH_BB_FN (bb, cfun)
1911
    {
1912
      edge e;
1913
      edge_iterator ei;
1914 1915

      /* If the current block is the destination of an abnormal edge, we
	 kill all trapping (for PRE) and memory (for hoist) expressions
	 because we won't be able to properly place the instruction on
	 the edge.  So make them neither anticipatable nor transparent.
	 This is fairly conservative.

	 ??? For hoisting it may be necessary to check for set-and-jump
	 instructions here, not just for abnormal edges.  The general problem
	 is that when an expression cannot be placed right at the end of
	 a basic block we should account for any side-effects of subsequent
	 jump instructions that could clobber the expression.  It would
	 be best to implement this check along the lines of
	 should_hoist_expr_to_dom where the target block is already known
	 and, hence, there's no need to conservatively prune expressions on
	 "intermediate" set-and-jump instructions.  */
1930
      FOR_EACH_EDGE (e, ei, bb->preds)
1931 1932
	if ((e->flags & EDGE_ABNORMAL)
	    && (pre_p || CALL_P (BB_END (e->src))))
1933
	  {
1934
	    bitmap_and_compl (antloc[bb->index],
1935
				antloc[bb->index], prune_exprs);
1936
	    bitmap_and_compl (transp[bb->index],
1937
				transp[bb->index], prune_exprs);
1938 1939
	    break;
	  }
    }

  sbitmap_free (prune_exprs);
}

/* It may be necessary to insert a large number of insns on edges to
   make the existing occurrences of expressions fully redundant.  This
   routine examines the set of insertions and deletions and if the ratio
   of insertions to deletions is too high for a particular expression, then
   the expression is removed from the insertion/deletion sets. 

   N_ELEMS is the number of elements in the hash table.  */
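
/* For example, an expression that would have to be inserted on 30 edges
   in order to delete a single existing occurrence has an insertion to
   deletion ratio of 30; once that ratio exceeds MAX_GCSE_INSERTION_RATIO,
   the expression is dropped from both PRE_INSERT_MAP and PRE_DELETE_MAP.  */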

static void
prune_insertions_deletions (int n_elems)
{
  sbitmap_iterator sbi;
  sbitmap prune_exprs;

  /* We always use I to iterate over blocks/edges and J to iterate over
     expressions.  */
  unsigned int i, j;

  /* Counts for the number of times an expression needs to be inserted and
     number of times an expression can be removed as a result.  */
  int *insertions = GCNEWVEC (int, n_elems);
  int *deletions = GCNEWVEC (int, n_elems);

  /* Set of expressions which require too many insertions relative to
     the number of deletions achieved.  We will prune these out of the
     insertion/deletion sets.  */
  prune_exprs = sbitmap_alloc (n_elems);
1972
  bitmap_clear (prune_exprs);
1973 1974 1975

  /* Iterate over the edges counting the number of times each expression
     needs to be inserted.  */
  for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1977
    {
1978
      EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
	insertions[j]++;
    }

  /* Similarly for deletions, but those occur in blocks rather than on
     edges.  */
1984
  for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1985
    {
1986
      EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
	deletions[j]++;
    }

  /* Now that we have accurate counts, iterate over the elements in the
     hash table and see if any need too many insertions relative to the
     number of evaluations that can be removed.  If so, mark them in
     PRUNE_EXPRS.  */
  for (j = 0; j < (unsigned) n_elems; j++)
    if (deletions[j]
	&& ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1997
      bitmap_set_bit (prune_exprs, j);
1998 1999

  /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
2000
  EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
2001
    {
      for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
2003
	bitmap_clear_bit (pre_insert_map[i], j);
2004

2005
      for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
2006
	bitmap_clear_bit (pre_delete_map[i], j);
    }

  sbitmap_free (prune_exprs);
  free (insertions);
  free (deletions);
}

2014
/* Top level routine to do the dataflow analysis needed by PRE.  */
2015

2016
static struct edge_list *
2017 2018
compute_pre_data (void)
{
2019
  struct edge_list *edge_list;
2020 2021 2022 2023
  basic_block bb;

  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  prune_expressions (true);
2024
  bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));

  /* Compute ae_kill for each basic block using:

     ~(TRANSP | COMP)
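
     i.e. an expression is killed in a block if it is neither transparent
     through the block nor computed in the block.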
  */

  FOR_EACH_BB_FN (bb, cfun)
2032
    {
2033 2034
      bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
      bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
2035 2036
    }

2037
  edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
2038
			    ae_kill, &pre_insert_map, &pre_delete_map);
2039
  sbitmap_vector_free (antloc);
2040
  antloc = NULL;
2041
  sbitmap_vector_free (ae_kill);
2042
  ae_kill = NULL;
2043 2044

  prune_insertions_deletions (expr_hash_table.n_elems);
2045 2046

  return edge_list;
2047 2048 2049 2050
}

/* PRE utilities */

2051
/* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
2052
   block BB.

   VISITED is a pointer to a working buffer for tracking which BB's have
   been visited.  It is NULL for the top-level call.

   We treat reaching expressions that go through blocks containing the same
   reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
   2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
   2 as not reaching.  The intent is to improve the probability of finding
   only one reaching expression and to reduce register lifetimes by picking
   the closest such expression.  */

static int
2065 2066
pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
			      basic_block bb, char *visited)
2067
{
2068
  edge pred;
2069
  edge_iterator ei;

2071
  FOR_EACH_EDGE (pred, ei, bb->preds)
2072
    {
2073
      basic_block pred_bb = pred->src;
2074

2075
      if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2076
	  /* Has this predecessor already been visited?  */
2077
	  || visited[pred_bb->index])
2078 2079
	;/* Nothing to do.  */

2080
      /* Does this predecessor generate this expression?  */
2081
      else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
2082 2083 2084 2085
	{
	  /* Is this the occurrence we're looking for?
	     Note that there's only one generating occurrence per block
	     so we just need to check the block number.  */
2086
	  if (occr_bb == pred_bb)
2087
	    return 1;
2088

2089
	  visited[pred_bb->index] = 1;
2090 2091
	}
      /* Ignore this predecessor if it kills the expression.  */
2092
      else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2093
	visited[pred_bb->index] = 1;
2094

2095 2096
      /* Neither gen nor kill.  */
      else
	{
2098
	  visited[pred_bb->index] = 1;
2099
	  if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
2100
	    return 1;
	}
    }

  /* All paths have been checked.  */
  return 0;
}
2107 2108

/* The wrapper for pre_expr_reaches_here_p_work that ensures that any
   memory allocated for that function is returned.  */
2110 2111

static int
2112
pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
2113 2114
{
  int rval;
2115
  char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
2116

  rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
2118 2119

  free (visited);
2120
  return rval;
2121
}
2122

2123
/* Generate RTL to copy an EXPR to its `reaching_reg' and return it.  */
2124 2125

static rtx
2126
process_insert_insn (struct expr *expr)
2127 2128
{
  rtx reg = expr->reaching_reg;
2129
  /* Copy the expression to make sure we don't have any sharing issues.  */
2130 2131
  rtx exp = copy_rtx (expr->expr);
  rtx pat;
2132 2133

  start_sequence ();

  /* If the expression is something that's an operand, like a constant,
     just copy it to a register.  */
  if (general_operand (exp, GET_MODE (reg)))
    emit_move_insn (reg, exp);

  /* Otherwise, make a new insn to compute this expression and make sure the
2141
     insn will be recognized (this also adds any needed CLOBBERs).  */
2142 2143 2144 2145
  else
    {
      rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));

2146
      if (insn_invalid_p (insn, false))
2147
	gcc_unreachable ();
2148
    }

2150
  pat = get_insns ();
2151 2152 2153 2154
  end_sequence ();

  return pat;
}
2155

2156 2157
/* Add EXPR to the end of basic block BB.

2158
   This is used by both PRE and code hoisting.  */
2159 2160

static void
2161
insert_insn_end_basic_block (struct expr *expr, basic_block bb)
2162
{
2163
  rtx insn = BB_END (bb);
2164 2165 2166
  rtx new_insn;
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
2167
  rtx pat, pat_end;
2168

2169
  pat = process_insert_insn (expr);
2170
  gcc_assert (pat && INSN_P (pat));
2171 2172 2173 2174

  pat_end = pat;
  while (NEXT_INSN (pat_end) != NULL_RTX)
    pat_end = NEXT_INSN (pat_end);
2175 2176

  /* If the last insn is a jump, insert EXPR in front [taking care to
     handle cc0, etc. properly].  Similarly we need to take care of
     trapping instructions in the presence of non-call exceptions.  */
2179

2180
  if (JUMP_P (insn)
2181
      || (NONJUMP_INSN_P (insn)
2182 2183
	  && (!single_succ_p (bb)
	      || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2184
    {
#ifdef HAVE_cc0
2186 2187
      /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
	 if cc0 isn't set.  */
2188
      rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
      if (note)
	insn = XEXP (note, 0);
      else
	{
	  rtx maybe_cc0_setter = prev_nonnote_insn (insn);
	  if (maybe_cc0_setter
2195
	      && INSN_P (maybe_cc0_setter)
	      && sets_cc0_p (PATTERN (maybe_cc0_setter)))
	    insn = maybe_cc0_setter;
	}
#endif
      /* FIXME: What if something in cc0/jump uses value set in new insn?  */
2201
      new_insn = emit_insn_before_noloc (pat, insn, bb);
2202
    }
2203

2204 2205
  /* Likewise if the last insn is a call, as will happen in the presence
     of exception handling.  */
2206
  else if (CALL_P (insn)
2207 2208
	   && (!single_succ_p (bb)
	       || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2209
    {
2210 2211 2212
      /* Keeping in mind targets with small register classes and parameters
         in registers, we search backward and place the instructions before
	 the first parameter is loaded.  Do this for everyone for consistency
2213
	 and a presumption that we'll get better code elsewhere as well.  */
2214 2215 2216 2217

      /* Since different machines initialize their parameter registers
	 in different orders, assume nothing.  Collect the set of all
	 parameter registers.  */
2218
      insn = find_first_parameter_load (insn, BB_HEAD (bb));
2219

      /* If we found all the parameter loads, then we want to insert
	 before the first parameter load.

	 If we did not find all the parameter loads, then we might have
	 stopped on the head of the block, which could be a CODE_LABEL.
	 If we inserted before the CODE_LABEL, then we would be putting
	 the insn in the wrong basic block.  In that case, put the insn
2227
	 after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
2228
      while (LABEL_P (insn)
2229
	     || NOTE_INSN_BASIC_BLOCK_P (insn))
2230
	insn = NEXT_INSN (insn);
2231

2232
      new_insn = emit_insn_before_noloc (pat, insn, bb);
2233 2234
    }
  else
2235
    new_insn = emit_insn_after_noloc (pat, insn, bb);
2236

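  /* For each insn in the newly emitted sequence, add REG_LABEL_OPERAND
     notes to NEW_INSN for any labels it references.  */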
  while (1)
2238
    {
2239
      if (INSN_P (pat))
2240
	add_label_notes (PATTERN (pat), new_insn);
2241 2242 2243
      if (pat == pat_end)
	break;
      pat = NEXT_INSN (pat);
2244
    }
2245

2246 2247
  gcse_create_count++;

2248
  if (dump_file)
2249
    {
2250
      fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2251
	       bb->index, INSN_UID (new_insn));
2252
      fprintf (dump_file, "copying expression %d to reg %d\n",
2253
	       expr->bitmap_index, regno);
2254 2255 2256
    }
}

2257 2258
/* Insert partially redundant expressions on edges in the CFG to make
   the expressions fully redundant.  */
2259

2260
static int
2261
pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
2262
{
2263
  int e, i, j, num_edges, set_size, did_insert = 0;
2264 2265
  sbitmap *inserted;

2266 2267
  /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
     if it reaches any of the deleted expressions.  */
2268

2269 2270
  set_size = pre_insert_map[0]->size;
  num_edges = NUM_EDGES (edge_list);
2271
  inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2272
  bitmap_vector_clear (inserted, num_edges);
2273

2274
  for (e = 0; e < num_edges; e++)
2275 2276
    {
      int indx;
2277
      basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2278 2279

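      /* Walk the insert bitmap for this edge one word at a time; INDX is
	 the expression index corresponding to bit 0 of the current word.  */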
      for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
	{
2281
	  SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2282

2283 2284 2285
	  for (j = indx;
	       insert && j < (int) expr_hash_table.n_elems;
	       j++, insert >>= 1)
2286 2287 2288 2289
	    if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
	      {
		struct expr *expr = index_map[j];
		struct occr *occr;
2290

2291
		/* Now look at each deleted occurrence of this expression.  */
		for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
		  {
		    if (! occr->deleted_p)
		      continue;

2297
		    /* Insert this expression on this edge if it would
2298
		       reach the deleted occurrence in BB.  */
2299
		    if (!bitmap_bit_p (inserted[e], j))
		      {
			rtx insn;
			edge eg = INDEX_EDGE (edge_list, e);

			/* We can't insert anything on an abnormal and
			   critical edge, so we insert the insn at the end of
			   the previous block.  There are several alternatives
			   detailed in Morgan's book P277 (sec 10.5) for
			   handling this situation.  This one is easiest for
			   now.  */

2311
			if (eg->flags & EDGE_ABNORMAL)
2312
			  insert_insn_end_basic_block (index_map[j], bb);
			else
			  {
			    insn = process_insert_insn (index_map[j]);
			    insert_insn_on_edge (insn, eg);
			  }

2319
			if (dump_file)
2320
			  {
2321
			    fprintf (dump_file, "PRE: edge (%d,%d), ",
2322 2323
				     bb->index,
				     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2324
			    fprintf (dump_file, "copy expression %d\n",
2325 2326 2327
				     expr->bitmap_index);
			  }

2328
			update_ld_motion_stores (expr);
2329
			bitmap_set_bit (inserted[e], j);
			did_insert = 1;
			gcse_create_count++;
		      }
		  }
	      }
2335 2336
	}
    }
2337

2338
  sbitmap_vector_free (inserted);
2339
  return did_insert;
2340 2341
}

2342
/* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
   Given "old_reg <- expr" (INSN), instead of adding after it
     reaching_reg <- old_reg
   it's better to do the following:
     reaching_reg <- expr
     old_reg      <- reaching_reg
   because this way copy propagation can discover additional PRE
   opportunities.  But if this fails, we try the old way.
   When "expr" is a store, i.e.
   given "MEM <- old_reg", instead of adding after it
     reaching_reg <- old_reg
   it's better to add it before as follows:
     reaching_reg <- old_reg
     MEM          <- reaching_reg.  */
2356 2357

static void
2358
pre_insert_copy_insn (struct expr *expr, rtx insn)
2359 2360 2361 2362
{
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
  int indx = expr->bitmap_index;
2363
  rtx pat = PATTERN (insn);
2364
  rtx set, first_set, new_insn;
2365
  rtx old_reg;
2366
  int i;
2367

2368
  /* This block matches the logic in hash_scan_insn.  */
2369
  switch (GET_CODE (pat))
2370
    {
    case SET:
      set = pat;
      break;

    case PARALLEL:
2376 2377
      /* Search through the parallel looking for the set whose
	 source was the expression that we're interested in.  */
2378
      first_set = NULL_RTX;
2379 2380 2381 2382
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
2383
	  if (GET_CODE (x) == SET)
2384
	    {
	      /* If the source was a REG_EQUAL or REG_EQUIV note, we
		 may not find an equivalent expression, but in this
		 case the PARALLEL will have a single set.  */
	      if (first_set == NULL_RTX)
		first_set = x;
	      if (expr_equiv_p (SET_SRC (x), expr->expr))
	        {
	          set = x;
	          break;
	        }
2395 2396
	    }
	}
2397 2398 2399 2400

      gcc_assert (first_set);
      if (set == NULL_RTX)
        set = first_set;
2401 2402 2403 2404
      break;

    default:
      gcc_unreachable ();
2405
    }
2406

2407
  if (REG_P (SET_DEST (set)))
2408
    {
      old_reg = SET_DEST (set);
      /* Check if we can modify the set destination in the original insn.  */
      if (validate_change (insn, &SET_DEST (set), reg, 0))
        {
          new_insn = gen_move_insn (old_reg, reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
      else
        {
          new_insn = gen_move_insn (reg, old_reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
2421
    }
2422
  else /* This is possible only in case of a store to memory.  */
2423
    {
2424
      old_reg = SET_SRC (set);
2425
      new_insn = gen_move_insn (reg, old_reg);

      /* Check if we can modify the set source in the original insn.  */
      if (validate_change (insn, &SET_SRC (set), reg, 0))
        new_insn = emit_insn_before (new_insn, insn);
      else
        new_insn = emit_insn_after (new_insn, insn);
2432
    }
2433 2434 2435

  gcse_create_count++;

2436 2437
  if (dump_file)
    fprintf (dump_file,
2438
	     "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2439
	      BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2440
	      INSN_UID (insn), regno);
}

/* Copy available expressions that reach the redundant expression
   to `reaching_reg'.  */

static void
2447
pre_insert_copies (void)
2448
{
2449
  unsigned int i, added_copy;
2450 2451 2452
  struct expr *expr;
  struct occr *occr;
  struct occr *avail;
2453

  /* For each available expression in the table, copy the result to
     `reaching_reg' if the expression reaches a deleted one.

     ??? The current algorithm is rather brute force.
     Need to do some profiling.  */

2460
  for (i = 0; i < expr_hash_table.size; i++)
2461
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
      {
	/* If the basic block isn't reachable, PPOUT will be TRUE.  However,
	   we don't want to insert a copy here because the expression may not
	   really be redundant.  So only insert an insn if the expression was
	   deleted.  This test also avoids further processing if the
	   expression wasn't deleted anywhere.  */
	if (expr->reaching_reg == NULL)
	  continue;
2470

2471
	/* Set when we add a copy for that expression.  */
2472
	added_copy = 0;

	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	  {
	    if (! occr->deleted_p)
	      continue;
2478

2479 2480 2481
	    for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
	      {
		rtx insn = avail->insn;
2482

2483 2484 2485
		/* No need to handle this one if handled already.  */
		if (avail->copied_p)
		  continue;
2486

2487
		/* Don't handle this one if it's a redundant one.  */
2488
		if (INSN_DELETED_P (insn))
2489
		  continue;
2490

2491
		/* Or if the expression doesn't reach the deleted one.  */
2492
		if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2493 2494
					       expr,
					       BLOCK_FOR_INSN (occr->insn)))
2495
		  continue;
2496

2497 2498
                added_copy = 1;

		/* Copy the result of avail to reaching_reg.  */
		pre_insert_copy_insn (expr, insn);
		avail->copied_p = 1;
	      }
	  }
2504

2505
	  if (added_copy)
2506
            update_ld_motion_stores (expr);
2507
      }
2508 2509
}

struct set_data
{
  rtx insn;
  const_rtx set;
  int nsets;
};

/* Increment number of sets and record set in DATA.  */

static void
record_set_data (rtx dest, const_rtx set, void *data)
{
  struct set_data *s = (struct set_data *)data;

  if (GET_CODE (set) == SET)
    {
      /* We allow insns having multiple sets, where all but one are
	 dead as single set insns.  In the common case only a single
	 set is present, so we want to avoid checking for REG_UNUSED
	 notes unless necessary.  */
      if (s->nsets == 1
	  && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
	  && !side_effects_p (s->set))
	s->nsets = 0;

      if (!s->nsets)
	{
	  /* Record this set.  */
	  s->nsets += 1;
	  s->set = set;
	}
      else if (!find_reg_note (s->insn, REG_UNUSED, dest)
	       || side_effects_p (set))
	s->nsets += 1;
    }
}

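/* Return the single SET of INSN.  Insns with multiple sets are allowed
   as long as all but one of the sets are dead (REG_UNUSED) and free of
   side effects; see record_set_data above.  Exactly one live set must
   remain, which is asserted.  */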
static const_rtx
single_set_gcse (rtx insn)
{
  struct set_data s;
  rtx pattern;
  
  gcc_assert (INSN_P (insn));

  /* Optimize common case.  */
  pattern = PATTERN (insn);
  if (GET_CODE (pattern) == SET)
    return pattern;

  s.insn = insn;
  s.nsets = 0;
  note_stores (pattern, record_set_data, &s);

  /* Considered invariant insns have exactly one set.  */
  gcc_assert (s.nsets == 1);
  return s.set;
}

2569 2570
/* Emit move from SRC to DEST noting the equivalence with expression computed
   in INSN.  */
2571

2572
static rtx
2573
gcse_emit_move_after (rtx dest, rtx src, rtx insn)
2574
{
2575
  rtx new_rtx;
2576 2577
  const_rtx set = single_set_gcse (insn);
  rtx set2;
2578
  rtx note;
2579
  rtx eqv = NULL_RTX;
2580 2581 2582 2583

  /* This should never fail since we're creating a reg->reg copy
     we've verified to be valid.  */

2584
  new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2585

2586 2587 2588
  /* Note the equivalence for local CSE pass.  Take the note from the old
     set if there was one.  Otherwise record the SET_SRC from the old set
     unless DEST is also an operand of the SET_SRC.  */
2589
  set2 = single_set (new_rtx);
2590
  if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2591
    return new_rtx;
2592 2593
  if ((note = find_reg_equal_equiv_note (insn)))
    eqv = XEXP (note, 0);
2594 2595
  else if (! REG_P (dest)
	   || ! reg_mentioned_p (dest, SET_SRC (set)))
2596 2597
    eqv = SET_SRC (set);

2598 2599
  if (eqv != NULL_RTX)
    set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2600

2601
  return new_rtx;
2602 2603
}

2604 2605 2606
/* Delete redundant computations.
   Deletion is done by changing the insn to copy the `reaching_reg' of
   the expression into the result of the SET.  It is left to later passes
2607
   to propagate the copy or eliminate it.
2608

2609
   Return nonzero if a change is made.  */
2610 2611

static int
2612
pre_delete (void)
2613
{
2614
  unsigned int i;
2615
  int changed;
2616 2617
  struct expr *expr;
  struct occr *occr;
2618

2619
  changed = 0;
2620
  for (i = 0; i < expr_hash_table.size; i++)
2621
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2622 2623
      {
	int indx = expr->bitmap_index;
2624

2625
	/* We only need to search antic_occr since we require ANTLOC != 0.  */
2626 2627 2628 2629
	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	  {
	    rtx insn = occr->insn;
	    rtx set;
2630
	    basic_block bb = BLOCK_FOR_INSN (insn);
2631

2632
	    /* We only delete insns that have a single_set.  */
2633
	    if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2634 2635
		&& (set = single_set (insn)) != 0
                && dbg_cnt (pre_insn))
	      {
		/* Create a pseudo-reg to store the result of reaching
		   expressions into.  Get the mode for the new pseudo from
		   the mode of the original destination pseudo.  */
		if (expr->reaching_reg == NULL)
		  expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2642

2643
		gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2644 2645 2646 2647
		delete_insn (insn);
		occr->deleted_p = 1;
		changed = 1;
		gcse_subst_count++;
2648

2649
		if (dump_file)
2650
		  {
2651
		    fprintf (dump_file,
2652 2653
			     "PRE: redundant insn %d (expression %d) in ",
			       INSN_UID (insn), indx);
2654
		    fprintf (dump_file, "bb %d, reaching reg is %d\n",
2655
			     bb->index, REGNO (expr->reaching_reg));
2656 2657 2658 2659
		  }
	      }
	  }
      }

  return changed;
}

/* Perform GCSE optimizations using PRE.
   This is called by one_pre_gcse_pass after all the dataflow analysis
   has been done.

2668 2669 2670
   This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
   lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
   Compiler Design and Implementation.
2671

   ??? A new pseudo reg is created to hold the reaching expression.  The nice
   thing about the classical approach is that it would try to use an existing
   reg.  If the register can't be adequately optimized [i.e. we introduce
   reload problems], one could add a pass here to propagate the new register
   through the block.
2677

2678 2679 2680 2681
   ??? We don't handle single sets in PARALLELs because we're [currently] not
   able to copy the rest of the parallel when we insert copies to create full
   redundancies from partial redundancies.  However, there's no reason why we
   can't handle PARALLELs in the cases where there are no partial
2682 2683 2684
   redundancies.  */

static int
2685
pre_gcse (struct edge_list *edge_list)
2686
{
2687 2688
  unsigned int i;
  int did_insert, changed;
2689
  struct expr **index_map;
2690
  struct expr *expr;
2691 2692 2693 2694

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

2695
  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
2696
  for (i = 0; i < expr_hash_table.size; i++)
2697
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2698
      index_map[expr->bitmap_index] = expr;

  /* Delete the redundant insns first so that
     - we know what register to use for the new insns and for the other
       ones with reaching expressions
     - we know which insns are redundant when we go to create copies  */
2704

2705
  changed = pre_delete ();
2706
  did_insert = pre_edge_insert (edge_list, index_map);
2707

2708
  /* In other places with reaching expressions, copy the expression to the
2709
     specially allocated pseudo-reg that reaches the redundant expr.  */
2710
  pre_insert_copies ();
  if (did_insert)
    {
      commit_edge_insertions ();
      changed = 1;
    }
2716

2717
  free (index_map);
  return changed;
}

/* Top level routine to perform one PRE GCSE pass.

2723
   Return nonzero if a change was made.  */
2724 2725

static int
2726
one_pre_gcse_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

2733
  /* Return if there's nothing to do, or it is too expensive.  */
2734
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("PRE disabled")))
    return 0;

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

2745
  alloc_hash_table (&expr_hash_table);
2746
  add_noreturn_fake_exit_edges ();
2747 2748 2749
  if (flag_gcse_lm)
    compute_ld_motion_mems ();

2750
  compute_hash_table (&expr_hash_table);
2751 2752
  if (flag_gcse_lm)
    trim_ld_motion_mems ();
2753 2754
  if (dump_file)
    dump_hash_table (dump_file, "Expression", &expr_hash_table);
2755

2756
  if (expr_hash_table.n_elems > 0)
2757
    {
2758
      struct edge_list *edge_list;
2759
      alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2760 2761
      edge_list = compute_pre_data ();
      changed |= pre_gcse (edge_list);
2762
      free_edge_list (edge_list);
2763 2764
      free_pre_mem ();
    }
2765

2766 2767
  if (flag_gcse_lm)
    free_ld_motion_mems ();
2768
  remove_fake_exit_edges ();
2769
  free_hash_table (&expr_hash_table);
2770

  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

2777
  if (dump_file)
2778
    {
2779
      fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2780 2781
	       current_function_name (), n_basic_blocks_for_fn (cfun),
	       bytes_used);
2782
      fprintf (dump_file, "%d substs, %d insns created\n",
2783
	       gcse_subst_count, gcse_create_count);
2784 2785 2786 2787
    }

  return changed;
}
2788

/* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
   to INSN.  If such notes are added to an insn which references a
   CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
   that note, because the following loop optimization pass requires
   them.  */
2794 2795 2796

/* ??? If there was a jump optimization pass after gcse and before loop,
   then we would not need to do this here, because jump would add the
2797
   necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.  */
2798 2799

static void
2800
add_label_notes (rtx x, rtx insn)
2801 2802 2803
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
2804
  const char *fmt;
2805 2806 2807

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
2808
      /* This code used to ignore labels that referred to dispatch tables to
2809
	 avoid flow generating (slightly) worse code.
2810

	 We no longer ignore such label references (see LABEL_REF handling in
	 mark_jump_label for additional information).  */
2813

2814 2815 2816 2817
      /* There's no reason for current users to emit jump-insns with
	 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
	 notes.  */
      gcc_assert (!JUMP_P (insn));
2818 2819
      add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));

2820 2821 2822
      if (LABEL_P (XEXP (x, 0)))
	LABEL_NUSES (XEXP (x, 0))++;

2823 2824 2825
      return;
    }

2826
  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insn);
    }
}
2835

/* Code Hoisting variables and subroutines.  */

/* Very busy expressions.  */
static sbitmap *hoist_vbein;
static sbitmap *hoist_vbeout;

/* ??? We could compute post dominators and run this algorithm in
2843
   reverse to perform tail merging, doing so would probably be
   more effective than the tail merging code in jump.c.

   It's unclear if tail merging could be run in parallel with
   code hoisting.  It would be nice.  */

/* Allocate vars used for code hoisting analysis.  */

static void
2852
alloc_code_hoist_mem (int n_blocks, int n_exprs)
{
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);

  hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
  hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
}

/* Free vars used for code hoisting analysis.  */

static void
2865
free_code_hoist_mem (void)
2866
{
2867 2868 2869
  sbitmap_vector_free (antloc);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
2870

2871 2872
  sbitmap_vector_free (hoist_vbein);
  sbitmap_vector_free (hoist_vbeout);
2873

2874
  free_dominance_info (CDI_DOMINATORS);
}

/* Compute the very busy expressions at entry/exit from each block.

   An expression is very busy if all paths from a given point
   compute the expression.  */

static void
2883
compute_code_hoist_vbeinout (void)
2884
{
2885 2886
  int changed, passes;
  basic_block bb;
2887

2888 2889
  bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2890 2891 2892

  passes = 0;
  changed = 1;
2893

2894 2895 2896
  while (changed)
    {
      changed = 0;
2897

2898 2899
      /* We scan the blocks in the reverse order to speed up
	 the convergence.  */
2900
      FOR_EACH_BB_REVERSE_FN (bb, cfun)
2901
	{
2902
	  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2903
	    {
2904 2905
	      bitmap_intersection_of_succs (hoist_vbeout[bb->index],
					    hoist_vbein, bb);
2906 2907 2908

	      /* Include expressions in VBEout that are calculated
		 in BB and available at its end.  */
2909
	      bitmap_ior (hoist_vbeout[bb->index],
2910 2911
			      hoist_vbeout[bb->index], comp[bb->index]);
	    }
2912

2913
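	  /* VBEin (bb) = ANTLOC (bb) | (VBEout (bb) & TRANSP (bb)).  */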
	  changed |= bitmap_or_and (hoist_vbein[bb->index],
2914 2915 2916
					      antloc[bb->index],
					      hoist_vbeout[bb->index],
					      transp[bb->index]);
2917
	}
2918

2919 2920 2921
      passes++;
    }

2922
  if (dump_file)
2923 2924 2925
    {
      fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);

2926
      FOR_EACH_BB_FN (bb, cfun)
2927 2928
        {
	  fprintf (dump_file, "vbein (%d): ", bb->index);
2929
	  dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2930
	  fprintf (dump_file, "vbeout(%d): ", bb->index);
2931
	  dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2932 2933
	}
    }
}

/* Top level routine to do the dataflow analysis needed by code hoisting.  */

static void
2939
compute_code_hoist_data (void)
2940
{
2941
  compute_local_properties (transp, comp, antloc, &expr_hash_table);
2942
  prune_expressions (false);
2943
  compute_code_hoist_vbeinout ();
2944
  calculate_dominance_info (CDI_DOMINATORS);
2945 2946
  if (dump_file)
    fprintf (dump_file, "\n");
2947 2948
}

/* Update register pressure for BB when hoisting an expression from
   instruction FROM, if live ranges of inputs are shrunk.  Also
   maintain live_in information if live range of register referred
   in FROM is shrunk.
   
   Return 0 if register pressure doesn't change, otherwise return
   the number by which register pressure is decreased.
   
   NOTE: Register pressure won't be increased in this function.  */

static int
update_bb_reg_pressure (basic_block bb, rtx from)
{
  rtx dreg, insn;
  basic_block succ_bb;
2964
  df_ref use, op_ref;
  edge succ;
  edge_iterator ei;
  int decreased_pressure = 0;
  int nregs;
  enum reg_class pressure_class;
2970 2971

  FOR_EACH_INSN_USE (use, from)
2972
    {
2973
      dreg = DF_REF_REAL_REG (use);
      /* The live range of register is shrunk only if it isn't:
	 1. referred on any path from the end of this block to EXIT, or
	 2. referred by insns other than FROM in this block.  */
      FOR_EACH_EDGE (succ, ei, bb->succs)
	{
	  succ_bb = succ->dest;
2980
	  if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    continue;

	  if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
	    break;
	}
      if (succ != NULL)
	continue;

      op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
      for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
	{
	  if (!DF_REF_INSN_INFO (op_ref))
	    continue;

	  insn = DF_REF_INSN (op_ref);
	  if (BLOCK_FOR_INSN (insn) == bb
	      && NONDEBUG_INSN_P (insn) && insn != from)
	    break;
	}

      pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
      /* Decrease register pressure and update live_in information for
	 this block.  */
      if (!op_ref && pressure_class != NO_REGS)
	{
	  decreased_pressure += nregs;
	  BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
	  bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
	}
    }
  return decreased_pressure;
}

/* Determine if the expression EXPR should be hoisted to EXPR_BB up in
   the flow graph, if it can reach BB unimpaired.  Stop the search if the
   expression would need to be moved more than DISTANCE instructions.

   DISTANCE is the number of instructions through which EXPR can be
   hoisted up in the flow graph.

   BB_SIZE points to an array which contains the number of instructions
   for each basic block.

   PRESSURE_CLASS and NREGS are register class and number of hard registers
   for storing EXPR.

   HOISTED_BBS points to a bitmap indicating basic blocks through which
   EXPR is hoisted.
3029

3030 3031
   FROM is the instruction from which EXPR is hoisted.

   It's unclear exactly what Muchnick meant by "unimpaired".  It seems
   to me that the expression must either be computed or transparent in
   *every* block in the path(s) from EXPR_BB to BB.  Any other definition
   would allow the expression to be hoisted out of loops, even if
   the expression wasn't a loop invariant.

   Contrast this to reachability for PRE where an expression is
   considered reachable if *any* path reaches instead of *all*
   paths.  */

static int
3043 3044 3045
should_hoist_expr_to_dom (basic_block expr_bb, struct expr *expr,
			  basic_block bb, sbitmap visited, int distance,
			  int *bb_size, enum reg_class pressure_class,
3046
			  int *nregs, bitmap hoisted_bbs, rtx from)
3047
{
3048
  unsigned int i;
3049
  edge pred;
3050
  edge_iterator ei;
3051
  sbitmap_iterator sbi;
3052
  int visited_allocated_locally = 0;
3053
  int decreased_pressure = 0;
3054

  if (flag_ira_hoist_pressure)
    {
      /* Record old information of basic block BB when it is visited
	 at the first time.  */
      if (!bitmap_bit_p (hoisted_bbs, bb->index))
	{
	  struct bb_data *data = BB_DATA (bb);
	  bitmap_copy (data->backup, data->live_in);
	  data->old_pressure = data->max_reg_pressure[pressure_class];
	}
      decreased_pressure = update_bb_reg_pressure (bb, from);
    }
3067 3068 3069 3070
  /* Terminate the search if distance, for which EXPR is allowed to move,
     is exhausted.  */
  if (distance > 0)
    {
      if (flag_ira_hoist_pressure)
	{
	  /* Prefer to hoist EXPR if register pressure is decreased.  */
	  if (decreased_pressure > *nregs)
	    distance += bb_size[bb->index];
	  /* Let EXPR be hoisted through the basic block at no cost if one
	     of the following conditions is satisfied:

	     1. The basic block has low register pressure.
	     2. Register pressure won't be increased after hoisting EXPR.

	     Constant expressions are handled conservatively, because
	     hoisting constant expressions aggressively results in worse
	     code.  This decision is made by the observation of CSiBE
	     on ARM target, while it has no obvious effect on other
	     targets like x86, x86_64, mips and powerpc.  */
	  else if (CONST_INT_P (expr->expr)
		   || (BB_DATA (bb)->max_reg_pressure[pressure_class]
			 >= ira_class_hard_regs_num[pressure_class]
		       && decreased_pressure < *nregs))
	    distance -= bb_size[bb->index];
	}
      else
3094
	distance -= bb_size[bb->index];

      if (distance <= 0)
	return 0;
    }
  else
    gcc_assert (distance == 0);
3101 3102 3103

  if (visited == NULL)
    {
      visited_allocated_locally = 1;
3105
      visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
3106
      bitmap_clear (visited);
3107 3108
    }

3109
  FOR_EACH_EDGE (pred, ei, bb->preds)
3110
    {
3111
      basic_block pred_bb = pred->src;
3112

3113
      if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
3114
	break;
3115 3116
      else if (pred_bb == expr_bb)
	continue;
3117
      else if (bitmap_bit_p (visited, pred_bb->index))
3118
	continue;
3119
      else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
3120 3121 3122 3123
	break;
      /* Not killed.  */
      else
	{
3124
	  bitmap_set_bit (visited, pred_bb->index);
3125 3126
	  if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
					  visited, distance, bb_size,
3127 3128
					  pressure_class, nregs,
					  hoisted_bbs, from))
3129 3130 3131
	    break;
	}
    }
3132
  if (visited_allocated_locally)
3133 3134
    {
      /* If EXPR can be hoisted to expr_bb, record basic blocks through
3135
	 which EXPR is hoisted in hoisted_bbs.  */
3136 3137
      if (flag_ira_hoist_pressure && !pred)
	{
3138 3139
	  /* Record the basic block from which EXPR is hoisted.  */
	  bitmap_set_bit (visited, bb->index);
3140
	  EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
3141
	    bitmap_set_bit (hoisted_bbs, i);
3142 3143 3144
	}
      sbitmap_free (visited);
    }
3145

3146 3147 3148
  return (pred == NULL);
}

/* Find occurrence in BB.  */
3150

static struct occr *
find_occr_in_bb (struct occr *occr, basic_block bb)
{
  /* Find the right occurrence of this expression.  */
  while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
    occr = occr->next;

  return occr;
}

/* Actually perform code hoisting.

   The code hoisting pass can hoist multiple computations of the same
   expression along dominated path to a dominating basic block, like
   from b2/b3 to b1 as depicted below:

          b1      ------
          /\         |
         /  \        |
        bx   by   distance
       /      \      |
      /        \     |
     b2        b3 ------

   Unfortunately code hoisting generally extends the live range of an
   output pseudo register, which increases register pressure and hurts
   register allocation.  To address this issue, an attribute MAX_DISTANCE
   is computed and attached to each expression.  The attribute is computed
   from rtx cost of the corresponding expression and it's used to control
   how long the expression can be hoisted up in flow graph.  As the
   expression is hoisted up in flow graph, GCC decreases its DISTANCE
3182 3183
   and stops the hoist if DISTANCE reaches 0.  Code hoisting can decrease
   register pressure if live ranges of inputs are shrunk.

   Option "-fira-hoist-pressure" implements register-pressure-directed
   hoisting based on the above method.  The rationale is:
     1. Calculate register pressure for each basic block by reusing IRA
	facility.
     2. When expression is hoisted through one basic block, GCC checks
	the change of live ranges for inputs/output.  The basic block's
	register pressure will be increased because of extended live
	range of output.  However, register pressure will be decreased
	if the live ranges of inputs are shrunk.
     3. After knowing how hoisting affects register pressure, GCC prefers
	to hoist the expression if it can decrease register pressure, by
	increasing DISTANCE of the corresponding expression.
     4. If hoisting the expression increases register pressure, GCC checks
	register pressure of the basic block and decrease DISTANCE only if
	the register pressure is high.  In other words, expression will be
	hoisted through at no cost if the basic block has low register
	pressure.
     5. Update register pressure information for basic blocks through
	which expression is hoisted.  */
3204

3205
static int
3206
hoist_code (void)
3207
{
3208
  basic_block bb, dominated;
3209
  vec<basic_block> dom_tree_walk;
3210
  unsigned int dom_tree_walk_index;
3211
  vec<basic_block> domby;
3212
  unsigned int i, j, k;
3213
  struct expr **index_map;
3214
  struct expr *expr;
3215 3216
  int *to_bb_head;
  int *bb_size;
3217
  int changed = 0;
  struct bb_data *data;
  /* Basic blocks that have occurrences reachable from BB.  */
  bitmap from_bbs;
  /* Basic blocks through which expr is hoisted.  */
  bitmap hoisted_bbs = NULL;
  bitmap_iterator bi;
3224 3225 3226 3227

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

3228
  index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;

  /* Calculate the size of each basic block and note how far
     each instruction is from the start of its block.  We then use this
     data to restrict the distance an expression can travel.  */

  to_bb_head = XCNEWVEC (int, get_max_uid ());
  bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx insn;
      int to_head;

      to_head = 0;
      FOR_BB_INSNS (bb, insn)
	{
	  /* Don't count debug instructions to avoid them affecting
	     decision choices.  */
	  if (NONDEBUG_INSN_P (insn))
	    to_bb_head[INSN_UID (insn)] = to_head++;
	}

      bb_size[bb->index] = to_head;
    }

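  /* The code below relies on the entry block having exactly one successor,
     namely the first real basic block; verify that here.  */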
  gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
	      && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
		  == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));

  from_bbs = BITMAP_ALLOC (NULL);
  if (flag_ira_hoist_pressure)
    hoisted_bbs = BITMAP_ALLOC (NULL);

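  /* Visit the first real basic block and every block it dominates; these
     are the candidate destinations for hoisting.  */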
  dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
					    ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);

  /* Walk over each basic block looking for potentially hoistable
     expressions; nothing gets hoisted from the entry block.  */
  FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
    {
      domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);

      if (domby.length () == 0)
	continue;

      /* Examine each expression that is very busy at the exit of this
	 block.  These are the potentially hoistable expressions.  */
      for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
	{
	  if (bitmap_bit_p (hoist_vbeout[bb->index], i))
	    {
	      int nregs = 0;
	      enum reg_class pressure_class = NO_REGS;
	      /* Current expression.  */
	      struct expr *expr = index_map[i];
	      /* Number of occurrences of EXPR that can be hoisted to BB.  */
	      int hoistable = 0;
	      /* Occurrences reachable from BB.  */
	      vec<occr_t> occrs_to_hoist = vNULL;
	      /* We want to insert the expression into BB only once, so
		 note when we've inserted it.  */
	      int insn_inserted_p;
	      occr_t occr;

	      /* If an expression is computed in BB and is available at end of
		 BB, hoist all occurrences dominated by BB to BB.  */
	      if (bitmap_bit_p (comp[bb->index], i))
		{
		  occr = find_occr_in_bb (expr->antic_occr, bb);

		  if (occr)
		    {
Joseph Myers committed
3304
		      /* An occurrence might've been already deleted
			 while processing a dominator of BB.  */
		      if (!occr->deleted_p)
			{
			  gcc_assert (NONDEBUG_INSN_P (occr->insn));
			  hoistable++;
			}
		    }
		  else
		    hoistable++;
		}

	      /* We've found a potentially hoistable expression; now
		 we look at every block BB dominates to see if it
		 computes the expression.  */
	      FOR_EACH_VEC_ELT (domby, j, dominated)
		{
		  int max_distance;

		  /* Ignore self dominance.  */
		  if (bb == dominated)
		    continue;
		  /* We've found a dominated block; now see if it computes
		     the busy expression and whether or not moving that
		     expression to the "beginning" of that block is safe.  */
		  if (!bitmap_bit_p (antloc[dominated->index], i))
		    continue;

		  occr = find_occr_in_bb (expr->antic_occr, dominated);
		  gcc_assert (occr);

		  /* An occurrence might've been already deleted
		     while processing a dominator of BB.  */
		  if (occr->deleted_p)
		    continue;
		  gcc_assert (NONDEBUG_INSN_P (occr->insn));

		  max_distance = expr->max_distance;
		  if (max_distance > 0)
		    /* Adjust MAX_DISTANCE to account for the fact that
		       OCCR won't have to travel all of DOMINATED, but
		       only part of it.  */
		    max_distance += (bb_size[dominated->index]
				     - to_bb_head[INSN_UID (occr->insn)]);

		  pressure_class = get_pressure_class_and_nregs (occr->insn,
								 &nregs);

		  /* Note if the expression should be hoisted from the dominated
		     block to BB if it can reach DOMINATED unimpaired.

		     Keep track of how many times this expression is hoistable
		     from a dominated block into BB.  */
		  if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
						max_distance, bb_size,
						pressure_class,	&nregs,
						hoisted_bbs, occr->insn))
		    {
		      hoistable++;
		      occrs_to_hoist.safe_push (occr);
		      bitmap_set_bit (from_bbs, dominated->index);
		    }
		}

	      /* If we found more than one hoistable occurrence of this
		 expression, then note it in the vector of expressions to
		 hoist.  It makes no sense to hoist things which are computed
		 in only one BB, and doing so tends to pessimize register
		 allocation.  One could increase this value to try harder
		 to avoid any possible code expansion due to register
		 allocation issues; however experiments have shown that
		 the vast majority of hoistable expressions are only movable
		 from two successors, so raising this threshold is likely
		 to nullify any benefit we get from code hoisting.  */
	      if (hoistable > 1 && dbg_cnt (hoist_insn))
		{
		  /* If (hoistable != vec::length), then there is
		     an occurrence of EXPR in BB itself.  Don't waste
		     time looking for LCA in this case.  */
		  if ((unsigned) hoistable == occrs_to_hoist.length ())
		    {
		      basic_block lca;

		      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
							      from_bbs);
		      if (lca != bb)
			/* Punt, it's better to hoist these occurrences to
			   LCA.  */
			occrs_to_hoist.release ();
		    }
		}
	      else
		/* Punt, no point hoisting a single occurrence.  */
		occrs_to_hoist.release ();

	      if (flag_ira_hoist_pressure
		  && !occrs_to_hoist.is_empty ())
		{
		  /* Increase register pressure of basic blocks to which
		     expr is hoisted because of extended live range of
		     output.  */
		  data = BB_DATA (bb);
		  data->max_reg_pressure[pressure_class] += nregs;
		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
		    {
		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
		      data->max_reg_pressure[pressure_class] += nregs;
		    }
		}
	      else if (flag_ira_hoist_pressure)
		{
		  /* Restore register pressure and live_in info for basic
		     blocks recorded in hoisted_bbs when expr will not be
		     hoisted.  */
		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
		    {
		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
		      bitmap_copy (data->live_in, data->backup);
		      data->max_reg_pressure[pressure_class]
			  = data->old_pressure;
		    }
		}

	      if (flag_ira_hoist_pressure)
		bitmap_clear (hoisted_bbs);

	      insn_inserted_p = 0;

	      /* Walk through the occurrences of the I'th expression we want
		 to hoist to BB and make the transformations.  */
	      FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
		{
		  rtx insn;
		  const_rtx set;

		  gcc_assert (!occr->deleted_p);

		  insn = occr->insn;
		  set = single_set_gcse (insn);

		  /* Create a pseudo-reg to store the result of reaching
		     expressions into.  Get the mode for the new pseudo
		     from the mode of the original destination pseudo.

		     It is important to use new pseudos whenever we
		     emit a set.  This will allow reload to use
		     rematerialization for such registers.  */
		  if (!insn_inserted_p)
		    expr->reaching_reg
		      = gen_reg_rtx_and_attrs (SET_DEST (set));

		  gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
					insn);
		  delete_insn (insn);
		  occr->deleted_p = 1;
		  changed = 1;
		  gcse_subst_count++;

		  if (!insn_inserted_p)
		    {
		      insert_insn_end_basic_block (expr, bb);
		      insn_inserted_p = 1;
		    }
		}

	      occrs_to_hoist.release ();
	      bitmap_clear (from_bbs);
	    }
	}
      domby.release ();
    }

  dom_tree_walk.release ();
  BITMAP_FREE (from_bbs);
  if (flag_ira_hoist_pressure)
    BITMAP_FREE (hoisted_bbs);

  free (bb_size);
  free (to_bb_head);
  free (index_map);

  return changed;
}

/* Return pressure class and number of needed hard registers (through
   *NREGS) of register REGNO.  */
static enum reg_class
get_regno_pressure_class (int regno, int *nregs)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      enum reg_class pressure_class;

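      /* For a pseudo register, the allocno class chosen by IRA (translated
	 to a pressure class) and the pseudo's mode determine how many hard
	 registers it needs.  */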
      pressure_class = reg_allocno_class (regno);
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
	= ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
      return pressure_class;
    }
  else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
	   && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
    {
      *nregs = 1;
      return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
    }
  else
    {
      *nregs = 0;
      return NO_REGS;
    }
}

/* Return pressure class and number of hard registers (through *NREGS)
   for destination of INSN. */
static enum reg_class
get_pressure_class_and_nregs (rtx insn, int *nregs)
{
  rtx reg;
  enum reg_class pressure_class;
  const_rtx set = single_set_gcse (insn);

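  /* The destination of INSN's single set determines the pressure class:
     a memory destination adds no register pressure, while a register
     destination contributes its allocno class (translated to a pressure
     class) and the number of hard registers needed for the source mode.  */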
  reg = SET_DEST (set);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  if (MEM_P (reg))
    {
      *nregs = 0;
      pressure_class = NO_REGS;
    }
  else
    {
      gcc_assert (REG_P (reg));
      pressure_class = reg_allocno_class (REGNO (reg));
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
	= ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
    }
  return pressure_class;
}

/* Increase (if INCR_P) or decrease current register pressure for
   register REGNO.  */
static void
change_pressure (int regno, bool incr_p)
{
  int nregs;
  enum reg_class pressure_class;

  pressure_class = get_regno_pressure_class (regno, &nregs);
  if (! incr_p)
    curr_reg_pressure[pressure_class] -= nregs;
  else
    {
      curr_reg_pressure[pressure_class] += nregs;
      if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  < curr_reg_pressure[pressure_class])
	BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  = curr_reg_pressure[pressure_class];
    }
}

/* Calculate register pressure for each basic block by walking insns
   from last to first.  */
static void
calculate_bb_reg_pressure (void)
{
  int i;
  unsigned int j;
  rtx insn;
  basic_block bb;
  bitmap curr_regs_live;
  bitmap_iterator bi;


  ira_setup_eliminable_regset ();
  curr_regs_live = BITMAP_ALLOC (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_bb = bb;
      BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
      BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
      bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
      bitmap_copy (curr_regs_live, df_get_live_out (bb));
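      /* Start from the registers live at the end of the block and scan
	 the insns backwards, updating the live set and the per-class
	 register pressure at each step.  */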
      for (i = 0; i < ira_pressure_classes_num; i++)
	curr_reg_pressure[ira_pressure_classes[i]] = 0;
      EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
	change_pressure (j, true);

      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  rtx dreg;
	  int regno;
	  df_ref def, use;

	  if (! NONDEBUG_INSN_P (insn))
	    continue;

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      dreg = DF_REF_REAL_REG (def);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
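	      /* A partial or conditional def does not kill the old value,
		 so only a full, unconditional def makes the register dead
		 at this point.  */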
	      if (!(DF_REF_FLAGS (def)
		    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
		{
		  if (bitmap_clear_bit (curr_regs_live, regno))
		    change_pressure (regno, false);
		}
	    }

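	  /* Uses make registers live again; a register that was not live
	     below this insn raises the pressure here.  */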
	  FOR_EACH_INSN_USE (use, insn)
	    {
	      dreg = DF_REF_REAL_REG (use);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
	      if (bitmap_set_bit (curr_regs_live, regno))
		change_pressure (regno, true);
	    }
	}
    }
  BITMAP_FREE (curr_regs_live);

  if (dump_file == NULL)
    return;

  fprintf (dump_file, "\nRegister Pressure: \n");
  FOR_EACH_BB_FN (bb, cfun)
    {
      fprintf (dump_file, "  Basic block %d: \n", bb->index);
      for (i = 0; (int) i < ira_pressure_classes_num; i++)
	{
	  enum reg_class pressure_class;

	  pressure_class = ira_pressure_classes[i];
	  if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
	    continue;

	  fprintf (dump_file, "    %s=%d\n", reg_class_names[pressure_class],
		   BB_DATA (bb)->max_reg_pressure[pressure_class]);
	}
    }
  fprintf (dump_file, "\n");
}

/* Top level routine to perform one code hoisting (aka unification) pass

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* Calculate register pressure for each basic block.  */
  if (flag_ira_hoist_pressure)
    {
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (false, dump_file);
      alloc_aux_for_blocks (sizeof (struct bb_data));
      calculate_bb_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
			    expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  if (flag_ira_hoist_pressure)
    {
      free_aux_for_blocks ();
      free_reg_info ();
    }
  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks_for_fn (cfun),
	       bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
	       gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}

/*  Here we provide the things required to do store motion towards the exit.
    In order for this to be effective, gcse also needed to be taught how to
    move a load when it is killed only by a store to itself.

	    int i;
	    float a[10];

	    void foo(float scale)
	    {
	      for (i=0; i<10; i++)
		a[i] *= scale;
	    }

    'i' is both loaded and stored to in the loop. Normally, gcse cannot move
    the load out since it's live around the loop and stored at the bottom
    of the loop.

      The 'Load Motion' referred to and implemented in this file is
    an enhancement to gcse which when using edge based LCM, recognizes
    this situation and allows gcse to move the load out of the loop.

      Once gcse has hoisted the load, store motion can then push this
    load towards the exit, and we end up with no loads or stores of 'i'
    in the loop.  */

/* This will search the ldst list for a matching expression. If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  ls_expr **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
		   NULL,  /*have_reg_qty=*/false);

  e.pattern = x;
  slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
  if (*slot)
    return *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next         = pre_ldst_mems;
  ptr->expr         = NULL;
  ptr->pattern      = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->loads        = NULL_RTX;
  ptr->stores       = NULL_RTX;
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid      = 0;
  ptr->index        = 0;
  ptr->hash_index   = hash;
  pre_ldst_mems     = ptr;
  *slot = ptr;

  return ptr;
}

/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  free_INSN_LIST_list (& ptr->loads);
  free_INSN_LIST_list (& ptr->stores);

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ld_motion_mems (void)
{
  delete pre_ldst_table;
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}

/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    {
      fprintf (file, "  Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n	 Loads : ");

      if (ptr->loads)
	print_rtl (file, ptr->loads);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n	Stores : ");

      if (ptr->stores)
	print_rtl (file, ptr->stores);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}

/* Returns 1 if X is in the list of ldst only expressions.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  ls_expr **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = pre_ldst_table->find_slot (&e, NO_INSERT);
  if (!slot || (*slot)->invalid)
    return NULL;
  return *slot;
}

/* Load Motion for loads which only kill themselves.  */

/* Return true if x, a MEM, is a simple access with no side effects.
   These are the types of loads we consider for the ld_motion list;
   otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (MEM_VOLATILE_P (x))
    return 0;

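  /* A BLKmode reference cannot be loaded into a single register.  */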
  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}

/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias itself.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}

/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  If there are any uses/defs
   which don't match this criteria, they are invalidated and trimmed out
   later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
	{
	  if (NONDEBUG_INSN_P (insn))
	    {
	      if (GET_CODE (PATTERN (insn)) == SET)
		{
		  rtx src = SET_SRC (PATTERN (insn));
		  rtx dest = SET_DEST (PATTERN (insn));
		  rtx note = find_reg_equal_equiv_note (insn);
		  rtx src_eq;

		  /* Check for a simple LOAD...  */
		  if (MEM_P (src) && simple_mem (src))
		    {
		      ptr = ldst_entry (src);
		      if (REG_P (dest))
			ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
		      else
			ptr->invalid = 1;
		    }
		  else
		    {
		      /* Make sure there isn't a buried load somewhere.  */
		      invalidate_any_buried_refs (src);
		    }

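		  /* A REG_EQUAL note may itself contain a MEM; if that MEM
		     is not simple, invalidate anything buried inside it.  */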
		  if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
		    src_eq = XEXP (note, 0);
		  else
		    src_eq = NULL_RTX;

		  if (src_eq != NULL_RTX
		      && !(MEM_P (src_eq) && simple_mem (src_eq)))
		    invalidate_any_buried_refs (src_eq);

		  /* Check for stores.  Don't worry about aliased ones; they
		     will block any movement we might do later.  We only care
		     about this exact pattern since it is the only
		     circumstance in which we will ignore the aliasing info.  */
		  if (MEM_P (dest) && simple_mem (dest))
		    {
		      ptr = ldst_entry (dest);

		      if (! MEM_P (src)
			  && GET_CODE (src) != ASM_OPERANDS
			  /* Check for REG manually since want_to_gcse_p
			     returns 0 for all REGs.  */
			  && can_assign_to_reg_without_clobbers_p (src))
			ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
		      else
			ptr->invalid = 1;
		    }
		}
	      else
		invalidate_any_buried_refs (PATTERN (insn));
	    }
	}
    }
}

/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
	{
	  /* Delete if we cannot find this mem in the expression list.  */
	  unsigned int hash = ptr->hash_index % expr_hash_table.size;

	  for (expr = expr_hash_table.table[hash];
	       expr != NULL;
	       expr = expr->next_same_hash)
	    if (expr_equiv_p (expr->expr, ptr->pattern))
	      break;
	}
      else
	expr = (struct expr *) 0;

      if (expr)
	{
	  /* Set the expression field if we are keeping it.  */
	  ptr->expr = expr;
	  last = & ptr->next;
	  ptr = ptr->next;
	}
      else
	{
	  *last = ptr->next;
	  pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
	  free_ldst_entry (ptr);
	  ptr = * last;
	}
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}

/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
4070
   copying their SRC to the reaching register, and then storing
4071 4072 4073 4074
   the reaching register into the store location. These keeps the
   correct value in the reaching register for the loads.  */

static void
update_ld_motion_stores (struct expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We can try to find just the REACHED stores, but it shouldn't
	 matter to set the reaching reg everywhere...  some might be
	 dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
	 where reg is the reaching reg used in the load.  We checked in
	 compute_ld_motion_mems that we can replace (set mem expr) with
	 (set reg expr) in that insn.  */
      rtx list = mem_ptr->stores;

      for ( ; list != NULL_RTX; list = XEXP (list, 1))
	{
	  rtx insn = XEXP (list, 0);
	  rtx pat = PATTERN (insn);
	  rtx src = SET_SRC (pat);
	  rtx reg = expr->reaching_reg;
	  rtx copy;

	  /* If we've already copied it, continue.  */
	  if (expr->reaching_reg == src)
	    continue;

	  if (dump_file)
	    {
	      fprintf (dump_file, "PRE:  store updated with reaching reg ");
	      print_rtl (dump_file, reg);
	      fprintf (dump_file, ":\n	");
	      print_inline_rtx (dump_file, insn, 8);
	      fprintf (dump_file, "\n");
	    }

	  copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
	  emit_insn_before (copy, insn);
	  SET_SRC (pat) = reg;
	  df_insn_rescan (insn);

	  /* un-recognize this pattern since it's probably different now.  */
	  INSN_CODE (insn) = -1;
	  gcse_create_count++;
	}
    }
}

/* Return true if the graph is too expensive to optimize. PASS is the
   optimization about to be performed.  */

static bool
is_too_expensive (const char *pass)
{
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple of switch statements.  Rather than simply
     thresholding the number of blocks, use something with a more
     graceful degradation.  */
  if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d edges/basic block",
	       pass, n_basic_blocks_for_fn (cfun),
	       n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));

      return true;
    }

  /* If allocating memory for the dataflow bitmaps would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks_for_fn (cfun)
       * SBITMAP_SET_SIZE (max_reg_num ())
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d registers",
	       pass, n_basic_blocks_for_fn (cfun), max_reg_num ());

      return true;
    }

  return false;
}

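/* Execute the PRE pass: delete unreachable blocks, refresh the dataflow
   information, run one PRE pass, and request a CSE rerun (and a CFG
   cleanup) if anything changed.  */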
static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

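/* Execute the code hoisting pass, mirroring execute_rtl_pre above.  */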
static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

namespace {

const pass_data pass_data_rtl_pre =
{
  RTL_PASS, /* type */
  "rtl pre", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_PRE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_pre : public rtl_opt_pass
{
public:
  pass_rtl_pre (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_pre, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_pre (); }

}; // class pass_rtl_pre

/* We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

bool
pass_rtl_pre::gate (function *fun)
{
  return optimize > 0 && flag_gcse
    && !fun->calls_setjmp
    && optimize_function_for_speed_p (fun)
    && dbg_cnt (pre);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_pre (gcc::context *ctxt)
{
  return new pass_rtl_pre (ctxt);
}

namespace {

const pass_data pass_data_rtl_hoist =
{
  RTL_PASS, /* type */
  "hoist", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_HOIST, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_hoist : public rtl_opt_pass
{
public:
  pass_rtl_hoist (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_hoist (); }

}; // class pass_rtl_hoist

bool
pass_rtl_hoist::gate (function *)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_hoist (gcc::context *ctxt)
{
  return new pass_rtl_hoist (ctxt);
}

#include "gt-gcse.h"