/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   communications of the acm, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "print-rtl.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "intl.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "gcse.h"
#include "gcse-common.h"
#include "function-abi.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes is even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
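
/* As a small illustrative sketch (not taken from any particular test case):
   given a diamond-shaped CFG where only one predecessor of the join block
   computes a + b,

	if (p)                        if (p)
	  x = a + b;                    { x = a + b; t = x; }
	else                    =>    else
	  do_other ();                  { do_other (); t = a + b; }
	y = a + b;                    y = t;

   the computation of a + b in the join block is partially redundant.  PRE
   inserts the expression on the path that lacked it, copies the result into
   a new pseudo (the expression's reaching_reg), and deletes the now fully
   redundant computation; the follow-up CPROP pass cleans up the copies.  */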

/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct gcse_expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct gcse_expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct gcse_occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct gcse_occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  HOST_WIDE_INT max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct gcse_occr
{
  /* Next occurrence of this expression.  */
  struct gcse_occr *next;
  /* The insn that computes the expression.  */
  rtx_insn *insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct gcse_occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct gcse_hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct gcse_expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct gcse_hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance. (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */
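
/* For example (a sketch): a loop that repeatedly loads from and stores to a
   single location *p, where every store writes back the value already held
   in the register used for the loads, satisfies this discipline, so the MEM
   can be kept in a register and the loads removed.  A store through any
   other, possibly aliasing pointer kills the MEM and marks the entry
   invalid.  */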

struct ls_expr
{
  struct gcse_expr * expr;	/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  vec<rtx_insn *> stores;	/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
{
  typedef value_type compare_type;
  static inline hashval_t hash (const ls_expr *);
  static inline bool equal (const ls_expr *, const ls_expr *);
};

/* Hashtable helpers.  */
inline hashval_t
pre_ldst_expr_hasher::hash (const ls_expr *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
			     const ls_expr *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static vec<rtx_insn *> *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs */
static sbitmap *ae_kill;

/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];


static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
static int oprs_available_p (const_rtx, const rtx_insn *);
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
				  HOST_WIDE_INT, struct gcse_hash_table_d *);
static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
static void record_last_reg_set_info (rtx_insn *, int);
static void record_last_mem_set_info (rtx_insn *);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct gcse_hash_table_d *);
static void alloc_hash_table (struct gcse_hash_table_d *);
static void free_hash_table (struct gcse_hash_table_d *);
static void compute_hash_table_work (struct gcse_hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct gcse_hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx_insn *);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
				     basic_block,
				     sbitmap, HOST_WIDE_INT, int *,
				     enum reg_class,
				     int *, bitmap, rtx_insn *);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx_insn *process_insert_insn (struct gcse_expr *);
static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg;
  rtx_insn *insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}

/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx_insn *> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
				    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}

/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct gcse_hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct gcse_expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct gcse_occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp,
			    blocks_with_calls,
			    modify_mem_list_set,
			    canon_modify_mem_list);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
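
/* A small worked sketch of these local properties (illustrative only): in a
   block of the form

	r1 = a + b
	a = ...
	r2 = a + b

   the expression a+b is locally anticipatable (ANTLOC: its first occurrence
   precedes any modification of its operands), locally available (COMP: its
   last occurrence follows the final modification of a), but not transparent
   (TRANSP), because a is modified within the block.  */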

/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  HOST_WIDE_INT max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, mode, 0);

	  if (cost < COSTS_N_INSNS (param_gcse_unrestricted_cost))
	    {
	      max_distance
		= ((HOST_WIDE_INT)param_gcse_cost_distance_ratio * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x, mode);
    }
}
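
/* A worked sketch of the distance computation above, assuming the documented
   default of 10 for param_gcse_cost_distance_ratio (targets and users can
   change it): the formula then reduces to max_distance == cost, so an
   expression whose set_src_cost is 8 may be hoisted at most 8 instructions,
   while a zero-cost expression yields max_distance == 0 and is rejected
   outright.  */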

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx_insn *test_insn;

/* Return true if we can assign X to a pseudo register of mode MODE
   such that the resulting insn does not result in clobbering a hard
   register as a side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, mode))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
	    && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				     x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx_insn *> list = modify_mem_list[bb->index];
  rtx_insn *setter;
  unsigned ix;

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (setter, mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
		      int antic_p,
		      int avail_p, HOST_WIDE_INT max_distance,
		      struct gcse_hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct gcse_expr *cur_expr, *last_expr = NULL;
  struct gcse_occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && (found = expr_equiv_p (cur_expr->expr, x)) == 0)
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct gcse_expr);
      bytes_used += sizeof (struct gcse_expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      HOST_WIDE_INT max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     loads that are redundant due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      HOST_WIDE_INT max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instruction after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}
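
/* A loose sketch of the REG_EQUAL case handled above (RTL abbreviated, not
   taken from a real dump): an address materialized in two insns such as

	(set (reg 130) (high (symbol_ref "sym")))
	(set (reg 131) (lo_sum (reg 130) (symbol_ref "sym")))
	   ;; with a REG_EQUAL note (symbol_ref "sym") on the second insn

   is recorded in the hash table using the note value as SRC, so PRE can
   treat the whole multi-insn address computation as a single expression.  */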

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct gcse_expr **flat_table;
  unsigned int *hash_val;
  struct gcse_expr *expr;

  flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance "
		 HOST_WIDE_INT_PRINT_DEC ")\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx_insn *insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set (consider
   a CALL_INSN).  We merely need to record which insns modify memory.  */

static void
record_last_mem_set_info (rtx_insn *insn)
{
  if (! flag_gcse_lm)
    return;

  record_last_mem_set_info_common (insn, modify_mem_list,
				   canon_modify_mem_list,
				   modify_mem_list_set,
				   blocks_with_calls);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx_insn *last_set_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

1492
/* Top level function to create an expression hash table.
1493 1494 1495 1496 1497 1498 1499 1500

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block

   Currently src must be a pseudo-reg or a const_int.

1501
   TABLE is the table computed.  */
1502 1503

static void
1504
compute_hash_table_work (struct gcse_hash_table_d *table)
1505
{
1506
  int i;
1507

1508
  /* re-Cache any INSN_LIST nodes we have allocated.  */
1509
  clear_modify_mem_tables ();
1510
  /* Some working arrays used to track first and last set in each block.  */
1511
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1512

1513
  for (i = 0; i < max_reg_num (); ++i)
1514
    reg_avail_info[i].last_bb = NULL;
1515

1516
  FOR_EACH_BB_FN (current_bb, cfun)
1517
    {
David Malcolm committed
1518
      rtx_insn *insn;
1519
      unsigned int regno;
1520 1521

      /* First pass over the instructions records information used to
1522
	 determine when registers and memory are first and last set.  */
1523
      FOR_BB_INSNS (current_bb, insn)
1524
	{
1525
	  if (!NONDEBUG_INSN_P (insn))
1526 1527
	    continue;

1528
	  if (CALL_P (insn))
1529
	    {
1530
	      hard_reg_set_iterator hrsi;
1531 1532 1533 1534 1535 1536 1537

	      /* We don't track modes of hard registers, so we need
		 to be conservative and assume that partial kills
		 are full kills.  */
	      HARD_REG_SET callee_clobbers
		= insn_callee_abi (insn).full_and_partial_reg_clobbers ();
	      EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, regno, hrsi)
1538
		record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn)
		  || RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (insn, record_last_set_info, insn);
	}

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   It is used to determine the number of buckets to use.  */

static void
alloc_hash_table (struct gcse_hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct gcse_expr *);
  table->table = GNEWVAR (struct gcse_expr *, n);
}

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct gcse_hash_table_d *table)
{
  free (table->table);
}

/* Compute the expression hash table TABLE.  */

static void
compute_hash_table (struct gcse_hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct gcse_expr *));

  compute_hash_table_work (table);
}

/* Expression tracking support.  */

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      modify_mem_list[i].release ();
      canon_modify_mem_list[i].release ();
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}

/* Compute PRE+LCM working variables.  */

/* Local properties of expressions.  */

/* Nonzero for expressions that are transparent in the block.  */
static sbitmap *transp;

/* Nonzero for expressions that are computed (available) in the block.  */
static sbitmap *comp;

/* Nonzero for expressions that are locally anticipatable in the block.  */
static sbitmap *antloc;

/* Nonzero for expressions where this block is an optimal computation
   point.  */
static sbitmap *pre_optimal;

/* Nonzero for expressions which are redundant in a particular block.  */
static sbitmap *pre_redundant;

/* Nonzero for expressions which should be inserted on a specific edge.  */
static sbitmap *pre_insert_map;

/* Nonzero for expressions which should be deleted in a specific block.  */
static sbitmap *pre_delete_map;

/* Allocate vars used for PRE analysis.  */

static void
alloc_pre_mem (int n_blocks, int n_exprs)
{
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);

  pre_optimal = NULL;
  pre_redundant = NULL;
  pre_insert_map = NULL;
  pre_delete_map = NULL;
  ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);

  /* pre_insert and pre_delete are allocated later.  */
}

/* Free vars used for PRE analysis.  */

static void
free_pre_mem (void)
{
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */

  if (pre_optimal)
    sbitmap_vector_free (pre_optimal);
  if (pre_redundant)
    sbitmap_vector_free (pre_redundant);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);

  transp = comp = NULL;
  pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
}

/* Remove certain expressions from anticipatable and transparent
   sets of basic blocks that have incoming abnormal edge.
   For PRE remove potentially trapping expressions to avoid placing
   them on abnormal edges.  For hoisting remove memory references that
   can be clobbered by calls.  */

static void
prune_expressions (bool pre_p)
{
  struct gcse_expr *expr;
  unsigned int ui;
  basic_block bb;

  auto_sbitmap prune_exprs (expr_hash_table.n_elems);
  bitmap_clear (prune_exprs);
  for (ui = 0; ui < expr_hash_table.size; ui++)
    {
      for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
	{
	  /* Note potentially trapping expressions.  */
	  if (may_trap_p (expr->expr))
	    {
	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
	      continue;
	    }

	  if (!pre_p && contains_mem_rtx_p (expr->expr))
	    /* Note memory references that can be clobbered by a call.
	       We do not split abnormal edges in hoisting, so if a memory
	       reference were hoisted along an abnormal edge, it would be
	       placed /before/ the call.  Therefore, only
	       constant memory references can be hoisted along abnormal
	       edges.  */
	    {
	      rtx x = expr->expr;

	      /* Common cases where we might find the MEM which may allow us
		 to avoid pruning the expression.  */
	      while (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
		x = XEXP (x, 0);

	      /* If we found the MEM, go ahead and look at it to see if it has
		 properties that allow us to avoid pruning its expression out
		 of the tables.  */
	      if (MEM_P (x))
		{
		  if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		      && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
		    continue;

		  if (MEM_READONLY_P (x)
		      && !MEM_VOLATILE_P (x)
		      && MEM_NOTRAP_P (x))
		    /* Constant memory reference, e.g., a PIC address.  */
		    continue;
		}

	      /* ??? Optimally, we would use interprocedural alias
		 analysis to determine if this mem is actually killed
		 by this call.  */

	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
	    }
	}
    }

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge e;
      edge_iterator ei;

      /* If the current block is the destination of an abnormal edge, we
	 kill all trapping (for PRE) and memory (for hoist) expressions
	 because we won't be able to properly place the instruction on
	 the edge.  So make them neither anticipatable nor transparent.
	 This is fairly conservative.

	 ??? For hoisting it may be necessary to check for set-and-jump
	 instructions here, not just for abnormal edges.  The general problem
	 is that when an expression cannot be placed right at the end of
	 a basic block we should account for any side-effects of a subsequent
	 jump instruction that could clobber the expression.  It would
	 be best to implement this check along the lines of
	 should_hoist_expr_to_dom where the target block is already known
	 and, hence, there's no need to conservatively prune expressions on
	 "intermediate" set-and-jump instructions.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	if ((e->flags & EDGE_ABNORMAL)
	    && (pre_p || CALL_P (BB_END (e->src))))
	  {
	    bitmap_and_compl (antloc[bb->index],
				antloc[bb->index], prune_exprs);
	    bitmap_and_compl (transp[bb->index],
				transp[bb->index], prune_exprs);
	    break;
	  }
    }
}

/* It may be necessary to insert a large number of insns on edges to
   make the existing occurrences of expressions fully redundant.  This
   routine examines the set of insertions and deletions and if the ratio
   of insertions to deletions is too high for a particular expression, then
   the expression is removed from the insertion/deletion sets. 

   N_ELEMS is the number of elements in the hash table.  */

static void
prune_insertions_deletions (int n_elems)
{
  sbitmap_iterator sbi;

  /* We always use I to iterate over blocks/edges and J to iterate over
     expressions.  */
  unsigned int i, j;

  /* Counts for the number of times an expression needs to be inserted and
     number of times an expression can be removed as a result.  */
  int *insertions = GCNEWVEC (int, n_elems);
  int *deletions = GCNEWVEC (int, n_elems);

  /* Set of expressions which require too many insertions relative to
     the number of deletions achieved.  We will prune these out of the
     insertion/deletion sets.  */
  auto_sbitmap prune_exprs (n_elems);
  bitmap_clear (prune_exprs);

  /* Iterate over the edges counting the number of times each expression
     needs to be inserted.  */
  for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
    {
      EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
	insertions[j]++;
    }

  /* Similarly for deletions, but those occur in blocks rather than on
     edges.  */
  for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
    {
      EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
	deletions[j]++;
    }

  /* Now that we have accurate counts, iterate over the elements in the
     hash table and see if any need too many insertions relative to the
     number of evaluations that can be removed.  If so, mark them in
     PRUNE_EXPRS.  */
  for (j = 0; j < (unsigned) n_elems; j++)
    if (deletions[j]
	&& (insertions[j] / deletions[j]) > param_max_gcse_insertion_ratio)
      bitmap_set_bit (prune_exprs, j);

  /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
  EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
    {
      for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
	bitmap_clear_bit (pre_insert_map[i], j);

      for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
	bitmap_clear_bit (pre_delete_map[i], j);
    }

  free (insertions);
  free (deletions);
}

/* Top level routine to do the dataflow analysis needed by PRE.  */

static struct edge_list *
compute_pre_data (void)
{
  struct edge_list *edge_list;
  basic_block bb;

  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  prune_expressions (true);
  bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));

  /* Compute ae_kill for each basic block using:

     ~(TRANSP | COMP)
  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
      bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
    }

  edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
			    ae_kill, &pre_insert_map, &pre_delete_map);
  sbitmap_vector_free (antloc);
  antloc = NULL;
  sbitmap_vector_free (ae_kill);
  ae_kill = NULL;

  prune_insertions_deletions (expr_hash_table.n_elems);

  return edge_list;
}

/* PRE utilities */

/* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
   block BB.

   VISITED is a pointer to a working buffer for tracking which BB's have
   been visited.  It is NULL for the top-level call.

   We treat reaching expressions that go through blocks containing the same
   reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
   2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
   2 as not reaching.  The intent is to improve the probability of finding
   only one reaching expression and to reduce register lifetimes by picking
   the closest such expression.  */

static int
pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
			      basic_block bb, char *visited)
{
  edge pred;
  edge_iterator ei;

  FOR_EACH_EDGE (pred, ei, bb->preds)
    {
      basic_block pred_bb = pred->src;

      if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  /* Has this predecessor already been visited?  */
	  || visited[pred_bb->index])
	;/* Nothing to do.  */

      /* Does this predecessor generate this expression?  */
      else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
	{
	  /* Is this the occurrence we're looking for?
	     Note that there's only one generating occurrence per block
	     so we just need to check the block number.  */
	  if (occr_bb == pred_bb)
	    return 1;

	  visited[pred_bb->index] = 1;
	}
      /* Ignore this predecessor if it kills the expression.  */
      else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
	visited[pred_bb->index] = 1;

      /* Neither gen nor kill.  */
      else
	{
	  visited[pred_bb->index] = 1;
	  if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
	    return 1;
	}
    }

  /* All paths have been checked.  */
  return 0;
}

/* The wrapper for pre_expr_reaches_here_work that ensures that any
   memory allocated for that function is returned.  */

static int
pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
{
  int rval;
  char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));

  rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);

  free (visited);
  return rval;
}

/* Generate RTL to copy an EXP to REG and return it.  */

rtx_insn *
prepare_copy_insn (rtx reg, rtx exp)
{
  rtx_insn *pat;

  start_sequence ();

  /* If the expression is something that's an operand, like a constant,
     just copy it to a register.  */
  if (general_operand (exp, GET_MODE (reg)))
    emit_move_insn (reg, exp);

  /* Otherwise, make a new insn to compute this expression and make sure the
     insn will be recognized (this also adds any needed CLOBBERs).  */
  else
    {
      rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));

      if (insn_invalid_p (insn, false))
	gcc_unreachable ();
    }

  pat = get_insns ();
  end_sequence ();

  return pat;
}

/* Generate RTL to copy an EXPR to its `reaching_reg' and return it.  */

static rtx_insn *
process_insert_insn (struct gcse_expr *expr)
{
  rtx reg = expr->reaching_reg;
  /* Copy the expression to make sure we don't have any sharing issues.  */
  rtx exp = copy_rtx (expr->expr);

  return prepare_copy_insn (reg, exp);
}

/* Add EXPR to the end of basic block BB.

   This is used by both the PRE and code hoisting.  */

static void
insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
{
  rtx_insn *insn = BB_END (bb);
  rtx_insn *new_insn;
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
  rtx_insn *pat, *pat_end;

  pat = process_insert_insn (expr);
  gcc_assert (pat && INSN_P (pat));

  pat_end = pat;
  while (NEXT_INSN (pat_end) != NULL_RTX)
    pat_end = NEXT_INSN (pat_end);

  /* If the last insn is a jump, insert EXPR in front [taking care to
     handle cc0, etc. properly].  Similarly, we need to take care of trapping
     instructions in the presence of non-call exceptions.  */

  if (JUMP_P (insn)
      || (NONJUMP_INSN_P (insn)
	  && (!single_succ_p (bb)
	      || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
    {
      /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
	 if cc0 isn't set.  */
      if (HAVE_cc0)
	{
	  rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
	  if (note)
	    insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
	  else
	    {
	      rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
	      if (maybe_cc0_setter
		  && INSN_P (maybe_cc0_setter)
		  && sets_cc0_p (PATTERN (maybe_cc0_setter)))
		insn = maybe_cc0_setter;
	    }
	}

      /* FIXME: What if something in cc0/jump uses value set in new insn?  */
      new_insn = emit_insn_before_noloc (pat, insn, bb);
    }

  /* Likewise if the last insn is a call, as will happen in the presence
     of exception handling.  */
  else if (CALL_P (insn)
	   && (!single_succ_p (bb)
	       || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    {
      /* Keeping in mind targets with small register classes and parameters
         in registers, we search backward and place the instructions before
	 the first parameter is loaded.  Do this for everyone for consistency
	 and a presumption that we'll get better code elsewhere as well.  */

      /* Since different machines initialize their parameter registers
	 in different orders, assume nothing.  Collect the set of all
	 parameter registers.  */
      insn = find_first_parameter_load (insn, BB_HEAD (bb));

      /* If we found all the parameter loads, then we want to insert
	 before the first parameter load.

	 If we did not find all the parameter loads, then we might have
	 stopped on the head of the block, which could be a CODE_LABEL.
	 If we inserted before the CODE_LABEL, then we would be putting
	 the insn in the wrong basic block.  In that case, put the insn
	 after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
      while (LABEL_P (insn)
	     || NOTE_INSN_BASIC_BLOCK_P (insn))
	insn = NEXT_INSN (insn);

      new_insn = emit_insn_before_noloc (pat, insn, bb);
    }
  else
    new_insn = emit_insn_after_noloc (pat, insn, bb);

  while (1)
    {
      if (INSN_P (pat))
	add_label_notes (PATTERN (pat), new_insn);
      if (pat == pat_end)
	break;
      pat = NEXT_INSN (pat);
    }

  gcse_create_count++;

  if (dump_file)
    {
      fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
	       bb->index, INSN_UID (new_insn));
      fprintf (dump_file, "copying expression %d to reg %d\n",
	       expr->bitmap_index, regno);
    }
}

/* Insert partially redundant expressions on edges in the CFG to make
   the expressions fully redundant.  */

static int
pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
{
  int e, i, j, num_edges, set_size, did_insert = 0;
  sbitmap *inserted;

  /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
     if it reaches any of the deleted expressions.  */

  set_size = pre_insert_map[0]->size;
  num_edges = NUM_EDGES (edge_list);
  inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
  bitmap_vector_clear (inserted, num_edges);

  for (e = 0; e < num_edges; e++)
    {
      int indx;
      basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);

      for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
	{
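	  /* Process this edge's insertion bitmap one word at a time;
	     INDX is the expression index of bit 0 of the current word.  */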
	  SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];

	  for (j = indx;
	       insert && j < (int) expr_hash_table.n_elems;
	       j++, insert >>= 1)
	    if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
	      {
		struct gcse_expr *expr = index_map[j];
		struct gcse_occr *occr;

		/* Now look at each deleted occurrence of this expression.  */
		for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
		  {
		    if (! occr->deleted_p)
		      continue;

		    /* Insert this expression on this edge if it would
		       reach the deleted occurrence in BB.  */
		    if (!bitmap_bit_p (inserted[e], j))
		      {
			rtx_insn *insn;
			edge eg = INDEX_EDGE (edge_list, e);

			/* We can't insert anything on an abnormal and
			   critical edge, so we insert the insn at the end of
			   the previous block. There are several alternatives
			   detailed in Morgans book P277 (sec 10.5) for
			   handling this situation.  This one is easiest for
			   now.  */

			if (eg->flags & EDGE_ABNORMAL)
			  insert_insn_end_basic_block (index_map[j], bb);
			else
			  {
			    insn = process_insert_insn (index_map[j]);
			    insert_insn_on_edge (insn, eg);
			  }

			if (dump_file)
			  {
			    fprintf (dump_file, "PRE: edge (%d,%d), ",
				     bb->index,
				     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
			    fprintf (dump_file, "copy expression %d\n",
				     expr->bitmap_index);
			  }

			update_ld_motion_stores (expr);
			bitmap_set_bit (inserted[e], j);
			did_insert = 1;
			gcse_create_count++;
		      }
		  }
	      }
	}
    }

  sbitmap_vector_free (inserted);
  return did_insert;
}

/* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
   Given "old_reg <- expr" (INSN), instead of adding after it
     reaching_reg <- old_reg
   it's better to do the following:
     reaching_reg <- expr
     old_reg      <- reaching_reg
   because this way copy propagation can discover additional PRE
   opportunities.  But if this fails, we try the old way.
   When "expr" is a store, i.e.
   given "MEM <- old_reg", instead of adding after it
     reaching_reg <- old_reg
   it's better to add it before as follows:
     reaching_reg <- old_reg
     MEM          <- reaching_reg.  */

static void
pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
{
  rtx reg = expr->reaching_reg;
  int regno = REGNO (reg);
  int indx = expr->bitmap_index;
  rtx pat = PATTERN (insn);
  rtx set, first_set;
  rtx_insn *new_insn;
  rtx old_reg;
  int i;

  /* This block matches the logic in hash_scan_insn.  */
  switch (GET_CODE (pat))
    {
    case SET:
      set = pat;
      break;

    case PARALLEL:
      /* Search through the parallel looking for the set whose
	 source was the expression that we're interested in.  */
      first_set = NULL_RTX;
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      /* If the source was a REG_EQUAL or REG_EQUIV note, we
		 may not find an equivalent expression, but in this
		 case the PARALLEL will have a single set.  */
	      if (first_set == NULL_RTX)
		first_set = x;
	      if (expr_equiv_p (SET_SRC (x), expr->expr))
	        {
	          set = x;
	          break;
	        }
	    }
	}

      gcc_assert (first_set);
      if (set == NULL_RTX)
        set = first_set;
      break;

    default:
      gcc_unreachable ();
    }

  if (REG_P (SET_DEST (set)))
    {
      old_reg = SET_DEST (set);
      /* Check if we can modify the set destination in the original insn.  */
      if (validate_change (insn, &SET_DEST (set), reg, 0))
        {
          new_insn = gen_move_insn (old_reg, reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
      else
        {
          new_insn = gen_move_insn (reg, old_reg);
          new_insn = emit_insn_after (new_insn, insn);
        }
    }
  else /* This is possible only in case of a store to memory.  */
    {
      old_reg = SET_SRC (set);
      new_insn = gen_move_insn (reg, old_reg);

      /* Check if we can modify the set source in the original insn.  */
      if (validate_change (insn, &SET_SRC (set), reg, 0))
        new_insn = emit_insn_before (new_insn, insn);
      else
        new_insn = emit_insn_after (new_insn, insn);
    }

  gcse_create_count++;

  if (dump_file)
    fprintf (dump_file,
	     "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
	      BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
	      INSN_UID (insn), regno);
}

/* Copy available expressions that reach the redundant expression
   to `reaching_reg'.  */

static void
pre_insert_copies (void)
{
  unsigned int i, added_copy;
  struct gcse_expr *expr;
  struct gcse_occr *occr;
  struct gcse_occr *avail;

  /* For each available expression in the table, copy the result to
     `reaching_reg' if the expression reaches a deleted one.

     ??? The current algorithm is rather brute force.
     Need to do some profiling.  */

  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
      {
	/* If the basic block isn't reachable, PPOUT will be TRUE.  However,
	   we don't want to insert a copy here because the expression may not
	   really be redundant.  So only insert an insn if the expression was
	   deleted.  This test also avoids further processing if the
	   expression wasn't deleted anywhere.  */
	if (expr->reaching_reg == NULL)
	  continue;

	/* Set when we add a copy for that expression.  */
	added_copy = 0;

	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	  {
	    if (! occr->deleted_p)
	      continue;

	    for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
	      {
		rtx_insn *insn = avail->insn;

		/* No need to handle this one if handled already.  */
		if (avail->copied_p)
		  continue;

		/* Don't handle this one if it's a redundant one.  */
		if (insn->deleted ())
		  continue;

		/* Or if the expression doesn't reach the deleted one.  */
		if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
					       expr,
					       BLOCK_FOR_INSN (occr->insn)))
		  continue;

                added_copy = 1;

		/* Copy the result of avail to reaching_reg.  */
		pre_insert_copy_insn (expr, insn);
		avail->copied_p = 1;
	      }
	  }

	  if (added_copy)
            update_ld_motion_stores (expr);
      }
}

struct set_data
{
  rtx_insn *insn;
  const_rtx set;
  int nsets;
};

/* Increment number of sets and record set in DATA.  */

static void
record_set_data (rtx dest, const_rtx set, void *data)
{
  struct set_data *s = (struct set_data *)data;

  if (GET_CODE (set) == SET)
    {
      /* We allow insns having multiple sets, where all but one are
	 dead as single set insns.  In the common case only a single
	 set is present, so we want to avoid checking for REG_UNUSED
	 notes unless necessary.  */
      if (s->nsets == 1
	  && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
	  && !side_effects_p (s->set))
	s->nsets = 0;

      if (!s->nsets)
	{
	  /* Record this set.  */
	  s->nsets += 1;
	  s->set = set;
	}
      else if (!find_reg_note (s->insn, REG_UNUSED, dest)
	       || side_effects_p (set))
	s->nsets += 1;
    }
}

static const_rtx
single_set_gcse (rtx_insn *insn)
{
  struct set_data s;
  rtx pattern;
  
  gcc_assert (INSN_P (insn));

  /* Optimize common case.  */
  pattern = PATTERN (insn);
  if (GET_CODE (pattern) == SET)
    return pattern;

  s.insn = insn;
  s.nsets = 0;
  note_pattern_stores (pattern, record_set_data, &s);

  /* Considered invariant insns have exactly one set.  */
  gcc_assert (s.nsets == 1);
  return s.set;
}

/* Emit move from SRC to DEST noting the equivalence with expression computed
   in INSN.  */

static rtx_insn *
gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
{
  rtx_insn *new_rtx;
  const_rtx set = single_set_gcse (insn);
  rtx set2;
  rtx note;
  rtx eqv = NULL_RTX;

  /* This should never fail since we're creating a reg->reg copy
     we've verified to be valid.  */

  new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);

  /* Note the equivalence for local CSE pass.  Take the note from the old
     set if there was one.  Otherwise record the SET_SRC from the old set
     unless DEST is also an operand of the SET_SRC.  */
  set2 = single_set (new_rtx);
  if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
    return new_rtx;
  if ((note = find_reg_equal_equiv_note (insn)))
    eqv = XEXP (note, 0);
  else if (! REG_P (dest)
	   || ! reg_mentioned_p (dest, SET_SRC (set)))
    eqv = SET_SRC (set);

  if (eqv != NULL_RTX)
    set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));

  return new_rtx;
}

/* Delete redundant computations.
   Deletion is done by changing the insn to copy the `reaching_reg' of
   the expression into the result of the SET.  It is left to later passes
   to propagate the copy or eliminate it.

   Return nonzero if a change is made.  */

static int
pre_delete (void)
{
  unsigned int i;
  int changed;
  struct gcse_expr *expr;
  struct gcse_occr *occr;

  changed = 0;
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
      {
	int indx = expr->bitmap_index;

	/* We only need to search antic_occr since we require ANTLOC != 0.  */
	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	  {
	    rtx_insn *insn = occr->insn;
	    rtx set;
	    basic_block bb = BLOCK_FOR_INSN (insn);

	    /* We only delete insns that have a single_set.  */
	    if (bitmap_bit_p (pre_delete_map[bb->index], indx)
		&& (set = single_set (insn)) != 0
                && dbg_cnt (pre_insn))
	      {
		/* Create a pseudo-reg to store the result of reaching
		   expressions into.  Get the mode for the new pseudo from
		   the mode of the original destination pseudo.  */
		if (expr->reaching_reg == NULL)
		  expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));

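		/* Replace the redundant computation with a copy from the
		   reaching register, then delete the original insn.  */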
		gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
		delete_insn (insn);
		occr->deleted_p = 1;
		changed = 1;
		gcse_subst_count++;

		if (dump_file)
		  {
		    fprintf (dump_file,
			     "PRE: redundant insn %d (expression %d) in ",
			       INSN_UID (insn), indx);
		    fprintf (dump_file, "bb %d, reaching reg is %d\n",
			     bb->index, REGNO (expr->reaching_reg));
		  }
	      }
	  }
      }

  return changed;
}

/* Perform GCSE optimizations using PRE.
   This is called by one_pre_gcse_pass after all the dataflow analysis
   has been done.

   This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
   lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
   Compiler Design and Implementation.

   ??? A new pseudo reg is created to hold the reaching expression.  The nice
   thing about the classical approach is that it would try to use an existing
   reg.  If the register can't be adequately optimized [i.e. we introduce
   reload problems], one could add a pass here to propagate the new register
   through the block.

   ??? We don't handle single sets in PARALLELs because we're [currently] not
   able to copy the rest of the parallel when we insert copies to create full
   redundancies from partial redundancies.  However, there's no reason why we
   can't handle PARALLELs in the cases where there are no partial
   redundancies.  */

static int
pre_gcse (struct edge_list *edge_list)
{
  unsigned int i;
  int did_insert, changed;
  struct gcse_expr **index_map;
  struct gcse_expr *expr;

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;

  /* Delete the redundant insns first so that
     - we know what register to use for the new insns and for the other
       ones with reaching expressions
     - we know which insns are redundant when we go to create copies  */

  changed = pre_delete ();
  did_insert = pre_edge_insert (edge_list, index_map);

  /* In other places with reaching expressions, copy the expression to the
     specially allocated pseudo-reg that reaches the redundant expr.  */
  pre_insert_copies ();
  if (did_insert)
    {
      commit_edge_insertions ();
      changed = 1;
    }

  free (index_map);
  return changed;
}

/* Top level routine to perform one PRE GCSE pass.

   Return nonzero if a change was made.  */

static int
one_pre_gcse_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
    return 0;

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table);
  add_noreturn_fake_exit_edges ();
  if (flag_gcse_lm)
    compute_ld_motion_mems ();

  compute_hash_table (&expr_hash_table);
  if (flag_gcse_lm)
    trim_ld_motion_mems ();
  if (dump_file)
    dump_hash_table (dump_file, "Expression", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      struct edge_list *edge_list;
      alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
      edge_list = compute_pre_data ();
      changed |= pre_gcse (edge_list);
      free_edge_list (edge_list);
      free_pre_mem ();
    }

  if (flag_gcse_lm)
    free_ld_motion_mems ();
  remove_fake_exit_edges ();
  free_hash_table (&expr_hash_table);

  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks_for_fn (cfun),
	       bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
	       gcse_subst_count, gcse_create_count);
    }

  return changed;
}

/* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
   to INSN.  If such notes are added to an insn which references a
   CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
   that note, because the following loop optimization pass requires
   them.  */

/* ??? If there was a jump optimization pass after gcse and before loop,
   then we would not need to do this here, because jump would add the
   necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.  */

static void
add_label_notes (rtx x, rtx_insn *insn)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
	 avoid flow generating (slightly) worse code.

	 We no longer ignore such label references (see LABEL_REF handling in
	 mark_jump_label for additional information).  */

      /* There's no reason for current users to emit jump-insns with
	 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
	 notes.  */
      gcc_assert (!JUMP_P (insn));
      add_reg_note (insn, REG_LABEL_OPERAND, label_ref_label (x));

      if (LABEL_P (label_ref_label (x)))
	LABEL_NUSES (label_ref_label (x))++;

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insn);
    }
}

/* Code Hoisting variables and subroutines.  */

/* Very busy expressions.  */
static sbitmap *hoist_vbein;
static sbitmap *hoist_vbeout;

/* ??? We could compute post dominators and run this algorithm in
   reverse to perform tail merging, doing so would probably be
   more effective than the tail merging code in jump.c.

   It's unclear if tail merging could be run in parallel with
   code hoisting.  It would be nice.  */

/* Allocate vars used for code hoisting analysis.  */

static void
alloc_code_hoist_mem (int n_blocks, int n_exprs)
{
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);

  hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
  hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
}

/* Free vars used for code hoisting analysis.  */

static void
free_code_hoist_mem (void)
{
  sbitmap_vector_free (antloc);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  sbitmap_vector_free (hoist_vbein);
  sbitmap_vector_free (hoist_vbeout);

  free_dominance_info (CDI_DOMINATORS);
}

/* Compute the very busy expressions at entry/exit from each block.

   An expression is very busy if all paths from a given point
   compute the expression.  */

static void
compute_code_hoist_vbeinout (void)
{
  int changed, passes;
  basic_block bb;

  bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));

  passes = 0;
  changed = 1;

  while (changed)
    {
      changed = 0;

      /* We scan the blocks in the reverse order to speed up
	 the convergence.  */
      FOR_EACH_BB_REVERSE_FN (bb, cfun)
	{
	  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      bitmap_intersection_of_succs (hoist_vbeout[bb->index],
					    hoist_vbein, bb);

	      /* Include expressions in VBEout that are calculated
		 in BB and available at its end.  */
	      bitmap_ior (hoist_vbeout[bb->index],
			      hoist_vbeout[bb->index], comp[bb->index]);
	    }

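	  /* VBEin (bb) = ANTLOC (bb) | (VBEout (bb) & TRANSP (bb)).  */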
	  changed |= bitmap_or_and (hoist_vbein[bb->index],
					      antloc[bb->index],
					      hoist_vbeout[bb->index],
					      transp[bb->index]);
	}

      passes++;
    }

  if (dump_file)
    {
      fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);

      FOR_EACH_BB_FN (bb, cfun)
        {
	  fprintf (dump_file, "vbein (%d): ", bb->index);
	  dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
	  fprintf (dump_file, "vbeout(%d): ", bb->index);
	  dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
	}
    }
}

/* Top level routine to do the dataflow analysis needed by code hoisting.  */

static void
compute_code_hoist_data (void)
{
  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  prune_expressions (false);
  compute_code_hoist_vbeinout ();
  calculate_dominance_info (CDI_DOMINATORS);
  if (dump_file)
    fprintf (dump_file, "\n");
}

/* Update register pressure for BB when hoisting an expression from
   instruction FROM, if live ranges of inputs are shrunk.  Also
   maintain live_in information if the live range of a register referenced
   in FROM is shrunk.
   
   Return 0 if register pressure doesn't change, otherwise return
   the number by which register pressure is decreased.
   
   NOTE: Register pressure won't be increased in this function.  */

static int
update_bb_reg_pressure (basic_block bb, rtx_insn *from)
{
  rtx dreg;
  rtx_insn *insn;
  basic_block succ_bb;
  df_ref use, op_ref;
  edge succ;
  edge_iterator ei;
  int decreased_pressure = 0;
  int nregs;
  enum reg_class pressure_class;

  FOR_EACH_INSN_USE (use, from)
    {
      dreg = DF_REF_REAL_REG (use);
      /* The live range of register is shrunk only if it isn't:
	 1. referred on any path from the end of this block to EXIT, or
	 2. referred by insns other than FROM in this block.  */
      FOR_EACH_EDGE (succ, ei, bb->succs)
	{
	  succ_bb = succ->dest;
	  if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    continue;

	  if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
	    break;
	}
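      /* If the loop above exited early, DREG is live into some successor,
	 so its live range is not shrunk by hoisting FROM; skip it.  */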
      if (succ != NULL)
	continue;

      op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
      for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
	{
	  if (!DF_REF_INSN_INFO (op_ref))
	    continue;

	  insn = DF_REF_INSN (op_ref);
	  if (BLOCK_FOR_INSN (insn) == bb
	      && NONDEBUG_INSN_P (insn) && insn != from)
	    break;
	}

      pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
      /* Decrease register pressure and update live_in information for
	 this block.  */
      if (!op_ref && pressure_class != NO_REGS)
	{
	  decreased_pressure += nregs;
	  BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
	  bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
	}
    }
  return decreased_pressure;
}

/* Determine if the expression EXPR should be hoisted to EXPR_BB up in
   flow graph, if it can reach BB unimpared.  Stop the search if the
   expression would need to be moved more than DISTANCE instructions.

   DISTANCE is the number of instructions through which EXPR can be
   hoisted up in flow graph.

   BB_SIZE points to an array which contains the number of instructions
   for each basic block.

   PRESSURE_CLASS and NREGS are register class and number of hard registers
   for storing EXPR.

   HOISTED_BBS points to a bitmap indicating basic blocks through which
   EXPR is hoisted.

   FROM is the instruction from which EXPR is hoisted.

   It's unclear exactly what Muchnick meant by "unimpared".  It seems
   to me that the expression must either be computed or transparent in
   *every* block in the path(s) from EXPR_BB to BB.  Any other definition
   would allow the expression to be hoisted out of loops, even if
   the expression wasn't a loop invariant.

   Contrast this to reachability for PRE where an expression is
   considered reachable if *any* path reaches instead of *all*
   paths.  */

static int
should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
			  basic_block bb, sbitmap visited,
			  HOST_WIDE_INT distance,
			  int *bb_size, enum reg_class pressure_class,
			  int *nregs, bitmap hoisted_bbs, rtx_insn *from)
{
  unsigned int i;
  edge pred;
  edge_iterator ei;
  sbitmap_iterator sbi;
  int visited_allocated_locally = 0;
  int decreased_pressure = 0;

  if (flag_ira_hoist_pressure)
    {
      /* Record old information of basic block BB when it is visited
	 at the first time.  */
      if (!bitmap_bit_p (hoisted_bbs, bb->index))
	{
	  struct bb_data *data = BB_DATA (bb);
	  bitmap_copy (data->backup, data->live_in);
	  data->old_pressure = data->max_reg_pressure[pressure_class];
	}
      decreased_pressure = update_bb_reg_pressure (bb, from);
    }
  /* Terminate the search if distance, for which EXPR is allowed to move,
     is exhausted.  */
  if (distance > 0)
    {
      if (flag_ira_hoist_pressure)
	{
	  /* Prefer to hoist EXPR if register pressure is decreased.  */
	  if (decreased_pressure > *nregs)
	    distance += bb_size[bb->index];
	  /* Let EXPR be hoisted through basic block at no cost if one
	     of following conditions is satisfied:

	     1. The basic block has low register pressure.
	     2. Register pressure won't be increased after hoisting EXPR.

	     Constant expressions are handled conservatively, because
	     hoisting constant expressions aggressively results in worse
	     code.  This decision is made by the observation of CSiBE
	     on ARM target, while it has no obvious effect on other
	     targets like x86, x86_64, mips and powerpc.  */
	  else if (CONST_INT_P (expr->expr)
		   || (BB_DATA (bb)->max_reg_pressure[pressure_class]
			 >= ira_class_hard_regs_num[pressure_class]
		       && decreased_pressure < *nregs))
	    distance -= bb_size[bb->index];
	}
      else
	distance -= bb_size[bb->index];

      if (distance <= 0)
	return 0;
    }
  else
    gcc_assert (distance == 0);

  if (visited == NULL)
    {
      visited_allocated_locally = 1;
      visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
      bitmap_clear (visited);
    }

  FOR_EACH_EDGE (pred, ei, bb->preds)
    {
      basic_block pred_bb = pred->src;

      if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	break;
      else if (pred_bb == expr_bb)
	continue;
      else if (bitmap_bit_p (visited, pred_bb->index))
	continue;
      else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
	break;
      /* Not killed.  */
      else
	{
	  bitmap_set_bit (visited, pred_bb->index);
	  if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
					  visited, distance, bb_size,
					  pressure_class, nregs,
					  hoisted_bbs, from))
	    break;
	}
    }
  if (visited_allocated_locally)
    {
      /* If EXPR can be hoisted to expr_bb, record basic blocks through
	 which EXPR is hoisted in hoisted_bbs.  */
      if (flag_ira_hoist_pressure && !pred)
	{
	  /* Record the basic block from which EXPR is hoisted.  */
	  bitmap_set_bit (visited, bb->index);
	  EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
	    bitmap_set_bit (hoisted_bbs, i);
	}
      sbitmap_free (visited);
    }

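  /* Hoisting is safe only if the walk above completed, i.e. no path
     from EXPR_BB to BB kills EXPR.  */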
  return (pred == NULL);
}

/* Find occurrence in BB.  */

static struct gcse_occr *
find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
{
  /* Find the right occurrence of this expression.  */
  while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
    occr = occr->next;

  return occr;
}

/* Actually perform code hoisting.

   The code hoisting pass can hoist multiple computations of the same
   expression along dominated path to a dominating basic block, like
   from b2/b3 to b1 as depicted below:

          b1      ------
          /\         |
         /  \        |
        bx   by   distance
       /      \      |
      /        \     |
     b2        b3 ------

   Unfortunately code hoisting generally extends the live range of an
   output pseudo register, which increases register pressure and hurts
   register allocation.  To address this issue, an attribute MAX_DISTANCE
   is computed and attached to each expression.  The attribute is computed
   from rtx cost of the corresponding expression and it's used to control
   how long the expression can be hoisted up in flow graph.  As the
   expression is hoisted up in flow graph, GCC decreases its DISTANCE
   and stops the hoist if DISTANCE reaches 0.  Code hoisting can decrease
   register pressure if live ranges of inputs are shrunk.

   Option "-fira-hoist-pressure" implements register pressure directed
   hoisting based on the above method.  The rationale is:
     1. Calculate register pressure for each basic block by reusing IRA
	facility.
     2. When expression is hoisted through one basic block, GCC checks
	the change of live ranges for inputs/output.  The basic block's
	register pressure will be increased because of extended live
	range of output.  However, register pressure will be decreased
	if the live ranges of inputs are shrunk.
     3. After knowing how hoisting affects register pressure, GCC prefers
	to hoist the expression if it can decrease register pressure, by
	increasing DISTANCE of the corresponding expression.
     4. If hoisting the expression increases register pressure, GCC checks
	register pressure of the basic block and decrease DISTANCE only if
	the register pressure is high.  In other words, expression will be
	hoisted through at no cost if the basic block has low register
	pressure.
     5. Update register pressure information for basic blocks through
	which expression is hoisted.  */

static int
hoist_code (void)
{
  basic_block bb, dominated;
  vec<basic_block> dom_tree_walk;
  unsigned int dom_tree_walk_index;
  vec<basic_block> domby;
  unsigned int i, j, k;
  struct gcse_expr **index_map;
  struct gcse_expr *expr;
  int *to_bb_head;
  int *bb_size;
  int changed = 0;
  struct bb_data *data;
  /* Basic blocks that have occurrences reachable from BB.  */
  bitmap from_bbs;
  /* Basic blocks through which expr is hoisted.  */
  bitmap hoisted_bbs = NULL;
  bitmap_iterator bi;

  /* Compute a mapping from expression number (`bitmap_index') to
     hash table entry.  */

  index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
  for (i = 0; i < expr_hash_table.size; i++)
    for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
      index_map[expr->bitmap_index] = expr;

  /* Calculate sizes of basic blocks and note how far
     each instruction is from the start of its block.  We then use this
     data to restrict distance an expression can travel.  */

  to_bb_head = XCNEWVEC (int, get_max_uid ());
  bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      int to_head;

      to_head = 0;
      FOR_BB_INSNS (bb, insn)
	{
	  /* Don't count debug instructions to avoid them affecting
	     decision choices.  */
	  if (NONDEBUG_INSN_P (insn))
	    to_bb_head[INSN_UID (insn)] = to_head++;
	}

      bb_size[bb->index] = to_head;
    }

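  /* The dominator tree walk below starts at the single successor of the
     entry block, so check that it really is the first real basic block.  */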
  gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
	      && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
		  == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));

  from_bbs = BITMAP_ALLOC (NULL);
  if (flag_ira_hoist_pressure)
    hoisted_bbs = BITMAP_ALLOC (NULL);

  dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
					    ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);

  /* Walk over each basic block looking for potentially hoistable
     expressions, nothing gets hoisted from the entry block.  */
  FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
    {
      domby = get_dominated_to_depth (CDI_DOMINATORS, bb,
				      param_max_hoist_depth);

      if (domby.length () == 0)
	continue;

      /* Examine each expression that is very busy at the exit of this
	 block.  These are the potentially hoistable expressions.  */
      for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
	{
	  if (bitmap_bit_p (hoist_vbeout[bb->index], i))
	    {
	      int nregs = 0;
	      enum reg_class pressure_class = NO_REGS;
	      /* Current expression.  */
	      struct gcse_expr *expr = index_map[i];
	      /* Number of occurrences of EXPR that can be hoisted to BB.  */
	      int hoistable = 0;
	      /* Occurrences reachable from BB.  */
	      vec<occr_t> occrs_to_hoist = vNULL;
	      /* We want to insert the expression into BB only once, so
		 note when we've inserted it.  */
	      int insn_inserted_p;
	      occr_t occr;

	      /* If an expression is computed in BB and is available at end of
		 BB, hoist all occurrences dominated by BB to BB.  */
	      if (bitmap_bit_p (comp[bb->index], i))
		{
		  occr = find_occr_in_bb (expr->antic_occr, bb);

		  if (occr)
		    {
		      /* An occurrence might've been already deleted
			 while processing a dominator of BB.  */
		      if (!occr->deleted_p)
			{
			  gcc_assert (NONDEBUG_INSN_P (occr->insn));
			  hoistable++;
			}
		    }
		  else
		    hoistable++;
		}

	      /* We've found a potentially hoistable expression, now
		 we look at every block BB dominates to see if it
		 computes the expression.  */
	      FOR_EACH_VEC_ELT (domby, j, dominated)
		{
		  HOST_WIDE_INT max_distance;

		  /* Ignore self dominance.  */
		  if (bb == dominated)
		    continue;
		  /* We've found a dominated block, now see if it computes
		     the busy expression and whether or not moving that
		     expression to the "beginning" of that block is safe.  */
		  if (!bitmap_bit_p (antloc[dominated->index], i))
		    continue;

		  occr = find_occr_in_bb (expr->antic_occr, dominated);
		  gcc_assert (occr);

		  /* An occurrence might've been already deleted
		     while processing a dominator of BB.  */
		  if (occr->deleted_p)
		    continue;
		  gcc_assert (NONDEBUG_INSN_P (occr->insn));

		  max_distance = expr->max_distance;
		  if (max_distance > 0)
		    /* Adjust MAX_DISTANCE to account for the fact that
		       OCCR won't have to travel all of DOMINATED, but
		       only part of it.  */
		    max_distance += (bb_size[dominated->index]
				     - to_bb_head[INSN_UID (occr->insn)]);

		  pressure_class = get_pressure_class_and_nregs (occr->insn,
								 &nregs);

		  /* Note if the expression should be hoisted from the dominated
		     block to BB if it can reach DOMINATED unimpaired.

		     Keep track of how many times this expression is hoistable
		     from a dominated block into BB.  */
		  if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
						max_distance, bb_size,
						pressure_class,	&nregs,
						hoisted_bbs, occr->insn))
		    {
		      hoistable++;
		      occrs_to_hoist.safe_push (occr);
		      bitmap_set_bit (from_bbs, dominated->index);
		    }
		}

	      /* If we found more than one hoistable occurrence of this
		 expression, then note it in the vector of expressions to
		 hoist.  It makes no sense to hoist things which are computed
		 in only one BB, and doing so tends to pessimize register
		 allocation.  One could increase this value to try harder
		 to avoid any possible code expansion due to register
		 allocation issues; however experiments have shown that
		 the vast majority of hoistable expressions are only movable
		 from two successors, so raising this threshold is likely
		 to nullify any benefit we get from code hoisting.  */
	      if (hoistable > 1 && dbg_cnt (hoist_insn))
		{
		  /* If (hoistable != vec::length), then there is
		     an occurrence of EXPR in BB itself.  Don't waste
		     time looking for LCA in this case.  */
		  if ((unsigned) hoistable == occrs_to_hoist.length ())
		    {
		      basic_block lca;

		      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
							      from_bbs);
		      if (lca != bb)
			/* Punt, it's better to hoist these occurrences to
			   LCA.  */
			occrs_to_hoist.release ();
		    }
		}
	      else
		/* Punt, no point hoisting a single occurrence.  */
		occrs_to_hoist.release ();

	      if (flag_ira_hoist_pressure
		  && !occrs_to_hoist.is_empty ())
		{
		  /* Increase register pressure of basic blocks to which
		     expr is hoisted because of extended live range of
		     output.  */
		  data = BB_DATA (bb);
		  data->max_reg_pressure[pressure_class] += nregs;
		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
		    {
		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
		      data->max_reg_pressure[pressure_class] += nregs;
		    }
		}
	      else if (flag_ira_hoist_pressure)
		{
		  /* Restore register pressure and live_in info for basic
		     blocks recorded in hoisted_bbs when expr will not be
		     hoisted.  */
		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
		    {
		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
		      bitmap_copy (data->live_in, data->backup);
		      data->max_reg_pressure[pressure_class]
			  = data->old_pressure;
		    }
		}

	      if (flag_ira_hoist_pressure)
		bitmap_clear (hoisted_bbs);

	      insn_inserted_p = 0;

	      /* Walk through occurrences of the I'th expression we want
		 to hoist to BB and make the transformations.  */
	      FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
		{
		  rtx_insn *insn;
		  const_rtx set;

		  gcc_assert (!occr->deleted_p);

		  insn = occr->insn;
		  set = single_set_gcse (insn);

		  /* Create a pseudo-reg to store the result of reaching
		     expressions into.  Get the mode for the new pseudo
		     from the mode of the original destination pseudo.

		     It is important to use new pseudos whenever we
		     emit a set.  This will allow reload to use
		     rematerialization for such registers.  */
		  if (!insn_inserted_p)
		    expr->reaching_reg
		      = gen_reg_rtx_and_attrs (SET_DEST (set));

		  gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
					insn);
		  delete_insn (insn);
		  occr->deleted_p = 1;
		  changed = 1;
		  gcse_subst_count++;

		  if (!insn_inserted_p)
		    {
		      insert_insn_end_basic_block (expr, bb);
		      insn_inserted_p = 1;
		    }
		}

	      occrs_to_hoist.release ();
	      bitmap_clear (from_bbs);
	    }
	}
      domby.release ();
    }

  dom_tree_walk.release ();
  BITMAP_FREE (from_bbs);
  if (flag_ira_hoist_pressure)
    BITMAP_FREE (hoisted_bbs);

  free (bb_size);
  free (to_bb_head);
  free (index_map);

  return changed;
}
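
/* As a rough source-level illustration of the transformation performed
   above (the variable names are made up and not taken from any testcase):

	if (c)
	  x = a * b + 4;
	else
	  y = a * b - 7;

   'a * b' is very busy at the end of the block containing the branch, so
   it can be evaluated once in that block and reused:

	t = a * b;
	if (c)
	  x = t + 4;
	else
	  y = t - 7;

   This trades one extra pseudo ('t') for a single evaluation of the
   expression, which is why the pass only runs when optimizing for size.  */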

/* Return pressure class and number of needed hard registers (through
   *NREGS) of register REGNO.  */
static enum reg_class
get_regno_pressure_class (int regno, int *nregs)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      enum reg_class pressure_class;

      pressure_class = reg_allocno_class (regno);
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
	= ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
      return pressure_class;
    }
  else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
	   && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
    {
      *nregs = 1;
      return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
    }
  else
    {
      *nregs = 0;
      return NO_REGS;
    }
}

/* Return pressure class and number of hard registers (through *NREGS)
   for destination of INSN. */
static enum reg_class
get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
{
  rtx reg;
  enum reg_class pressure_class;
  const_rtx set = single_set_gcse (insn);

  reg = SET_DEST (set);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  if (MEM_P (reg))
    {
      *nregs = 0;
      pressure_class = NO_REGS;
    }
  else
    {
      gcc_assert (REG_P (reg));
      pressure_class = reg_allocno_class (REGNO (reg));
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
	= ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
    }
  return pressure_class;
}

/* Increase (if INCR_P) or decrease current register pressure for
   register REGNO.  */
static void
change_pressure (int regno, bool incr_p)
{
  int nregs;
  enum reg_class pressure_class;

  pressure_class = get_regno_pressure_class (regno, &nregs);
  if (! incr_p)
    curr_reg_pressure[pressure_class] -= nregs;
  else
    {
      curr_reg_pressure[pressure_class] += nregs;
      if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  < curr_reg_pressure[pressure_class])
	BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  = curr_reg_pressure[pressure_class];
    }
}

/* Calculate register pressure for each basic block by walking insns
   from last to first.  */
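/* A small made-up illustration of the walk below (assuming r1, r2 and r3
   all map to the same pressure class): for a block with live-out = {r1, r3}
   ending in

	r2 = r1 * r1
	r3 = r2 + 1

   the backward walk starts with a pressure of 2, the def of r3 drops it
   to 1, the use of r2 raises it back to 2, and the def of r2 drops it to
   1 again, so the maximum recorded for the block is 2.  */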
static void
calculate_bb_reg_pressure (void)
{
  int i;
  unsigned int j;
  rtx_insn *insn;
  basic_block bb;
  bitmap curr_regs_live;
  bitmap_iterator bi;


  ira_setup_eliminable_regset ();
  curr_regs_live = BITMAP_ALLOC (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_bb = bb;
      BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
      BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
      bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
      bitmap_copy (curr_regs_live, df_get_live_out (bb));
      for (i = 0; i < ira_pressure_classes_num; i++)
	curr_reg_pressure[ira_pressure_classes[i]] = 0;
      EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
	change_pressure (j, true);

      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  rtx dreg;
	  int regno;
	  df_ref def, use;

	  if (! NONDEBUG_INSN_P (insn))
	    continue;

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      dreg = DF_REF_REAL_REG (def);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
	      if (!(DF_REF_FLAGS (def)
		    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
		{
		  if (bitmap_clear_bit (curr_regs_live, regno))
		    change_pressure (regno, false);
		}
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    {
	      dreg = DF_REF_REAL_REG (use);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
	      if (bitmap_set_bit (curr_regs_live, regno))
		change_pressure (regno, true);
	    }
	}
    }
  BITMAP_FREE (curr_regs_live);

  if (dump_file == NULL)
    return;

  fprintf (dump_file, "\nRegister Pressure: \n");
  FOR_EACH_BB_FN (bb, cfun)
    {
      fprintf (dump_file, "  Basic block %d: \n", bb->index);
      for (i = 0; (int) i < ira_pressure_classes_num; i++)
	{
	  enum reg_class pressure_class;

	  pressure_class = ira_pressure_classes[i];
	  if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
	    continue;

	  fprintf (dump_file, "    %s=%d\n", reg_class_names[pressure_class],
		   BB_DATA (bb)->max_reg_pressure[pressure_class]);
	}
    }
  fprintf (dump_file, "\n");
}

/* Top level routine to perform one code hoisting (aka unification) pass

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* Calculate register pressure for each basic block.  */
  if (flag_ira_hoist_pressure)
    {
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (false, dump_file);
      alloc_aux_for_blocks (sizeof (struct bb_data));
      calculate_bb_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
			    expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  if (flag_ira_hoist_pressure)
    {
      free_aux_for_blocks ();
      free_reg_info ();
    }
  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks_for_fn (cfun),
	       bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
	       gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}

/*  Here we provide the things required to do store motion towards the exit.
    In order for this to be effective, gcse also needed to be taught how to
    move a load when it is killed only by a store to itself.

	    int i;
	    float a[10];

	    void foo(float scale)
	    {
	      for (i=0; i<10; i++)
		a[i] *= scale;
	    }

    'i' is both loaded and stored to in the loop. Normally, gcse cannot move
    the load out since it's live around the loop, and stored at the bottom
    of the loop.

      The 'Load Motion' referred to and implemented in this file is
    an enhancement to gcse which, when using edge based LCM, recognizes
    this situation and allows gcse to move the load out of the loop.

      Once gcse has hoisted the load, store motion can then push this
    load towards the exit, and we end up with no loads or stores of 'i'
    in the loop.  */
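
/* Roughly, the intended end result for the example above (illustrative
   only; 't' stands for the register holding the value of 'i'):

	    void foo(float scale)
	    {
	      int t;
	      for (t=0; t<10; t++)
		a[t] *= scale;
	      i = t;
	    }

   i.e. the load of 'i' has been moved out of the loop and the store of
   'i' has been pushed past it, so the loop body no longer touches 'i'
   in memory.  */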

/* This will search the ldst list for a matching expression. If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  ls_expr **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
		   NULL,  /*have_reg_qty=*/false);

  e.pattern = x;
  slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
  if (*slot)
    return *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next         = pre_ldst_mems;
  ptr->expr         = NULL;
  ptr->pattern      = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->stores.create (0);
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid      = 0;
  ptr->index        = 0;
  ptr->hash_index   = hash;
  pre_ldst_mems     = ptr;
  *slot = ptr;

  return ptr;
}

/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  ptr->stores.release ();

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ld_motion_mems (void)
{
  delete pre_ldst_table;
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}

/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    {
      fprintf (file, "  Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n	Stores : ");
      print_rtx_insn_vec (file, ptr->stores);

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}

/* Returns 1 if X is in the list of ldst only expressions.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  ls_expr **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = pre_ldst_table->find_slot (&e, NO_INSERT);
  if (!slot || (*slot)->invalid)
    return NULL;
  return *slot;
}

/* Load Motion for loads which only kill themselves.  */

/* Return true if x, a MEM, is a simple access with no side effects.
   These are the types of loads we consider for the ld_motion list;
   otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}

/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias itself.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}

/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  If there are any uses/defs
   which don't match these criteria, they are invalidated and trimmed out
   later.  */
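/* As a made-up C-level illustration: with a file-scope 'int g;', a plain
   load of 'g' into a pseudo or a store of a pseudo into 'g' is a
   candidate for the list, while volatile accesses, BLKmode (aggregate)
   copies and references involving the stack pointer are rejected by
   simple_mem and left to the usual aliasing handling.  */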

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx_insn *insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
	{
	  if (NONDEBUG_INSN_P (insn))
	    {
	      if (GET_CODE (PATTERN (insn)) == SET)
		{
		  rtx src = SET_SRC (PATTERN (insn));
		  rtx dest = SET_DEST (PATTERN (insn));

		  /* Check for a simple load.  */
		  if (MEM_P (src) && simple_mem (src))
		    {
		      ptr = ldst_entry (src);
		      if (!REG_P (dest))
			ptr->invalid = 1;
		    }
		  else
		    {
		      /* Make sure there isn't a buried load somewhere.  */
		      invalidate_any_buried_refs (src);
		    }

		  /* Check for a simple load through a REG_EQUAL note.  */
		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
		  if (note
		      && REG_NOTE_KIND (note) == REG_EQUAL
		      && (src_eq = XEXP (note, 0))
		      && !(MEM_P (src_eq) && simple_mem (src_eq)))
		    invalidate_any_buried_refs (src_eq);

		  /* Check for stores. Don't worry about aliased ones, they
		     will block any movement we might do later. We only care
		     about this exact pattern since those are the only
		     circumstances in which we will ignore the aliasing info.  */
		  if (MEM_P (dest) && simple_mem (dest))
		    {
		      ptr = ldst_entry (dest);
		      machine_mode src_mode = GET_MODE (src);
		      if (! MEM_P (src)
			  && GET_CODE (src) != ASM_OPERANDS
			  /* Check for REG manually since want_to_gcse_p
			     returns 0 for all REGs.  */
			  && can_assign_to_reg_without_clobbers_p (src,
								    src_mode))
			ptr->stores.safe_push (insn);
		      else
			ptr->invalid = 1;
		    }
		}
	      else
		{
		  /* Invalidate all MEMs in the pattern and...  */
		  invalidate_any_buried_refs (PATTERN (insn));

		  /* ...in REG_EQUAL notes for PARALLELs with single SET.  */
		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
		  if (note
		      && REG_NOTE_KIND (note) == REG_EQUAL
		      && (src_eq = XEXP (note, 0)))
		    invalidate_any_buried_refs (src_eq);
		}
	    }
	}
    }
}

/* Remove any references that have either been invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct gcse_expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
	{
	  /* Delete if we cannot find this mem in the expression list.  */
	  unsigned int hash = ptr->hash_index % expr_hash_table.size;

	  for (expr = expr_hash_table.table[hash];
	       expr != NULL;
	       expr = expr->next_same_hash)
	    if (expr_equiv_p (expr->expr, ptr->pattern))
	      break;
	}
      else
	expr = (struct gcse_expr *) 0;

      if (expr)
	{
	  /* Set the expression field if we are keeping it.  */
	  ptr->expr = expr;
	  last = & ptr->next;
	  ptr = ptr->next;
	}
      else
	{
	  *last = ptr->next;
	  pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
	  free_ldst_entry (ptr);
	  ptr = * last;
	}
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}

/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */

static void
update_ld_motion_stores (struct gcse_expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We can try to find just the REACHED stores, but it shouldn't
	 matter to set the reaching reg everywhere...  some might be
	 dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
	 where reg is the reaching reg used in the load.  We checked in
	 compute_ld_motion_mems that we can replace (set mem expr) with
	 (set reg expr) in that insn.  */
      rtx_insn *insn;
      unsigned int i;
      FOR_EACH_VEC_ELT_REVERSE (mem_ptr->stores, i, insn)
	{
	  rtx pat = PATTERN (insn);
	  rtx src = SET_SRC (pat);
	  rtx reg = expr->reaching_reg;

	  /* If we've already copied it, continue.  */
	  if (expr->reaching_reg == src)
	    continue;

	  if (dump_file)
	    {
	      fprintf (dump_file, "PRE:  store updated with reaching reg ");
	      print_rtl (dump_file, reg);
	      fprintf (dump_file, ":\n	");
	      print_inline_rtx (dump_file, insn, 8);
	      fprintf (dump_file, "\n");
	    }

	  rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
	  emit_insn_before (copy, insn);
	  SET_SRC (pat) = reg;
	  df_insn_rescan (insn);

	  /* un-recognize this pattern since it's probably different now.  */
	  INSN_CODE (insn) = -1;
	  gcse_create_count++;
	}
    }
}

/* Return true if the graph is too expensive to optimize. PASS is the
   optimization about to be performed.  */

bool
gcse_or_cprop_is_too_expensive (const char *pass)
{
  int memory_request = (n_basic_blocks_for_fn (cfun)
			* SBITMAP_SET_SIZE (max_reg_num ())
			* sizeof (SBITMAP_ELT_TYPE));
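  /* This is roughly one bit per (block, register) pair: with a 64-bit
     SBITMAP_ELT_TYPE, e.g. 5000 blocks and 40000 registers give
     5000 * 625 * 8 bytes, i.e. about 25 MB for one such array of
     per-block bitmaps (the numbers are purely illustrative).  */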
  
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple switch statements.  Rather than simply
     threshold the number of blocks, we use something with a more
     graceful degradation.  */
  if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d edges/basic block",
	       pass, n_basic_blocks_for_fn (cfun),
	       n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));

      return true;
    }

  /* If allocating memory for the dataflow bitmaps would take up too much
     storage it's better just to disable the optimization.  */
  if (memory_request > param_max_gcse_memory)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d registers; "
	       "increase %<--param max-gcse-memory%> above %d",
	       pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
	       memory_request);

      return true;
    }

  return false;
}

static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

namespace {

const pass_data pass_data_rtl_pre =
{
  RTL_PASS, /* type */
  "rtl pre", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_PRE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_pre : public rtl_opt_pass
{
public:
  pass_rtl_pre (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_pre, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_pre (); }

}; // class pass_rtl_pre

/* We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

bool
pass_rtl_pre::gate (function *fun)
{
  return optimize > 0 && flag_gcse
    && !fun->calls_setjmp
    && optimize_function_for_speed_p (fun)
    && dbg_cnt (pre);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_pre (gcc::context *ctxt)
{
  return new pass_rtl_pre (ctxt);
}

namespace {

const pass_data pass_data_rtl_hoist =
{
  RTL_PASS, /* type */
  "hoist", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_HOIST, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_hoist : public rtl_opt_pass
{
public:
  pass_rtl_hoist (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_hoist (); }

}; // class pass_rtl_hoist

bool
pass_rtl_hoist::gate (function *)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_hoist (gcc::context *ctxt)
{
  return new pass_rtl_hoist (ctxt);
}

/* Reset all state within gcse.c so that we can rerun the compiler
   within the same process.  For use by toplev::finalize.  */

void
gcse_c_finalize (void)
{
  test_insn = NULL;
}

#include "gt-gcse.h"