/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
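
/* For illustration (a rough sketch, block numbers are made up): a forwarder
   block is a block that does nothing but transfer control to a single
   successor, e.g.

       bb2:  goto bb5;

   Edge forwarding redirects an edge bb1 -> bb2 straight to bb5, after which
   bb2 typically becomes unreachable and is removed.  */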

#include "config.h"
#include "system.h"
34 35
#include "coretypes.h"
#include "tm.h"
36
#include "rtl.h"
37
#include "tree.h"
38
#include "hard-reg-set.h"
39
#include "regs.h"
40 41 42
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
43
#include "diagnostic-core.h"
44
#include "cselib.h"
45
#include "params.h"
46
#include "tm_p.h"
Alexandre Oliva committed
47
#include "target.h"
48 49 50 51 52
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
Steven Bosscher committed
53
#include "function.h" /* For inline functions in emit-rtl.h they need crtl.  */
54
#include "emit-rtl.h"
55 56 57
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
58 59 60 61 62 63 64 65
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "predict.h"
#include "basic-block.h"
66
#include "df.h"
67
#include "dce.h"
68
#include "dbgcnt.h"
69
#include "emit-rtl.h"
70
#include "rtl-iter.h"
71

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);

/* Set flags for newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
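
/* For illustration, a rough sketch of the shape this function looks for
   (the condition and labels are made up):

       if (cc) goto L2;               if (!cc) goto L1;
       goto L1;                =>   L2:
     L2:                              ...
       ...

   The conditional branch is inverted so that it jumps to the destination of
   the unconditional jump, the jump-only block is deleted, and the old branch
   target becomes the fallthrough.  */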

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}

/* Attempt to prove that the operation is a NOOP using cselib, or else mark
   its effect on the registers in NONEQUAL.  Used by jump threading.  */
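
/* For example (a sketch; register numbers are made up): a (clobber (reg 65))
   clears reg 65 from NONEQUAL, since its old value is dead and need not
   match; a (set (reg 65) (reg 65)) that cselib can prove to be a no-op
   leaves NONEQUAL untouched; any other (set (reg 65) ...) marks reg 65 as
   not equal.  */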

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  if (HARD_REGISTER_NUM_P (regno))
	    bitmap_clear_range (nonequal, regno,
				hard_regno_nregs[regno][GET_MODE (dest)]);
	  else
	    bitmap_clear_bit (nonequal, regno);
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      if (HARD_REGISTER_NUM_P (regno))
	bitmap_set_range (nonequal, regno,
			  hard_regno_nregs[regno][GET_MODE (dest)]);
      else
	bitmap_set_bit (nonequal, regno);
      return false;

    default:
      return false;
    }
}

/* Return true if X contains a register in NONEQUAL.  */
static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
	{
	  unsigned int regno = REGNO (x);
	  if (REGNO_REG_SET_P (nonequal, regno))
	    return true;
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (x)];
	      while (--n > 0)
		if (REGNO_REG_SET_P (nonequal, regno + n))
		  return true;
	    }
	}
    }
  return false;
}

/* Attempt to prove that basic block B will have no side effects and will
   always continue along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */
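
/* For illustration (a rough sketch, block names are made up): if E is the
   taken edge of

       bb1:  if (x == 0) goto bb3;

   and bb3 ends in

       bb3:  if (x != 0) goto bb5;

   with no side effects in between, the second condition is already decided
   when bb3 is entered via E, so E can be threaded directly to bb3's
   fallthrough successor.  */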

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued along the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register whose value may differ from
     what it was in the former block.  */
  if (mentions_nonequal_regs (cond2, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
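
/* For illustration (block numbers are made up): given a chain of forwarder
   blocks

       bb1 -> bb2 -> bb3 -> bb4

   where bb2 and bb3 contain nothing but jumps, the edge leaving bb1 is
   redirected straight to bb4, and the counts and frequencies of bb2 and bb3
   are reduced by those of the redirected edge.  With CLEANUP_THREADING,
   thread_jump may extend the walk across conditional blocks whose outcome
   is already known on this edge.  */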

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && JUMP_P (BB_END (first))
	  && CROSSING_JUMP_P (BB_END (first)))
	return changed;

      while (counter < n_basic_blocks_for_fn (cfun))
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= (target->flags & BB_MODIFIED) != 0;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks_for_fn (cfun);
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  location_t new_locus = single_succ_edge (target)->goto_locus;
		  location_t locus = goto_locus;

		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
		      && new_locus != locus)
		    new_target = NULL;
		  else
		    {
		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			locus = new_locus;

		      rtx_insn *last = BB_END (target);
		      if (DEBUG_INSN_P (last))
			last = prev_nondebug_insn (last);
		      if (last && INSN_P (last))
			new_locus = INSN_LOCATION (last);
		      else
			new_locus = UNKNOWN_LOCATION;

		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
			  && new_locus != locus)
			new_target = NULL;
		      else
			{
			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			    locus = new_locus;

			  goto_locus = locus;
			}
		    }
		}
	    }

	  /* Allow threading over only one edge at a time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge,
					      n_basic_blocks_for_fn (cfun));
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks_for_fn (cfun);
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges
			      < (n_basic_blocks_for_fn (cfun)
				 - NUM_FIXED_BLOCKS));
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks_for_fn (cfun))
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = apply_probability (b->frequency, edge_probability);

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}


/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);


  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   where C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
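
/* Roughly, the cases handled below are:

     - B falls through into C: merge the two blocks in place (unless C is
       a loop latch, which must be preserved).
     - Otherwise, when CLEANUP_EXPENSIVE is set and neither block is a
       forwarder: move C after B when C has no outgoing fallthru, or move B
       before C, first forcing B's incoming fallthru (if any) onto an
       explicit jump.  */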

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
	return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}


/* Remove the memory attributes of the MEM expressions X and Y
   wherever they are not equal.  */
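
/* For example (a sketch): if X and Y are MEMs whose known sizes are 4 and 8,
   both end up with size 8 (the maximum) and with the smaller of the two
   alignments; differing alias sets, MEM_EXPRs or offsets are simply cleared
   on both, so the surviving attributes are valid for either insn.  */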

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  HOST_WIDE_INT mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }
	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
		   || (MEM_OFFSET_KNOWN_P (x)
		       && MEM_OFFSET (x) != MEM_OFFSET (y)))
	    {
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }

	  if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
	    {
	      mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
	      set_mem_size (x, mem_size);
	      set_mem_size (y, mem_size);
	    }
	  else
	    {
	      clear_mem_size (x);
	      clear_mem_size (y);
	    }

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }
  if (code == MEM)
    {
      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
	{
	  MEM_READONLY_P (x) = 0;
	  MEM_READONLY_P (y) = 0;
	}
      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
	{
	  MEM_NOTRAP_P (x) = 0;
	  MEM_NOTRAP_P (y) = 0;
	}
      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
	{
	  MEM_VOLATILE_P (x) = 1;
	  MEM_VOLATILE_P (y) = 1;
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}


 /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
    different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
        continue;
      if (reload_completed
          ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
        continue;

      return false;
    }

  return true;
}

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */
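
/* For example (register numbers and the constant are made up): if I1 is

       (set (reg:SI 60) (reg:SI 61))    with a REG_EQUAL note (const_int 7)

   and I2 is

       (set (reg:SI 60) (const_int 7))  with a matching note,

   the answer is dir_forward: I1 may be replaced by I2, since the constant
   load is valid anywhere, but not the other way around, because reg 61 need
   not hold 7 at I2's location.  */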

static enum replace_direction
can_replace_by (rtx_insn *i1, rtx_insn *i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
        ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical req_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);
  if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
      || !CONST_INT_P (XEXP (note1, 0)))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:
        |bo fw bw no
     ---+-----------
     bo |bo fw bw no
     fw |-- fw no no
     bw |-- -- bw no
     no |-- -- -- no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped. */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
        return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
         have differing incoming stack levels.  */
      if (!frame_pointer_needed
          && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
	return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
	return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
	return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
	return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
	 otherwise errors might be reported on incorrect lines.  */
      if (flag_sanitize & SANITIZE_ADDRESS)
	{
	  rtx call = get_call_rtx_from (i1);
	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
	    {
	      rtx symbol = XEXP (XEXP (call, 0), 0);
	      if (SYMBOL_REF_DECL (symbol)
		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
		{
		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
		       == BUILT_IN_NORMAL)
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 >= BUILT_IN_ASAN_REPORT_LOAD1
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 <= BUILT_IN_ASAN_STOREN)
		    return dir_none;
		}
	    }
	}
    }

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
	return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}

/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

static void
merge_notes (rtx_insn *i1, rtx_insn *i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}

 /* Walk from I1 in BB1 backward until the next non-debug insn, and return the
    resulting insn in I1 and the corresponding bb in BB1.  At the head of a
    bb, if there is a predecessor bb that reaches this bb via fallthru and
    FOLLOW_FALLTHRU is set, walk further into the predecessor bb and record
    this in DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
                       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
        {
          *i1 = PREV_INSN (*i1);
          continue;
        }

      if (!follow_fallthru)
        return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
          || !single_succ_p (fallthru->src))
        return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
     }
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that are either equivalent, or allow forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only equivalent
   sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
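
/* For example (a sketch): if BB1 and BB2 both end with the same three active
   insns followed by equivalent simple jumps, the function returns 3 and
   leaves *F1 and *F2 pointing at the first insn of each matched tail, which
   a caller can then use to redirect jumps into one copy of the tail and
   delete the other.  */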

int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
		      rtx_insn **f2, enum replace_direction *dir_p)
{
  rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
  int ninsns = 0;
  enum replace_direction dir, last_dir, afterlast_dir;
  bool follow_fallthru, did_fallthru;

  if (dir_p)
    dir = *dir_p;
  else
    dir = dir_both;
  afterlast_dir = dir;
  last_dir = afterlast_dir;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.
	 Don't count any jumps if dir_p is NULL.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* In the following example, we can replace all jumps to C by jumps to A.

         This removes 4 duplicate insns.
         [bb A] insn1            [bb C] insn1
                insn2                   insn2
         [bb B] insn3                   insn3
                insn4                   insn4
                jump_insn               jump_insn

         We could also replace all jumps to A by jumps to C, but that leaves B
         alive, and removes only 2 duplicate insns.  In a subsequent crossjump
         step, all jumps to B would be replaced with jumps to the middle of C,
         achieving the same result with more effort.
         So we allow only the first possibility, which means that we don't allow
         fallthru in the block that's being replaced.  */

      follow_fallthru = dir_p && dir != dir_forward;
      walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_backward;

      follow_fallthru = dir_p && dir != dir_backward;
      walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_forward;

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
      if (dir == dir_none || (!dir_p && dir != dir_both))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  afterlast_dir = last_dir;
	  last_dir = dir;
	  if (active_insn_p (i1))
	    ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      bb1 = BLOCK_FOR_INSN (last1);
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      bb2 = BLOCK_FOR_INSN (last2);
      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  if (dir_p)
    *dir_p = last_dir;
  return ninsns;
}

/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  If STOP_AFTER is zero, count all INSN_P insns, if it is
   non-zero, only count active insns.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
				  rtx_insn **f2, int stop_after)
{
  rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
	{
	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i1 = NEXT_INSN (i1);
	}

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
	{
	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i2 = NEXT_INSN (i2);
	}

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
	break;

      if (NOTE_P (i1) || NOTE_P (i2)
	  || JUMP_P (i1) || JUMP_P (i2))
	break;

      /* A sanity check to make sure we're not merging insns with different
	 effects on EH.  If only one of them ends a basic block, it shouldn't
	 have an EH edge; if both end a basic block, there should be the same
	 number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
	   && nehedges1 > 0)
	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
	      && nehedges2 > 0)
	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
	      && nehedges1 != nehedges2))
	break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  beforelast1 = last1, beforelast2 = last2;
	  last1 = i1, last2 = i2;
	  if (!stop_after || active_insn_p (i1))
	    ninsns++;
	}

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
	  || (stop_after > 0 && ninsns == stop_after))
	break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;
#endif

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
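
/* For illustration (a sketch, the condition is made up): two blocks that
   both end in

       if (x == 0) goto L;

   match when their taken and fallthrough destinations agree (possibly
   through forwarder blocks).  If the destinations are swapped, they can
   still match provided one condition is the reverse of the other and the
   branch probabilities are not wildly different.  */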

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If we performed shrink-wrapping, edges to the exit block can
     only be distinguished for JUMP_INSNs.  The two paths may differ in
     whether they went through the prologue.  Sibcalls are fine, we know
     that we either didn't need or inserted an epilogue before them.  */
  if (crtl->shrink_wrapped
      && single_succ_p (bb1)
      && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      && !JUMP_P (BB_END (bb1))
      && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
    return false;
  
  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && optimize_bb_for_speed_p (bb1)
	  && optimize_bb_for_speed_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
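	  /* For instance, if BB1's branch is taken 90% of the time and BB2's
	     only 20% of the time, the probabilities differ by 70% of
	     REG_BR_PROB_BASE, which exceeds the 50% cutoff below, so the
	     blocks are not merged.  */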
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx_jump_table_data *table1, *table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are the
	     same, the jump tables are the same too.  So disable crossjumping
	     of blocks BB1 and BB2 because when deleting the common insns at
	     the end of BB1 by delete_basic_block () the jump table would be
	     deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		{
		  identical = true;
		}
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we can compare the instructions.  */
		  replace_label_in_insn (BB_END (bb1), label1, label2, false);

		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
			   == dir_both);
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Restore the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  replace_label_in_insn (BB_END (bb1), label2, label1, false);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* Find the last non-debug non-note instruction in each bb, except
     stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
     handles that case specially. old_insns_match_p does not handle
     other types of instruction notes.  */
  rtx_insn *last1 = BB_END (bb1);
  rtx_insn *last2 = BB_END (bb2);
  while (!NOTE_INSN_BASIC_BLOCK_P (last1)
	 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
    last1 = PREV_INSN (last1);
  while (!NOTE_INSN_BASIC_BLOCK_P (last2)
	 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
    last2 = PREV_INSN (last2);
  gcc_assert (last1 && last2);

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (old_insns_match_p (mode, last1, last2) != dir_both)
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these needs more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  bool nonfakeedges = false;
  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if ((e1->flags & EDGE_FAKE) == 0)
	nonfakeedges = true;

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
     and the last real insn doesn't have a REG_ARGS_SIZE note, don't
     attempt to optimize, as the two basic blocks might have different
     REG_ARGS_SIZE depths.  For noreturn calls and unconditional
     traps there should be REG_ARGS_SIZE notes, but they could be
     missing for __builtin_unreachable () uses.  */
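  /* (Illustrative example: each block might have pushed a different number
     of argument bytes before its trap or noreturn call, and the REG_ARGS_SIZE
     note on the last insn is what records that depth.)  */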
  if (!nonfakeedges
      && !ACCUMULATE_OUTGOING_ARGS
      && (!INSN_P (last1)
          || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge. It is required for RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
        d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
        {
          basic_block d2 = e2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = EDGE_SUCC (d2, 0)->dest;
          if (d1 == d2)
            break;
        }

      if (!e2)
        return false;
    }

  return true;
}

/* Returns true if BB basic block has a preserve label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
          && block_label (bb)
          && LABEL_PRESERVE_P (block_label (bb)));
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
   or the other way around (dir_backward).  DIR specifies the allowed
   replacement direction.  */
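/* In the dir_forward case the common tail of E1->SRC becomes redundant:
   E1->SRC is split just before the matched sequence, the resulting edge is
   redirected into E2->SRC (or into a block split out of it), and the detached
   tail is deleted, leaving a single copy of the shared code.  */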

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2,
                       enum replace_direction dir)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  basic_block osrc1, osrc2, redirect_edges_to, tmp;
  rtx_insn *newpos1, *newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (crtl->has_bb_partition && reload_completed)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
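  /* I.e. given

	PRED:  ... if (cond) goto X;	(falls through to SRC1)
	SRC1:  goto DEST;		(forwarder)

     we step from SRC1 back to PRED and look for the common code there.  */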
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
      == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cleanup_cfg).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);

  osrc1 = src1;
  osrc2 = src2;
  if (newpos1 != NULL_RTX)
    src1 = BLOCK_FOR_INSN (newpos1);
  if (newpos2 != NULL_RTX)
    src2 = BLOCK_FOR_INSN (newpos2);

  if (dir == dir_backward)
    {
#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
      SWAP (basic_block, osrc1, osrc2);
      SWAP (basic_block, src1, src2);
      SWAP (edge, e1, e2);
      SWAP (rtx_insn *, newpos1, newpos2);
#undef SWAP
    }

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns at the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps at the end of SRC1 and SRC2,
     they have already been compared for equivalence in outgoing_edges_match (),
     so replace the references to TABLE1 with references to TABLE2.  */
    {
      rtx label1, label2;
      rtx_jump_table_data *table1, *table2;

      if (tablejump_p (BB_END (osrc1), &label1, &table1)
	  && tablejump_p (BB_END (osrc2), &label2, &table2)
	  && label1 != label2)
	{
	  rtx_insn *insn;

	  /* Replace references to LABEL1 with LABEL2.  */
	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	    {
	      /* Do not replace the label in SRC1->END because when deleting
		 a block whose end is a tablejump, the tablejump referenced
		 from the instruction is deleted too.  */
	      if (insn != BB_END (osrc1))
		replace_label_in_insn (insn, label1, label2, true);
	    }
	}
    }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  if (osrc2 == src2)
    redirect_edges_to = redirect_to;
  else
    redirect_edges_to = osrc2;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_edges_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_edges_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_edges_to->frequency + src1->frequency));
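      /* I.e. the merged probability is the frequency-weighted average of the
	 two originals; e.g. combining a 30% edge out of a block with frequency
	 300 and a 60% edge out of a block with frequency 100 gives
	 (30*300 + 60*100) / 400 = 37.5%.  */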
    }

  /* Adjust count and frequency for the block.  An earlier jump
     threading pass may have left the profile in an inconsistent
     state (see update_bb_profile_for_threading) so we must be
     prepared for overflows.  */
  tmp = redirect_to;
  do
    {
      tmp->count += src1->count;
      tmp->frequency += src1->frequency;
      if (tmp->frequency > BB_FREQ_MAX)
        tmp->frequency = BB_FREQ_MAX;
      if (tmp == redirect_edges_to)
        break;
      tmp = find_fallthru_edge (tmp->succs)->dest;
    }
  while (true);
  update_br_prob_note (redirect_edges_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
					BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  fallthru = find_fallthru_edge (bb->preds);

  changed = false;
  for (ix = 0; ix < EDGE_COUNT (bb->preds);)
    {
      e = EDGE_PRED (bb, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
	 fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (fallthru->src->flags & BB_MODIFIED)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
	    {
	      changed = true;
	      ix = 0;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
	{
	  e2 = EDGE_PRED (bb, ix2);

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (e2->src->flags & BB_MODIFIED)))
	    continue;

	  /* Both e and e2 are not fallthru edges, so we can crossjump in either
	     direction.  */
	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
	    {
	      changed = true;
	      ix = 0;
	      break;
	    }
	}
    }

  if (changed)
    crossjumps_occured = true;

  return changed;
}

/* Search the successors of BB for common insn sequences.  When found,
   share code between them by moving it across the basic block
   boundary.  Return true if any changes made.  */
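/* That is, insns that are identical at the start of every successor of BB are
   hoisted into BB itself (normally just before its final jump), and the
   now-redundant copies in the other successors are deleted.  */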

static bool
try_head_merge_bb (basic_block bb)
{
  basic_block final_dest_bb = NULL;
  int max_match = INT_MAX;
  edge e0;
  rtx_insn **headptr, **currptr, **nextptr;
  bool changed, moveall;
  unsigned ix;
  rtx_insn *e0_last_head;
  rtx cond;
  rtx_insn *move_before;
  unsigned nedges = EDGE_COUNT (bb->succs);
  rtx_insn *jump = BB_END (bb);
  regset live, live_union;

  /* Nothing to do if there are not at least two outgoing edges.  */
  if (nedges < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  cond = get_condition (jump, &move_before, true, false);
  if (cond == NULL_RTX)
    {
#ifdef HAVE_cc0
      if (reg_mentioned_p (cc0_rtx, jump))
	move_before = prev_nonnote_nondebug_insn (jump);
      else
#endif
	move_before = jump;
    }

  for (ix = 0; ix < nedges; ix++)
    if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
      return false;

  for (ix = 0; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      basic_block other_bb = e->dest;

      if (df_get_bb_dirty (other_bb))
	{
	  block_was_dirty = true;
	  return false;
	}

      if (e->flags & EDGE_ABNORMAL)
	return false;

      /* Normally, all destination blocks must only be reachable from this
	 block, i.e. they must have one incoming edge.

	 There is one special case we can handle, that of multiple consecutive
	 jumps where the first jumps to one of the targets of the second jump.
	 This happens frequently in switch statements for default labels.
	 The structure is as follows:
	 FINAL_DEST_BB
	 ....
	 if (cond) jump A;
	 fall through
	 BB
	 jump with targets A, B, C, D...
	 A
	 has two incoming edges, from FINAL_DEST_BB and BB

	 In this case, we can try to move the insns through BB and into
	 FINAL_DEST_BB.  */
      if (EDGE_COUNT (other_bb->preds) != 1)
	{
	  edge incoming_edge, incoming_bb_other_edge;
	  edge_iterator ei;

	  if (final_dest_bb != NULL
	      || EDGE_COUNT (other_bb->preds) != 2)
	    return false;

	  /* We must be able to move the insns across the whole block.  */
	  move_before = BB_HEAD (bb);
	  while (!NONDEBUG_INSN_P (move_before))
	    move_before = NEXT_INSN (move_before);

	  if (EDGE_COUNT (bb->preds) != 1)
	    return false;
	  incoming_edge = EDGE_PRED (bb, 0);
	  final_dest_bb = incoming_edge->src;
	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
	    return false;
	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
	    if (incoming_bb_other_edge != incoming_edge)
	      break;
	  if (incoming_bb_other_edge->dest != other_bb)
	    return false;
	}
    }

  e0 = EDGE_SUCC (bb, 0);
  e0_last_head = NULL;
  changed = false;

  for (ix = 1; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      rtx_insn *e0_last, *e_last;
      int nmatch;

      nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
						 &e0_last, &e_last, 0);
      if (nmatch == 0)
	return false;

      if (nmatch < max_match)
	{
	  max_match = nmatch;
	  e0_last_head = e0_last;
	}
    }

  /* If we matched an entire block, we probably have to avoid moving the
     last insn.  */
  if (max_match > 0
      && e0_last_head == BB_END (e0->dest)
      && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
	  || control_flow_insn_p (e0_last_head)))
    {
      max_match--;
      if (max_match == 0)
	return false;
      do
	e0_last_head = prev_real_insn (e0_last_head);
      while (DEBUG_INSN_P (e0_last_head));
    }

  if (max_match == 0)
    return false;

  /* We must find a union of the live registers at each of the end points.  */
  live = BITMAP_ALLOC (NULL);
  live_union = BITMAP_ALLOC (NULL);

  currptr = XNEWVEC (rtx_insn *, nedges);
  headptr = XNEWVEC (rtx_insn *, nedges);
  nextptr = XNEWVEC (rtx_insn *, nedges);

  for (ix = 0; ix < nedges; ix++)
    {
      int j;
      basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
      rtx_insn *head = BB_HEAD (merge_bb);

      while (!NONDEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      headptr[ix] = head;
      currptr[ix] = head;

      /* Compute the end point and live information  */
      for (j = 1; j < max_match; j++)
	do
	  head = NEXT_INSN (head);
	while (!NONDEBUG_INSN_P (head));
      simulate_backwards_to_point (merge_bb, live, head);
      IOR_REG_SET (live_union, live);
    }

  /* If we're moving across two blocks, verify the validity of the
     first move, then adjust the target and let the loop below deal
     with the final move.  */
  if (final_dest_bb != NULL)
    {
      rtx_insn *move_upto;

      moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
				       jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall)
	{
	  if (move_upto == NULL_RTX)
	    goto out;

	  while (e0_last_head != move_upto)
	    {
	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
					      live_union);
	      e0_last_head = PREV_INSN (e0_last_head);
	    }
	}
      if (e0_last_head == NULL_RTX)
	goto out;

      jump = BB_END (final_dest_bb);
      cond = get_condition (jump, &move_before, true, false);
      if (cond == NULL_RTX)
	{
#ifdef HAVE_cc0
	  if (reg_mentioned_p (cc0_rtx, jump))
	    move_before = prev_nonnote_nondebug_insn (jump);
	  else
#endif
	    move_before = jump;
	}
    }

  do
    {
      rtx_insn *move_upto;
      moveall = can_move_insns_across (currptr[0], e0_last_head,
				       move_before, jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall && move_upto == NULL_RTX)
	{
	  if (jump == move_before)
	    break;

	  /* Try again, using a different insertion point.  */
	  move_before = jump;

#ifdef HAVE_cc0
	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (reg_mentioned_p (cc0_rtx, jump))
	    break;
#endif

	  continue;
	}

      if (final_dest_bb && !moveall)
	/* We haven't checked whether a partial move would be OK for the first
	   move, so we have to fail this case.  */
	break;

      changed = true;
      for (;;)
	{
	  if (currptr[0] == move_upto)
	    break;
	  for (ix = 0; ix < nedges; ix++)
	    {
	      rtx_insn *curr = currptr[ix];
	      do
		curr = NEXT_INSN (curr);
	      while (!NONDEBUG_INSN_P (curr));
	      currptr[ix] = curr;
	    }
	}

      /* If we can't currently move all of the identical insns, remember
	 each insn after the range that we'll merge.  */
      if (!moveall)
	for (ix = 0; ix < nedges; ix++)
	  {
	    rtx_insn *curr = currptr[ix];
	    do
	      curr = NEXT_INSN (curr);
	    while (!NONDEBUG_INSN_P (curr));
	    nextptr[ix] = curr;
	  }

      reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
      df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
      if (final_dest_bb != NULL)
	df_set_bb_dirty (final_dest_bb);
      df_set_bb_dirty (bb);
      for (ix = 1; ix < nedges; ix++)
	{
	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
	  delete_insn_chain (headptr[ix], currptr[ix], false);
	}
      if (!moveall)
	{
	  if (jump == move_before)
	    break;

	  /* For the unmerged insns, try a different insertion point.  */
	  move_before = jump;

#ifdef HAVE_cc0
	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (reg_mentioned_p (cc0_rtx, jump))
	    break;
#endif

	  for (ix = 0; ix < nedges; ix++)
	    currptr[ix] = headptr[ix] = nextptr[ix];
	}
    }
  while (!moveall);

 out:
  free (currptr);
  free (headptr);
  free (nextptr);

  crossjumps_occured |= changed;

  return changed;
}

/* Return true if BB contains just bb note, or bb note followed
   by only DEBUG_INSNs.  */

static bool
trivially_empty_bb_p (basic_block bb)
{
  rtx_insn *insn = BB_END (bb);

  while (1)
    {
      if (insn == BB_HEAD (bb))
	return true;
      if (!DEBUG_INSN_P (insn))
	return false;
      insn = PREV_INSN (insn);
    }
}

/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  crossjumps_occured = false;

  FOR_EACH_BB_FN (bb, cfun)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  block_was_dirty = false;
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However, if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor, which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
		      != b))
		{
		  c = b->prev_bb;
		  if (EDGE_COUNT (b->preds) > 0)
		    {
		      edge e;
		      edge_iterator ei;

		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
			{
			  if (BB_FOOTER (b)
			      && BARRIER_P (BB_FOOTER (b)))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU)
				  && BB_FOOTER (e->src) == NULL)
				{
				  if (BB_FOOTER (b))
				    {
				      BB_FOOTER (e->src) = BB_FOOTER (b);
				      BB_FOOTER (b) = NULL;
				    }
				  else
				    {
				      start_sequence ();
				      BB_FOOTER (e->src) = emit_barrier ();
				      end_sequence ();
				    }
				}
			}
		      else
			{
			  rtx_insn *last = get_last_bb_insn (b);
			  if (last && BARRIER_P (last))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				emit_barrier_after (BB_END (e->src));
			}
		    }
		  delete_basic_block (b);
		  changed = true;
		  /* Avoid trying to remove the exit block.  */
		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
		  continue;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  && !LABEL_PRESERVE_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  delete_insn (BB_HEAD (b));
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		       ? b->next_bb : b->prev_bb);
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		  continue;
		}

	      /* Merge B with its single successor, if any.  */
	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     insns.  This code possibly creates new basic blocks, so it
		     does not fit the merge_blocks interface and is kept here in
		     the hope that it will become useless once more of the
		     compiler is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		      {
			b = next;
			changed_here = true;
		      }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		   && !(mode & CLEANUP_CFGLAYOUT)
		   && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && onlyjump_p (BB_END (b))
		  && !CROSSING_JUMP_P (BB_END (b))
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      if ((mode & CLEANUP_CROSSJUMP)
		  /* This can lengthen register lifetimes.  Do it only after
		     reload.  */
		  && reload_completed
		  && try_head_merge_bb (b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
	    changed = true;

	  if (block_was_dirty)
	    {
	      /* This should only be set by head-merging.  */
	      gcc_assert (mode & CLEANUP_CROSSJUMP);
	      df_analyze ();
	    }

	  if (changed)
            {
              /* Edge forwarding in particular can cause hot blocks previously
                 reached by both hot and cold blocks to become dominated only
                 by cold blocks. This will cause the verification below to fail,
                 and lead to now cold code in the hot section. This is not easy
                 to detect and fix during edge forwarding, and in some cases
                 is only visible after newly unreachable blocks are deleted,
                 which will be done in fixup_partitions.  */
              fixup_partitions ();

#ifdef ENABLE_CHECKING
              verify_flow_info ();
#endif
            }

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}

/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug insns, we should
     delete blocks in reverse dominator order, so as to get a chance
     to substitute all released DEFs into debug stmts.  If we don't
     have dominators information, walking blocks backward gets us a
     better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  h.release ();
		}

	      changed = true;
	    }
	}
    }
  else
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn as they are referenced by the
   preceding insns computing the destination, so we delay deleting them
   and garbage collect them once life information is computed.  */
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx_insn *label = insn, *jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}


/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Cleanup it right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
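  /* (Crossjumping works by looking at the predecessors of a shared successor
     block, so blocks ending in a noreturn call only become candidates once
     they share the fake successor EXIT.)  */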
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
	{
	  /* Try to remove some trivially dead insns when doing an expensive
	     cleanup.  But delete_trivially_dead_insns doesn't work after
	     reload (it only handles pseudos) and run_fast_dce is too costly
	     to run in every iteration.

	     For effective cross jumping, we really want to run a fast DCE to
	     clean up any dead conditions, or they get in the way of performing
	     useful tail merges.

	     Other transformations in cleanup_cfg are not so sensitive to dead
	     code, so delete_trivially_dead_insns or even doing nothing at all
	     is good enough.  */
	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
	    run_fast_dce ();
	}
      else
	break;
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  /* ???  We probably do this way too often.  */
  if (current_loops
      && (changed
	  || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The above doesn't preserve dominance info if available.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}

namespace {

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump

unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}

namespace {

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}