/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and
   analyze it.  Other modules should not transform the data structure
   directly but should use the abstraction instead.  The file is supposed
   to be ordered bottom-up and should not contain any code dependent on a
   particular intermediate language (RTL or trees).

   Available functionality:
     - Initialization/deallocation
	 init_flow, clear_edges
     - Low level basic block manipulation
	 alloc_block, expunge_block
     - Edge manipulation
	 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
	 - Low level edge redirection (without updating instruction chain)
	     redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
     - Dumping and debugging
	 dump_flow_info, debug_flow_info, dump_edge_info
     - Allocation of AUX fields for basic blocks
	 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
     - clear_bb_flags
     - Consistency checking
	 verify_flow_info
     - Dumping and debugging
	 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n

   TODO: Document these "Available functionality" functions in the files
   that implement them.
 */
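
/* As an illustrative sketch only: a pass working on the current function
   might create and adjust an edge roughly like this, where BB, DEST and
   OTHER_DEST are assumed to be existing basic blocks of cfun:

       edge e = make_edge (bb, dest, EDGE_FALLTHRU);
       if (e == NULL)
	 e = find_edge (bb, dest);	// the edge already existed
       e->probability = profile_probability::always ();
       redirect_edge_succ (e, other_dest);

   See the individual functions below for the exact semantics.  */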

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "hard-reg-set.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "cfganal.h"
#include "cfgloop.h" /* FIXME: For struct loop.  */
#include "dumpfile.h"



/* Called once at initialization time.  */

void
init_flow (struct function *the_fun)
{
  if (!the_fun->cfg)
    the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
  n_edges_for_fn (the_fun) = 0;
  the_fun->cfg->count_max = profile_count::uninitialized ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
  EXIT_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (the_fun);
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}

/* Helper function for remove_edge and clear_edges.  Frees edge structure
   without actually removing it from the pred/succ arrays.  */

static void
free_edge (function *fn, edge e)
{
  n_edges_for_fn (fn)--;
  ggc_free (e);
}

/* Free the memory associated with the edge structures.  */

void
clear_edges (struct function *fn)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, fn)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	free_edge (fn, e);
      vec_safe_truncate (bb->succs, 0);
      vec_safe_truncate (bb->preds, 0);
    }

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fn)->succs)
    free_edge (fn, e);
  vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (fn)->preds, 0);
  vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs, 0);

  gcc_assert (!n_edges_for_fn (fn));
}

/* Allocate memory for basic_block.  */

basic_block
alloc_block (void)
{
  basic_block bb;
  bb = ggc_cleared_alloc<basic_block_def> ();
  bb->count = profile_count::uninitialized ();
  return bb;
}

/* Link block B to chain after AFTER.  */
void
link_block (basic_block b, basic_block after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}

/* Unlink block B from chain.  */
void
unlink_block (basic_block b)
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
  b->prev_bb = NULL;
  b->next_bb = NULL;
}

/* Sequentially order blocks and compact the arrays.  */
void
compact_blocks (void)
{
  int i;

  SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
  SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));

  if (df)
    df_compact_blocks ();
  else
    {
      basic_block bb;

      i = NUM_FIXED_BLOCKS;
      FOR_EACH_BB_FN (bb, cfun)
	{
	  SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
	  bb->index = i;
	  i++;
	}
      gcc_assert (i == n_basic_blocks_for_fn (cfun));

      for (; i < last_basic_block_for_fn (cfun); i++)
	SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
    }
  last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}

/* Remove block B from the basic block array.  */

void
expunge_block (basic_block b)
{
  unlink_block (b);
  SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
  n_basic_blocks_for_fn (cfun)--;
  /* We should be able to ggc_free here, but we are not.
     Dead SSA_NAMEs are left pointing to dead statements that are pointing
     to dead basic blocks, which makes the garbage collector die.
     We should be able to release all dead SSA_NAMEs and at the same time
     clear out the BB pointer of dead statements consistently.  */
}

/* Connect E to E->src.  */

static inline void
connect_src (edge e)
{
  vec_safe_push (e->src->succs, e);
  df_mark_solutions_dirty ();
}

/* Connect E to E->dest.  */

static inline void
connect_dest (edge e)
{
  basic_block dest = e->dest;
  vec_safe_push (dest->preds, e);
  e->dest_idx = EDGE_COUNT (dest->preds) - 1;
  df_mark_solutions_dirty ();
}

/* Disconnect edge E from E->src.  */

static inline void
disconnect_src (edge e)
{
  basic_block src = e->src;
  edge_iterator ei;
  edge tmp;

  for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
    {
      if (tmp == e)
	{
	  src->succs->unordered_remove (ei.index);
	  df_mark_solutions_dirty ();
	  return;
	}
      else
	ei_next (&ei);
    }

  gcc_unreachable ();
}

/* Disconnect edge E from E->dest.  */

static inline void
disconnect_dest (edge e)
{
  basic_block dest = e->dest;
  unsigned int dest_idx = e->dest_idx;

  dest->preds->unordered_remove (dest_idx);

  /* If we removed an edge in the middle of the edge vector, we need
     to update dest_idx of the edge that moved into the "hole".  */
  if (dest_idx < EDGE_COUNT (dest->preds))
    EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
  df_mark_solutions_dirty ();
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return newly
   created edge.  Use this only if you are sure that this edge can't
   possibly already exist.  */

edge
unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
  edge e;
  e = ggc_cleared_alloc<edge_def> ();
  n_edges_for_fn (cfun)++;

  e->probability = profile_probability::uninitialized ();
  e->src = src;
  e->dest = dst;
  e->flags = flags;

  connect_src (e);
  connect_dest (e);

  execute_on_growing_pred (e);
  return e;
}

/* Create an edge connecting SRC and DST with FLAGS, optionally using
   edge cache CACHE.  Return the new edge, or NULL if it already exists.  */

edge
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
  if (edge_cache == NULL
      || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return make_edge (src, dst, flags);

  /* Does the requested edge already exist?  */
  if (! bitmap_bit_p (edge_cache, dst->index))
    {
      /* The edge does not exist.  Create one and update the
	 cache.  */
      bitmap_set_bit (edge_cache, dst->index);
      return unchecked_make_edge (src, dst, flags);
    }

  /* At this point, we know that the requested edge exists.  Adjust
     flags if necessary.  */
  if (flags)
    {
      edge e = find_edge (src, dst);
      e->flags |= flags;
    }

  return NULL;
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return the newly
   created edge, or NULL if it already exists.  */

edge
make_edge (basic_block src, basic_block dest, int flags)
{
  edge e = find_edge (src, dest);

  /* Make sure we don't add duplicate edges.  */
  if (e)
    {
      e->flags |= flags;
      return NULL;
    }

  return unchecked_make_edge (src, dest, flags);
}

/* Create an edge connecting SRC to DEST and set its probability knowing
   that it is the single edge leaving SRC.  */

edge
make_single_succ_edge (basic_block src, basic_block dest, int flags)
{
  edge e = make_edge (src, dest, flags);

  e->probability = profile_probability::always ();
  return e;
}
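
/* For instance (an illustrative sketch only), wiring a hypothetical new
   block NEW_BB so that SRC falls through to NEW_BB and NEW_BB in turn
   unconditionally reaches DEST could look like:

       edge e1 = make_single_succ_edge (src, new_bb, EDGE_FALLTHRU);
       edge e2 = make_single_succ_edge (new_bb, dest, 0);

   Both edges get probability "always", since each is the only edge leaving
   its source block.  */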

/* This function will remove an edge from the flow graph.  */

void
remove_edge_raw (edge e)
{
  remove_predictions_associated_with_edge (e);
  execute_on_shrinking_pred (e);

  disconnect_src (e);
  disconnect_dest (e);

  free_edge (cfun, e);
}

/* Redirect an edge's successor from one block to another.  */

void
redirect_edge_succ (edge e, basic_block new_succ)
{
  execute_on_shrinking_pred (e);

  disconnect_dest (e);

  e->dest = new_succ;

  /* Reconnect the edge to the new successor block.  */
  connect_dest (e);

  execute_on_growing_pred (e);
}

/* Redirect an edge's predecessor from one block to another.  */

void
redirect_edge_pred (edge e, basic_block new_pred)
{
  disconnect_src (e);

  e->src = new_pred;

  /* Reconnect the edge to the new predecessor block.  */
  connect_src (e);
}

/* Clear all basic block flags that do not have to be preserved.  */
void
clear_bb_flags (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->flags &= BB_FLAGS_TO_PRESERVE;
}

/* Check the consistency of profile information.  We can't do that
   in verify_flow_info, as the counts may become invalid for incompletely
   solved graphs, for later elimination of conditionals, or because of
   roundoff errors.  It is still practical to have them reported for
   debugging of simple testcases.  */
static void
check_bb_profile (basic_block bb, FILE * file, int indent)
{
  edge e;
  edge_iterator ei;
  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    return;

  if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
    {
      bool found = false;
      profile_probability sum = profile_probability::never ();
      int isum = 0;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
	    found = true;
	  sum += e->probability;
	  if (e->probability.initialized_p ())
	    isum += e->probability.to_reg_br_prob_base ();
	}
      /* Only report mismatches for non-EH control flow.  If there are only EH
	 edges it means that the BB ends with a noreturn call.  Here the control
	 flow may just terminate.  */
      if (found)
	{
	  if (sum.differs_from_p (profile_probability::always ()))
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities ",
		       s_indent);
	      sum.dump (file);
	      fprintf (file, "\n");
	    }
	  /* Probabilities cap at 100% and thus the previous test will never
	     fire if the sum of probabilities is too large.  */
	  else if (isum > REG_BR_PROB_BASE + 100)
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities %.1f%%\n",
		       s_indent, isum * 100.0 / REG_BR_PROB_BASE);
	    }
	}
    }
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
    {
      profile_count sum = profile_count::zero ();
      FOR_EACH_EDGE (e, ei, bb->preds)
	sum += e->count ();
      if (sum.differs_from_p (bb->count))
	{
	  fprintf (file, ";; %sInvalid sum of incoming counts ",
		   s_indent);
	  sum.dump (file);
	  fprintf (file, ", should be ");
	  bb->count.dump (file);
	  fprintf (file, "\n");
	}
    }
  if (BB_PARTITION (bb) == BB_COLD_PARTITION)
    {
      /* Warn about inconsistencies in the partitioning that are
         currently caused by profile insanities created via optimization.  */
      if (!probably_never_executed_bb_p (fun, bb))
	fprintf (file, ";; %sBlock in cold partition with hot count\n",
		 s_indent);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (!probably_never_executed_edge_p (fun, e))
            fprintf (file,
		     ";; %sBlock in cold partition with incoming hot edge\n",
		     s_indent);
        }
    }
}

void
dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ)
{
  basic_block side = (do_succ ? e->dest : e->src);
  bool do_details = false;
  
  if ((flags & TDF_DETAILS) != 0
      && (flags & TDF_SLIM) == 0)
    do_details = true;

  if (side->index == ENTRY_BLOCK)
    fputs (" ENTRY", file);
  else if (side->index == EXIT_BLOCK)
    fputs (" EXIT", file);
  else
    fprintf (file, " %d", side->index);

  if (e->probability.initialized_p () && do_details)
    {
      fprintf (file, " [");
      e->probability.dump (file);
      fprintf (file, "] ");
    }

  if (e->count ().initialized_p () && do_details)
    {
      fputs (" count:", file);
      e->count ().dump (file);
    }

  if (e->flags && do_details)
    {
      static const char * const bitnames[] =
	{
#define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
	  NULL
#undef DEF_EDGE_FLAG
	};
      bool comma = false;
      int i, flags = e->flags;

      gcc_assert (e->flags <= EDGE_ALL_FLAGS);
      fputs (" (", file);
      for (i = 0; flags; i++)
	if (flags & (1 << i))
	  {
	    flags &= ~(1 << i);

	    if (comma)
	      fputc (',', file);
	    fputs (bitnames[i], file);
	    comma = true;
	  }

      fputc (')', file);
    }
}

DEBUG_FUNCTION void
debug (edge_def &ref)
{
  /* FIXME (crowl): Is this desirable?  */
  dump_edge_info (stderr, &ref, 0, false);
  dump_edge_info (stderr, &ref, 0, true);
}

DEBUG_FUNCTION void
debug (edge_def *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

static void
debug_slim (edge e)
{
  fprintf (stderr, "<edge 0x%p (%d -> %d)>", (void *) e,
	   e->src->index, e->dest->index);
}

DEFINE_DEBUG_VEC (edge)
DEFINE_DEBUG_HASH_SET (edge)

/* Simple routines to easily allocate AUX fields of basic blocks.  */

static struct obstack block_aux_obstack;
static void *first_block_aux_obj = 0;
static struct obstack edge_aux_obstack;
static void *first_edge_aux_obj = 0;

/* Allocate a memory block of SIZE as BB->aux.  The obstack must
   be first initialized by alloc_aux_for_blocks.  */

static void
alloc_aux_for_block (basic_block bb, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!bb->aux && first_block_aux_obj);
  bb->aux = obstack_alloc (&block_aux_obstack, size);
  memset (bb->aux, 0, size);
}

/* Initialize the block_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_block for each basic block.  */

void
alloc_aux_for_blocks (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&block_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_block_aux_obj);

  first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_ALL_BB_FN (bb, cfun)
	alloc_aux_for_block (bb, size);
    }
}

/* Clear AUX pointers of all blocks.  */

void
clear_aux_for_blocks (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->aux = NULL;
}

/* Free data allocated in block_aux_obstack and clear AUX pointers
   of all blocks.  */

void
free_aux_for_blocks (void)
{
  gcc_assert (first_block_aux_obj);
  obstack_free (&block_aux_obstack, first_block_aux_obj);
  first_block_aux_obj = NULL;

  clear_aux_for_blocks ();
}
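
/* A typical lifetime of the per-block AUX data looks roughly like the
   following sketch, where struct my_bb_info is a hypothetical client
   structure:

       alloc_aux_for_blocks (sizeof (struct my_bb_info));
       FOR_EACH_BB_FN (bb, cfun)
	 ((struct my_bb_info *) bb->aux)->visited = false;
       // ... use the per-block data ...
       free_aux_for_blocks ();

   The data is zero-initialized by alloc_aux_for_block and lives on
   block_aux_obstack until free_aux_for_blocks is called.  */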

/* Allocate a memory block of SIZE as E->aux.  The obstack must
   be first initialized by alloc_aux_for_edges.  */

void
alloc_aux_for_edge (edge e, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!e->aux && first_edge_aux_obj);
  e->aux = obstack_alloc (&edge_aux_obstack, size);
  memset (e->aux, 0, size);
}

/* Initialize the edge_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_edge for each edge.  */

void
alloc_aux_for_edges (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&edge_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_edge_aux_obj);

  first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    alloc_aux_for_edge (e, size);
	}
    }
}

/* Clear AUX pointers of all edges.  */

void
clear_aux_for_edges (void)
{
  basic_block bb;
  edge e;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
}

/* Free data allocated in edge_aux_obstack and clear AUX pointers
   of all edges.  */

void
free_aux_for_edges (void)
{
  gcc_assert (first_edge_aux_obj);
  obstack_free (&edge_aux_obstack, first_edge_aux_obj);
  first_edge_aux_obj = NULL;

  clear_aux_for_edges ();
}

DEBUG_FUNCTION void
debug_bb (basic_block bb)
{
  dump_bb (stderr, bb, 0, dump_flags);
}

DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
  debug_bb (bb);
  return bb;
}
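
/* For example, from a debugger session inside the compiler one can dump
   basic block 3 of the current function to stderr with

       (gdb) call debug_bb_n (3)

   which simply calls debug_bb above.  */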

/* Dumps cfg related information about basic block BB to OUTF.
   If HEADER is true, dump things that appear before the instructions
   contained in BB.  If FOOTER is true, dump things that appear after.
   Flags are the TDF_* masks as documented in dumpfile.h.
   NB: With TDF_DETAILS, it is assumed that cfun is available, so
   that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE.  */

void
dump_bb_info (FILE *outf, basic_block bb, int indent, dump_flags_t flags,
	      bool do_header, bool do_footer)
{
  edge_iterator ei;
  edge e;
  static const char * const bb_bitnames[] =
    {
#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
      NULL
#undef DEF_BASIC_BLOCK_FLAG
    };
  const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
  bool first;
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  gcc_assert (bb->flags <= BB_ALL_FLAGS);

  if (do_header)
    {
      unsigned i;

      fputs (";; ", outf);
      fprintf (outf, "%sbasic block %d, loop depth %d",
	       s_indent, bb->index, bb_loop_depth (bb));
      if (flags & TDF_DETAILS)
	{
	  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
	  if (bb->count.initialized_p ())
	    {
	      fputs (", count ", outf);
	      bb->count.dump (outf);
	    }
	  if (maybe_hot_bb_p (fun, bb))
	    fputs (", maybe hot", outf);
	  if (probably_never_executed_bb_p (fun, bb))
	    fputs (", probably never executed", outf);
	}
      fputc ('\n', outf);

      if (flags & TDF_DETAILS)
	{
	  check_bb_profile (bb, outf, indent);
	  fputs (";; ", outf);
	  fprintf (outf, "%s prev block ", s_indent);
	  if (bb->prev_bb)
	    fprintf (outf, "%d", bb->prev_bb->index);
	  else
	    fprintf (outf, "(nil)");
	  fprintf (outf, ", next block ");
	  if (bb->next_bb)
	    fprintf (outf, "%d", bb->next_bb->index);
	  else
	    fprintf (outf, "(nil)");

	  fputs (", flags:", outf);
	  first = true;
	  for (i = 0; i < n_bitnames; i++)
	    if (bb->flags & (1 << i))
	      {
		if (first)
		  fputs (" (", outf);
		else
		  fputs (", ", outf);
		first = false;
		fputs (bb_bitnames[i], outf);
	      }
	  if (!first)
	    fputc (')', outf);
	  fputc ('\n', outf);
	}

      fputs (";; ", outf);
      fprintf (outf, "%s pred:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 0);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }

  if (do_footer)
    {
      fputs (";; ", outf);
      fprintf (outf, "%s succ:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 1);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }
}

/* Dumps a brief description of cfg to FILE.  */

void
brief_dump_cfg (FILE *file, dump_flags_t flags)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      dump_bb_info (file, bb, 0, flags & TDF_DETAILS, true, true);
    }
}

/* An edge with count COUNT, originally destined for BB, has been proved to
   leave the block via TAKEN_EDGE.  Update the profile of BB so that the edge
   can be redirected to the destination of TAKEN_EDGE.

   This function may leave the profile inconsistent in the case the count of
   TAKEN_EDGE is believed to be lower than COUNT.  */
void
update_bb_profile_for_threading (basic_block bb, 
				 profile_count count, edge taken_edge)
{
  edge c;
  profile_probability prob;
  edge_iterator ei;

  if (bb->count < count)
    {
      if (dump_file)
	fprintf (dump_file, "bb %i count became negative after threading",
		 bb->index);
    }
  bb->count -= count;

  /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
     Watch for overflows.  */
  if (bb->count.nonzero_p ())
    prob = count.probability_in (bb->count);
  else
    prob = profile_probability::never ();
  if (prob > taken_edge->probability)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "Jump threading proved probability of edge "
		   "%i->%i too small (it is ",
		   taken_edge->src->index, taken_edge->dest->index);	
	  taken_edge->probability.dump (dump_file);
	  fprintf (dump_file, " should be ");
	  prob.dump (dump_file);
	  fprintf (dump_file, ")\n");
	}
      prob = taken_edge->probability.apply_scale (6, 8);
    }

  /* Now rescale the probabilities.  */
  taken_edge->probability -= prob;
  prob = prob.invert ();
  if (prob == profile_probability::never ())
    {
      if (dump_file)
	fprintf (dump_file, "Edge probabilities of bb %i have been reset, "
		 "count of block should end up being 0, it is non-zero\n",
		 bb->index);
      EDGE_SUCC (bb, 0)->probability = profile_probability::guessed_always ();
      ei = ei_start (bb->succs);
      ei_next (&ei);
      for (; (c = ei_safe_edge (ei)); ei_next (&ei))
	c->probability = profile_probability::guessed_never ();
    }
  else if (!(prob == profile_probability::always ()))
    {
      FOR_EACH_EDGE (c, ei, bb->succs)
	c->probability /= prob;
    }

  gcc_assert (bb == taken_edge->src);
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in profile_count arithmetic.  More accurate than previous
   function but considerably slower.  */
void
scale_bbs_frequencies_profile_count (basic_block *bbs, int nbbs,
				     profile_count num, profile_count den)
{
  int i;
  if (num == profile_count::zero () || den.nonzero_p ())
    for (i = 0; i < nbbs; i++)
      bbs[i]->count = bbs[i]->count.apply_scale (num, den);
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by probability P.  */
void
scale_bbs_frequencies (basic_block *bbs, int nbbs,
		       profile_probability p)
{
  int i;

  for (i = 0; i < nbbs; i++)
    bbs[i]->count = bbs[i]->count.apply_probability (p);
}

/* Helper types for hash tables.  */

struct htab_bb_copy_original_entry
{
  /* Block we are attaching info to.  */
  int index1;
  /* Index of original or copy (depending on the hashtable) */
  int index2;
};

struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
  static inline hashval_t hash (const htab_bb_copy_original_entry *);
  static inline bool equal (const htab_bb_copy_original_entry *existing,
			    const htab_bb_copy_original_entry * candidate);
};

inline hashval_t
bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
{
  return data->index1;
}

inline bool
bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
		       const htab_bb_copy_original_entry *data2)
{
  return data->index1 == data2->index1;
}

/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;

/* Initialize the data structures to maintain mapping between blocks
   and their copies.  */
void
initialize_original_copy_tables (void)
{
  original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
    ("original_copy");
  bb_original = new hash_table<bb_copy_hasher> (10);
  bb_copy = new hash_table<bb_copy_hasher> (10);
  loop_copy = new hash_table<bb_copy_hasher> (10);
}
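
/* An illustrative usage pattern: a pass that duplicates a block might
   record and query the mapping as in the sketch below, where BB and
   COPY_BB are assumed to be blocks of the current function:

       initialize_original_copy_tables ();
       set_bb_original (copy_bb, bb);
       set_bb_copy (bb, copy_bb);
       // ...
       basic_block c = get_bb_copy (bb);	// yields copy_bb
       free_original_copy_tables ();

   The setters below are no-ops when the tables have not been initialized;
   the getters assert that they have been.  */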

/* Reset the data structures to maintain mapping between blocks and
   their copies.  */

void
reset_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  bb_original->empty ();
  bb_copy->empty ();
  loop_copy->empty ();
}

/* Free the data structures to maintain mapping between blocks and
   their copies.  */
void
free_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  delete bb_copy;
  bb_copy = NULL;
  delete bb_original;
  bb_original = NULL;
  delete loop_copy;
  loop_copy = NULL;
  delete original_copy_bb_pool;
  original_copy_bb_pool = NULL;
}

/* Return true iff we have had a call to initialize_original_copy_tables
   without a corresponding call to free_original_copy_tables.  */

bool
original_copy_tables_initialized_p (void)
{
  return original_copy_bb_pool != NULL;
}

/* Removes the value associated with OBJ from table TAB.  */

static void
copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
  htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key, *elt;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, NO_INSERT);
  if (!slot)
    return;

  elt = *slot;
  tab->clear_slot (slot);
  original_copy_bb_pool->remove (elt);
}

/* Sets the value associated with OBJ in table TAB to VAL.
   Do nothing when data structures are not initialized.  */

static void
copy_original_table_set (hash_table<bb_copy_hasher> *tab,
			 unsigned obj, unsigned val)
{
  struct htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, INSERT);
  if (!*slot)
    {
      *slot = original_copy_bb_pool->allocate ();
      (*slot)->index1 = obj;
    }
  (*slot)->index2 = val;
}

/* Set original for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_original (basic_block bb, basic_block original)
{
  copy_original_table_set (bb_original, bb->index, original->index);
}

/* Get the original basic block.  */
basic_block
get_bb_original (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_original->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_copy (basic_block bb, basic_block copy)
{
  copy_original_table_set (bb_copy, bb->index, copy->index);
}

/* Get the copy of basic block.  */
basic_block
get_bb_copy (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_copy->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for LOOP to COPY.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */

void
set_loop_copy (struct loop *loop, struct loop *copy)
{
  if (!copy)
    copy_original_table_clear (loop_copy, loop->num);
  else
    copy_original_table_set (loop_copy, loop->num, copy->num);
}

/* Get the copy of LOOP.  */

struct loop *
get_loop_copy (struct loop *loop)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = loop->num;
  entry = loop_copy->find (&key);
  if (entry)
    return get_loop (cfun, entry->index2);
  else
    return NULL;
}