/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and
   analyze it.  All other modules should not transform the data structure
   directly and use abstraction instead.  The file is supposed to be
   ordered bottom-up and should not contain any code dependent on a
   particular intermediate language (RTL or trees).

   Available functionality:
     - Initialization/deallocation
	 init_flow, clear_edges
     - Low level basic block manipulation
	 alloc_block, expunge_block
     - Edge manipulation
	 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
	 - Low level edge redirection (without updating instruction chain)
	     redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
     - Dumping and debugging
	 dump_flow_info, debug_flow_info, dump_edge_info
     - Allocation of AUX fields for basic blocks
	 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
     - clear_bb_flags
     - Consistency checking
	 verify_flow_info
     - Dumping and debugging
	 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n

   TODO: Document these "Available functionality" functions in the files
   that implement them.
 */

#include "config.h"
#include "system.h"
51
#include "coretypes.h"
52
#include "backend.h"
53
#include "hard-reg-set.h"
54 55
#include "tree.h"
#include "cfghooks.h"
56
#include "df.h"
57
#include "cfganal.h"
58
#include "cfgloop.h" /* FIXME: For struct loop.  */
59
#include "dumpfile.h"
60 61


62

63
/* Called once at initialization time.  */
64 65

void
Diego Novillo committed
66
init_flow (struct function *the_fun)
67
{
Diego Novillo committed
68
  if (!the_fun->cfg)
69
    the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
David Malcolm committed
70
  n_edges_for_fn (the_fun) = 0;
71
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)
72
    = alloc_block ();
73 74
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
  EXIT_BLOCK_PTR_FOR_FN (the_fun)
75
    = alloc_block ();
76 77 78 79 80
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (the_fun);
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
81 82
}
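
/* Orientation sketch (illustrative, not called anywhere): right after
   init_flow the block chain is just ENTRY_BLOCK_PTR <-> EXIT_BLOCK_PTR.
   New blocks are normally created through the create_basic_block cfghook
   rather than by hand; schematically that hook reduces to the primitives
   defined below plus index and array bookkeeping:

     basic_block bb = alloc_block ();
     link_block (bb, after);   // splice BB into the chain after AFTER
     // ...then assign bb->index, register BB in the basic_block_info
     // vector and bump n_basic_blocks_for_fn, as the IL-specific hook does.

   Treat this purely as a reading aid.  */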

/* Helper function for remove_edge and clear_edges.  Frees edge structure
   without actually removing it from the pred/succ arrays.  */

static void
free_edge (function *fn, edge e)
{
  n_edges_for_fn (fn)--;
  ggc_free (e);
}

/* Free the memory associated with the edge structures.  */

void
clear_edges (struct function *fn)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, fn)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	free_edge (fn, e);
      vec_safe_truncate (bb->succs, 0);
      vec_safe_truncate (bb->preds, 0);
    }

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fn)->succs)
    free_edge (fn, e);
  vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (fn)->preds, 0);
  vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs, 0);

  gcc_assert (!n_edges_for_fn (fn));
}

/* Allocate memory for basic_block.  */

basic_block
alloc_block (void)
{
  basic_block bb;
  bb = ggc_cleared_alloc<basic_block_def> ();
  bb->count = profile_count::uninitialized ();
  return bb;
}

/* Link block B to chain after AFTER.  */
void
link_block (basic_block b, basic_block after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}

/* Unlink block B from chain.  */
void
unlink_block (basic_block b)
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
  b->prev_bb = NULL;
  b->next_bb = NULL;
}

/* Sequentially order blocks and compact the arrays.  */
void
compact_blocks (void)
{
  int i;

  SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
  SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));

  if (df)
    df_compact_blocks ();
  else
    {
      basic_block bb;

      i = NUM_FIXED_BLOCKS;
      FOR_EACH_BB_FN (bb, cfun)
	{
	  SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
	  bb->index = i;
	  i++;
	}
      gcc_assert (i == n_basic_blocks_for_fn (cfun));

      for (; i < last_basic_block_for_fn (cfun); i++)
	SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
    }
  last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}

/* Remove block B from the basic block array.  */

void
expunge_block (basic_block b)
{
  unlink_block (b);
  SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
  n_basic_blocks_for_fn (cfun)--;
  /* We should be able to ggc_free here, but we are not.
     The dead SSA_NAMEs are left pointing to dead statements that are pointing
     to dead basic blocks, making the garbage collector die.
     We should be able to release all dead SSA_NAMEs and at the same time
     clear out the BB pointer of dead statements consistently.  */
}

/* Connect E to E->src.  */

static inline void
connect_src (edge e)
{
  vec_safe_push (e->src->succs, e);
  df_mark_solutions_dirty ();
}

/* Connect E to E->dest.  */

static inline void
connect_dest (edge e)
{
  basic_block dest = e->dest;
  vec_safe_push (dest->preds, e);
  e->dest_idx = EDGE_COUNT (dest->preds) - 1;
  df_mark_solutions_dirty ();
}

/* Disconnect edge E from E->src.  */

static inline void
disconnect_src (edge e)
{
  basic_block src = e->src;
  edge_iterator ei;
  edge tmp;

  for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
    {
      if (tmp == e)
	{
	  src->succs->unordered_remove (ei.index);
	  df_mark_solutions_dirty ();
	  return;
	}
      else
	ei_next (&ei);
    }

  gcc_unreachable ();
}

/* Disconnect edge E from E->dest.  */

static inline void
disconnect_dest (edge e)
{
  basic_block dest = e->dest;
  unsigned int dest_idx = e->dest_idx;

  dest->preds->unordered_remove (dest_idx);

  /* If we removed an edge in the middle of the edge vector, we need
     to update dest_idx of the edge that moved into the "hole".  */
  if (dest_idx < EDGE_COUNT (dest->preds))
    EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
  df_mark_solutions_dirty ();
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return newly
   created edge.  Use this only if you are sure that this edge can't
   possibly already exist.  */

edge
unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
  edge e;
  e = ggc_cleared_alloc<edge_def> ();
  n_edges_for_fn (cfun)++;

  e->probability = profile_probability::uninitialized ();
  e->src = src;
  e->dest = dst;
  e->flags = flags;

  connect_src (e);
  connect_dest (e);

  execute_on_growing_pred (e);
  return e;
}

/* Create an edge connecting SRC and DST with FLAGS, optionally using the
   edge cache EDGE_CACHE.  Return the new edge, or NULL if it already
   exists.  */

edge
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
  if (edge_cache == NULL
      || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return make_edge (src, dst, flags);

  /* Does the requested edge already exist?  */
  if (! bitmap_bit_p (edge_cache, dst->index))
    {
      /* The edge does not exist.  Create one and update the
	 cache.  */
      bitmap_set_bit (edge_cache, dst->index);
      return unchecked_make_edge (src, dst, flags);
    }

  /* At this point, we know that the requested edge exists.  Adjust
     flags if necessary.  */
  if (flags)
    {
      edge e = find_edge (src, dst);
      e->flags |= flags;
    }

  return NULL;
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return the
   newly created edge, or NULL if it already exists.  */

edge
make_edge (basic_block src, basic_block dest, int flags)
{
  edge e = find_edge (src, dest);

  /* Make sure we don't add duplicate edges.  */
  if (e)
    {
      e->flags |= flags;
      return NULL;
    }

  return unchecked_make_edge (src, dest, flags);
}

/* Create an edge connecting SRC to DEST and set probability by knowing
   that it is the single edge leaving SRC.  */

edge
make_single_succ_edge (basic_block src, basic_block dest, int flags)
{
  edge e = make_edge (src, dest, flags);

  e->probability = profile_probability::always ();
  return e;
}
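
/* A small usage sketch, not taken from any caller; the flag choice is
   arbitrary:

     edge e = make_edge (src, dest, EDGE_FALLTHRU);
     if (!e)
       ;  // an SRC->DEST edge already existed; its flags were OR-ed in

   make_edge is the safe default.  unchecked_make_edge skips the duplicate
   check entirely, and cached_make_edge amortizes it with a per-source
   sbitmap of destination indices, which is useful when a CFG builder
   creates many successors of the same block.  */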

/* This function will remove an edge from the flow graph.  */

void
remove_edge_raw (edge e)
{
  remove_predictions_associated_with_edge (e);
  execute_on_shrinking_pred (e);

  disconnect_src (e);
  disconnect_dest (e);

  free_edge (cfun, e);
}

/* Redirect an edge's successor from one block to another.  */

void
redirect_edge_succ (edge e, basic_block new_succ)
{
  execute_on_shrinking_pred (e);

  disconnect_dest (e);

  e->dest = new_succ;

  /* Reconnect the edge to the new successor block.  */
  connect_dest (e);

  execute_on_growing_pred (e);
}

/* Redirect an edge's predecessor from one block to another.  */

void
redirect_edge_pred (edge e, basic_block new_pred)
{
  disconnect_src (e);

  e->src = new_pred;

  /* Reconnect the edge to the new predecessor block.  */
  connect_src (e);
}
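
/* Orientation note with a hedged sketch: the redirect_* functions above
   only rewrite the pred/succ vectors, as the file header says ("without
   updating instruction chain").  A pass that also needs the underlying IL
   retargeted (e.g. the jump at the end of the source block) normally goes
   through the cfghooks layer instead:

     redirect_edge_and_branch (e, new_dest);   // IL-aware cfghook
     redirect_edge_succ (e, new_dest);         // CFG bookkeeping only

   Which of the two is appropriate depends entirely on the caller; the two
   lines are independent alternatives, not a sequence.  */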

/* Clear all basic block flags that do not have to be preserved.  */
void
clear_bb_flags (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->flags &= BB_FLAGS_TO_PRESERVE;
}

/* Check the consistency of profile information.  We can't do that
   in verify_flow_info, as the counts may become invalid for incompletely
   solved graphs, after later elimination of conditionals, or because of
   roundoff errors.  It is still practical to have them reported for
   debugging of simple testcases.  */
static void
check_bb_profile (basic_block bb, FILE * file, int indent)
{
  edge e;
  edge_iterator ei;
  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    return;

  if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
    {
      bool found = false;
      profile_probability sum = profile_probability::never ();
      int isum = 0;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
	    found = true;
	  sum += e->probability;
	  if (e->probability.initialized_p ())
	    isum += e->probability.to_reg_br_prob_base ();
	}
      /* Only report mismatches for non-EH control flow.  If there are only
	 EH edges, it means that the BB ends with a noreturn call.  Here the
	 control flow may just terminate.  */
      if (found)
	{
	  if (sum.differs_from_p (profile_probability::always ()))
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities ",
		       s_indent);
	      sum.dump (file);
	      fprintf (file, "\n");
	    }
	  /* Probabilities cap at 100%, and thus the previous test will never
	     fire if the sum of probabilities is too large.  */
	  else if (isum > REG_BR_PROB_BASE + 100)
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities %.1f%%\n",
		       s_indent, isum * 100.0 / REG_BR_PROB_BASE);
	    }
	}
    }
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
    {
      int sum = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
	sum += EDGE_FREQUENCY (e);
      if (abs (sum - bb->frequency) > 100)
	fprintf (file,
		 ";; %sInvalid sum of incoming frequencies %i, should be %i\n",
		 s_indent, sum, bb->frequency);
    }
  if (BB_PARTITION (bb) == BB_COLD_PARTITION)
    {
      /* Warn about inconsistencies in the partitioning that are
	 currently caused by profile insanities created via optimization.  */
      if (!probably_never_executed_bb_p (fun, bb))
	fprintf (file, ";; %sBlock in cold partition with hot count\n",
		 s_indent);
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (!probably_never_executed_edge_p (fun, e))
	    fprintf (file,
		     ";; %sBlock in cold partition with incoming hot edge\n",
		     s_indent);
	}
    }
}

void
dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ)
{
  basic_block side = (do_succ ? e->dest : e->src);
  bool do_details = false;

  if ((flags & TDF_DETAILS) != 0
      && (flags & TDF_SLIM) == 0)
    do_details = true;

  if (side->index == ENTRY_BLOCK)
    fputs (" ENTRY", file);
  else if (side->index == EXIT_BLOCK)
    fputs (" EXIT", file);
  else
    fprintf (file, " %d", side->index);

  if (e->probability.initialized_p () && do_details)
    {
      fprintf (file, " [");
      e->probability.dump (file);
      fprintf (file, "] ");
    }

  if (e->count ().initialized_p () && do_details)
    {
      fputs (" count:", file);
      e->count ().dump (file);
    }

  if (e->flags && do_details)
    {
      static const char * const bitnames[] =
	{
#define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
	  NULL
#undef DEF_EDGE_FLAG
	};
      bool comma = false;
      int i, flags = e->flags;

      gcc_assert (e->flags <= EDGE_ALL_FLAGS);
      fputs (" (", file);
      for (i = 0; flags; i++)
	if (flags & (1 << i))
	  {
	    flags &= ~(1 << i);

	    if (comma)
	      fputc (',', file);
	    fputs (bitnames[i], file);
	    comma = true;
	  }

      fputc (')', file);
    }
}

DEBUG_FUNCTION void
debug (edge_def &ref)
{
  /* FIXME (crowl): Is this desirable?  */
  dump_edge_info (stderr, &ref, 0, false);
  dump_edge_info (stderr, &ref, 0, true);
}

DEBUG_FUNCTION void
debug (edge_def *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Simple routines to easily allocate AUX fields of basic blocks.  */

static struct obstack block_aux_obstack;
static void *first_block_aux_obj = 0;
static struct obstack edge_aux_obstack;
static void *first_edge_aux_obj = 0;

/* Allocate a memory block of SIZE as BB->aux.  The obstack must
   be first initialized by alloc_aux_for_blocks.  */

static void
alloc_aux_for_block (basic_block bb, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!bb->aux && first_block_aux_obj);
  bb->aux = obstack_alloc (&block_aux_obstack, size);
  memset (bb->aux, 0, size);
}

/* Initialize the block_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_block for each basic block.  */

void
alloc_aux_for_blocks (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&block_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_block_aux_obj);

  first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_ALL_BB_FN (bb, cfun)
	alloc_aux_for_block (bb, size);
    }
}

/* Clear AUX pointers of all blocks.  */

void
clear_aux_for_blocks (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->aux = NULL;
}

/* Free data allocated in block_aux_obstack and clear AUX pointers
   of all blocks.  */

void
free_aux_for_blocks (void)
{
  gcc_assert (first_block_aux_obj);
  obstack_free (&block_aux_obstack, first_block_aux_obj);
  first_block_aux_obj = NULL;

  clear_aux_for_blocks ();
}
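
/* A minimal usage sketch, assuming a pass that wants per-block scratch
   data; it brackets its work with the allocators above:

     alloc_aux_for_blocks (sizeof (int));
     basic_block bb;
     FOR_EACH_BB_FN (bb, cfun)
       *(int *) bb->aux = 0;   // already zeroed; shown for the cast idiom
     ...
     free_aux_for_blocks ();

   Because every AUX object lives on the shared obstack, the fields must be
   released together through free_aux_for_blocks; individual entries cannot
   be freed.  */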

/* Allocate a memory block of SIZE as E->aux.  The obstack must
   be first initialized by alloc_aux_for_edges.  */

void
alloc_aux_for_edge (edge e, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!e->aux && first_edge_aux_obj);
  e->aux = obstack_alloc (&edge_aux_obstack, size);
  memset (e->aux, 0, size);
}

/* Initialize the edge_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_edge for each edge.  */

void
alloc_aux_for_edges (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&edge_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_edge_aux_obj);

  first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    alloc_aux_for_edge (e, size);
	}
    }
}

/* Clear AUX pointers of all edges.  */

void
clear_aux_for_edges (void)
{
  basic_block bb;
  edge e;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
}

/* Free data allocated in edge_aux_obstack and clear AUX pointers
   of all edges.  */

void
free_aux_for_edges (void)
{
  gcc_assert (first_edge_aux_obj);
  obstack_free (&edge_aux_obstack, first_edge_aux_obj);
  first_edge_aux_obj = NULL;

  clear_aux_for_edges ();
}

DEBUG_FUNCTION void
debug_bb (basic_block bb)
{
  dump_bb (stderr, bb, 0, dump_flags);
}

DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
  debug_bb (bb);
  return bb;
}

/* Dumps cfg related information about basic block BB to OUTF.
   If HEADER is true, dump things that appear before the instructions
   contained in BB.  If FOOTER is true, dump things that appear after.
   Flags are the TDF_* masks as documented in dumpfile.h.
   NB: With TDF_DETAILS, it is assumed that cfun is available, so
   that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE.  */

void
dump_bb_info (FILE *outf, basic_block bb, int indent, dump_flags_t flags,
	      bool do_header, bool do_footer)
{
  edge_iterator ei;
  edge e;
  static const char * const bb_bitnames[] =
    {
#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
      NULL
#undef DEF_BASIC_BLOCK_FLAG
    };
  const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
  bool first;
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  gcc_assert (bb->flags <= BB_ALL_FLAGS);

  if (do_header)
    {
      unsigned i;

      fputs (";; ", outf);
      fprintf (outf, "%sbasic block %d, loop depth %d",
	       s_indent, bb->index, bb_loop_depth (bb));
      if (flags & TDF_DETAILS)
	{
	  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
	  if (bb->count.initialized_p ())
	    {
	      fputs (", count ", outf);
	      bb->count.dump (outf);
	    }
	  fprintf (outf, ", freq %i", bb->frequency);
	  if (maybe_hot_bb_p (fun, bb))
	    fputs (", maybe hot", outf);
	  if (probably_never_executed_bb_p (fun, bb))
	    fputs (", probably never executed", outf);
	}
      fputc ('\n', outf);

      if (flags & TDF_DETAILS)
	{
	  check_bb_profile (bb, outf, indent);
	  fputs (";; ", outf);
	  fprintf (outf, "%s prev block ", s_indent);
	  if (bb->prev_bb)
	    fprintf (outf, "%d", bb->prev_bb->index);
	  else
	    fprintf (outf, "(nil)");
	  fprintf (outf, ", next block ");
	  if (bb->next_bb)
	    fprintf (outf, "%d", bb->next_bb->index);
	  else
	    fprintf (outf, "(nil)");

	  fputs (", flags:", outf);
	  first = true;
	  for (i = 0; i < n_bitnames; i++)
	    if (bb->flags & (1 << i))
	      {
		if (first)
		  fputs (" (", outf);
		else
		  fputs (", ", outf);
		first = false;
		fputs (bb_bitnames[i], outf);
	      }
	  if (!first)
	    fputc (')', outf);
	  fputc ('\n', outf);
	}

      fputs (";; ", outf);
      fprintf (outf, "%s pred:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 0);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }

  if (do_footer)
    {
      fputs (";; ", outf);
      fprintf (outf, "%s succ:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 1);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }
}

/* Dumps a brief description of cfg to FILE.  */

void
brief_dump_cfg (FILE *file, dump_flags_t flags)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      dump_bb_info (file, bb, 0, flags & TDF_DETAILS, true, true);
    }
}

/* An edge, with frequency FREQUENCY and count COUNT, that originally
   entered BB has been proved to leave the block via TAKEN_EDGE.  Update the
   profile of BB so that the edge can be redirected to the destination of
   TAKEN_EDGE.

   This function may leave the profile inconsistent if TAKEN_EDGE's
   frequency or count is believed to be lower than FREQUENCY or COUNT
   respectively.  */
void
update_bb_profile_for_threading (basic_block bb, int edge_frequency,
				 profile_count count, edge taken_edge)
{
  edge c;
  profile_probability prob;
  edge_iterator ei;

  if (bb->count < count)
    {
      if (dump_file)
	fprintf (dump_file, "bb %i count became negative after threading",
		 bb->index);
    }
  bb->count -= count;

  bb->frequency -= edge_frequency;
  if (bb->frequency < 0)
    bb->frequency = 0;

  /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
     Watch for overflows.  */
  if (bb->frequency)
    /* FIXME: We should get edge frequency as count.  */
    prob = profile_probability::probability_in_gcov_type
		 (edge_frequency, bb->frequency);
  else
    prob = profile_probability::never ();
  if (prob > taken_edge->probability)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "Jump threading proved probability of edge "
		   "%i->%i too small (it is ",
		   taken_edge->src->index, taken_edge->dest->index);
	  taken_edge->probability.dump (dump_file);
	  fprintf (dump_file, " should be ");
	  prob.dump (dump_file);
	  fprintf (dump_file, ")\n");
	}
      prob = taken_edge->probability.apply_scale (6, 8);
    }

  /* Now rescale the probabilities.  */
  taken_edge->probability -= prob;
  prob = prob.invert ();
  if (prob == profile_probability::never ())
    {
      if (dump_file)
	fprintf (dump_file, "Edge frequencies of bb %i has been reset, "
		 "frequency of block should end up being 0, it is %i\n",
		 bb->index, bb->frequency);
      EDGE_SUCC (bb, 0)->probability = profile_probability::guessed_always ();
      ei = ei_start (bb->succs);
      ei_next (&ei);
      for (; (c = ei_safe_edge (ei)); ei_next (&ei))
	c->probability = profile_probability::guessed_never ();
    }
  else if (!(prob == profile_probability::always ()))
    {
      FOR_EACH_EDGE (c, ei, bb->succs)
	c->probability /= prob;
    }

  gcc_assert (bb == taken_edge->src);
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in int arithmetic.  May lose some accuracy.  */
void
scale_bbs_frequencies_int (basic_block *bbs, int nbbs, int num, int den)
{
  int i;
  if (num < 0)
    num = 0;

  /* Scale NUM and DEN to avoid overflows.  Frequencies are on the order of
     10^4; if we make DEN <= 10^3, we can afford to upscale by 100
     and still safely fit in int during calculations.  */
  if (den > 1000)
    {
      if (num > 1000000)
	return;

      num = RDIV (1000 * num, den);
      den = 1000;
    }
  if (num > 100 * den)
    return;

  for (i = 0; i < nbbs; i++)
    {
      bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
      /* Make sure the frequencies do not grow over BB_FREQ_MAX.  */
      if (bbs[i]->frequency > BB_FREQ_MAX)
	bbs[i]->frequency = BB_FREQ_MAX;
      bbs[i]->count = bbs[i]->count.apply_scale (num, den);
    }
}
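
/* A worked example of the scaling above (numbers chosen for illustration):
   scaling by NUM/DEN = 3/2 turns a block frequency of 4000 into
   RDIV (4000 * 3, 2) = 6000, capped at BB_FREQ_MAX.  The guards keep the
   intermediate product in int range: with DEN capped at 1000 and NUM at
   100 * DEN, the product is at most 10^4 * 10^5 = 10^9.  */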

/* Numbers smaller than this value are safe to multiply without getting
   64bit overflow.  */
#define MAX_SAFE_MULTIPLIER (1 << (sizeof (int64_t) * 4 - 1))

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in gcov_type arithmetic.  More accurate than previous
   function but considerably slower.  */
void
scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num,
				 gcov_type den)
{
  int i;
  gcov_type fraction = RDIV (num * 65536, den);

  gcc_assert (fraction >= 0);

  if (num < MAX_SAFE_MULTIPLIER)
    for (i = 0; i < nbbs; i++)
      {
	bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
	  bbs[i]->count = bbs[i]->count.apply_scale (num, den);
	else
	  bbs[i]->count = bbs[i]->count.apply_scale (fraction, 65536);
      }
   else
    for (i = 0; i < nbbs; i++)
      {
	if (sizeof (gcov_type) > sizeof (int))
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	else
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * fraction, 65536);
	bbs[i]->count = bbs[i]->count.apply_scale (fraction, 65536);
      }
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in profile_count arithmetic.  More accurate than previous
   function but considerably slower.  */
void
scale_bbs_frequencies_profile_count (basic_block *bbs, int nbbs,
				     profile_count num, profile_count den)
{
  int i;

  for (i = 0; i < nbbs; i++)
    {
      bbs[i]->frequency = RDIV (bbs[i]->frequency * num.to_gcov_type (),
				den.to_gcov_type ());
      bbs[i]->count = bbs[i]->count.apply_scale (num, den);
    }
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by probability P.  */
void
scale_bbs_frequencies (basic_block *bbs, int nbbs,
		       profile_probability p)
{
  int i;

  for (i = 0; i < nbbs; i++)
    {
      bbs[i]->frequency = p.apply (bbs[i]->frequency);
      bbs[i]->count = bbs[i]->count.apply_probability (p);
    }
}

/* Helper types for hash tables.  */

struct htab_bb_copy_original_entry
{
  /* Block we are attaching info to.  */
  int index1;
  /* Index of original or copy (depending on the hashtable) */
  int index2;
};

struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
  static inline hashval_t hash (const htab_bb_copy_original_entry *);
  static inline bool equal (const htab_bb_copy_original_entry *existing,
			    const htab_bb_copy_original_entry * candidate);
};

inline hashval_t
bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
{
  return data->index1;
}

inline bool
bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
		       const htab_bb_copy_original_entry *data2)
{
  return data->index1 == data2->index1;
}

/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;

/* Initialize the data structures to maintain mapping between blocks
   and their copies.  */
void
initialize_original_copy_tables (void)
{
  original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
    ("original_copy");
  bb_original = new hash_table<bb_copy_hasher> (10);
  bb_copy = new hash_table<bb_copy_hasher> (10);
  loop_copy = new hash_table<bb_copy_hasher> (10);
}

/* Reset the data structures to maintain mapping between blocks and
   their copies.  */

void
reset_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  bb_original->empty ();
  bb_copy->empty ();
  loop_copy->empty ();
}

/* Free the data structures to maintain mapping between blocks and
   their copies.  */
void
free_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  delete bb_copy;
  bb_copy = NULL;
  delete bb_original;
  bb_original = NULL;
  delete loop_copy;
  loop_copy = NULL;
  delete original_copy_bb_pool;
  original_copy_bb_pool = NULL;
}

/* Return true iff we have had a call to initialize_original_copy_tables
   without a corresponding call to free_original_copy_tables.  */

bool
original_copy_tables_initialized_p (void)
{
  return original_copy_bb_pool != NULL;
}

/* Removes the value associated with OBJ from table TAB.  */

static void
copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
  htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key, *elt;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, NO_INSERT);
  if (!slot)
    return;

  elt = *slot;
  tab->clear_slot (slot);
  original_copy_bb_pool->remove (elt);
}

/* Sets the value associated with OBJ in table TAB to VAL.
   Do nothing when data structures are not initialized.  */

static void
copy_original_table_set (hash_table<bb_copy_hasher> *tab,
			 unsigned obj, unsigned val)
{
  struct htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, INSERT);
  if (!*slot)
    {
      *slot = original_copy_bb_pool->allocate ();
      (*slot)->index1 = obj;
    }
  (*slot)->index2 = val;
}

/* Set original for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_original (basic_block bb, basic_block original)
{
  copy_original_table_set (bb_original, bb->index, original->index);
}

/* Get the original basic block.  */
basic_block
get_bb_original (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_original->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_copy (basic_block bb, basic_block copy)
{
  copy_original_table_set (bb_copy, bb->index, copy->index);
}

/* Get the copy of basic block.  */
basic_block
get_bb_copy (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_copy->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for LOOP to COPY.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */

void
set_loop_copy (struct loop *loop, struct loop *copy)
{
  if (!copy)
    copy_original_table_clear (loop_copy, loop->num);
  else
    copy_original_table_set (loop_copy, loop->num, copy->num);
}

/* Get the copy of LOOP.  */

struct loop *
get_loop_copy (struct loop *loop)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = loop->num;
  entry = loop_copy->find (&key);
  if (entry)
    return get_loop (cfun, entry->index2);
  else
    return NULL;
}
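
/* A minimal usage sketch, assuming a pass that duplicates blocks (loop
   unrolling, tracer and friends work this way):

     initialize_original_copy_tables ();
     // for each duplicated block NEW_BB copied from OLD_BB:
     set_bb_original (new_bb, old_bb);
     set_bb_copy (old_bb, new_bb);
     // queries while the tables are live:
     basic_block orig = get_bb_original (new_bb);
     free_original_copy_tables ();

   The getters assert that the tables exist, so the bracketing is mandatory;
   the setters silently do nothing when the tables are not initialized.  */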