/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and
   analyze it.  All other modules should not transform the data structure
   directly but use the abstraction instead.  The file is supposed to be
   ordered bottom-up and should not contain any code dependent on a
   particular intermediate language (RTL or trees).

   Available functionality:
     - Initialization/deallocation
	 init_flow, clear_edges
     - Low level basic block manipulation
	 alloc_block, expunge_block
     - Edge manipulation
	 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
	 - Low level edge redirection (without updating instruction chain)
	     redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
     - Dumping and debugging
	 dump_flow_info, debug_flow_info, dump_edge_info
     - Allocation of AUX fields for basic blocks
	 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
     - clear_bb_flags
     - Consistency checking
	 verify_flow_info
     - Dumping and debugging
	 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n

   TODO: Document these "Available functionality" functions in the files
   that implement them.
 */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "alloc-pool.h"
#include "alias.h"
#include "cfghooks.h"
#include "tree.h"
#include "hard-reg-set.h"
#include "df.h"
#include "options.h"
#include "cfganal.h"
#include "cfgloop.h" /* FIXME: For struct loop.  */
#include "dumpfile.h"


#define RDIV(X,Y) (((X) + (Y) / 2) / (Y))
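/* For example, RDIV (7, 2) is 4 and RDIV (5, 3) is 2: adding half of the
   divisor before dividing makes the integer division round to the nearest
   value instead of truncating toward zero.  */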

/* Called once at initialization time.  */

void
init_flow (struct function *the_fun)
{
  if (!the_fun->cfg)
    the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
  n_edges_for_fn (the_fun) = 0;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)
    = ggc_cleared_alloc<basic_block_def> ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
  EXIT_BLOCK_PTR_FOR_FN (the_fun)
    = ggc_cleared_alloc<basic_block_def> ();
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (the_fun);
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}

/* Helper function for remove_edge and clear_edges.  Frees edge structure
   without actually removing it from the pred/succ arrays.  */

static void
free_edge (edge e)
{
  n_edges_for_fn (cfun)--;
  ggc_free (e);
}

/* Free the memory associated with the edge structures.  */

void
clear_edges (void)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	free_edge (e);
      vec_safe_truncate (bb->succs, 0);
      vec_safe_truncate (bb->preds, 0);
    }

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    free_edge (e);
  vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, 0);
  vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs, 0);

  gcc_assert (!n_edges_for_fn (cfun));
}

/* Allocate memory for basic_block.  */

basic_block
alloc_block (void)
{
  basic_block bb;
  bb = ggc_cleared_alloc<basic_block_def> ();
  return bb;
}

/* Link block B to chain after AFTER.  */
void
link_block (basic_block b, basic_block after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}

/* Unlink block B from chain.  */
void
unlink_block (basic_block b)
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
  b->prev_bb = NULL;
  b->next_bb = NULL;
}

/* Sequentially order blocks and compact the arrays.  */
void
compact_blocks (void)
{
  int i;

  SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
  SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));

  if (df)
    df_compact_blocks ();
  else
    {
      basic_block bb;

      i = NUM_FIXED_BLOCKS;
      FOR_EACH_BB_FN (bb, cfun)
	{
	  SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
	  bb->index = i;
	  i++;
	}
      gcc_assert (i == n_basic_blocks_for_fn (cfun));

      for (; i < last_basic_block_for_fn (cfun); i++)
	SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
    }
  last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}

/* Remove block B from the basic block array.  */

void
expunge_block (basic_block b)
{
  unlink_block (b);
  SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
  n_basic_blocks_for_fn (cfun)--;
  /* We should be able to ggc_free here, but we are not.
     Dead SSA_NAMEs are left pointing to dead statements that in turn point
     to dead basic blocks, which makes the garbage collector crash.
     We should release all dead SSA_NAMEs and at the same time clear out
     the BB pointer of dead statements consistently.  */
}

/* Connect E to E->src.  */

static inline void
connect_src (edge e)
{
  vec_safe_push (e->src->succs, e);
  df_mark_solutions_dirty ();
}

/* Connect E to E->dest.  */

static inline void
connect_dest (edge e)
{
  basic_block dest = e->dest;
  vec_safe_push (dest->preds, e);
  e->dest_idx = EDGE_COUNT (dest->preds) - 1;
  df_mark_solutions_dirty ();
}

/* Disconnect edge E from E->src.  */

static inline void
disconnect_src (edge e)
{
  basic_block src = e->src;
  edge_iterator ei;
  edge tmp;

  for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
    {
      if (tmp == e)
	{
	  src->succs->unordered_remove (ei.index);
	  df_mark_solutions_dirty ();
	  return;
	}
      else
	ei_next (&ei);
    }

  gcc_unreachable ();
}

/* Disconnect edge E from E->dest.  */

static inline void
disconnect_dest (edge e)
{
  basic_block dest = e->dest;
  unsigned int dest_idx = e->dest_idx;

  dest->preds->unordered_remove (dest_idx);

  /* If we removed an edge in the middle of the edge vector, we need
     to update dest_idx of the edge that moved into the "hole".  */
  if (dest_idx < EDGE_COUNT (dest->preds))
    EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
  df_mark_solutions_dirty ();
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return newly
   created edge.  Use this only if you are sure that this edge can't
   possibly already exist.  */

edge
unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
  edge e;
  e = ggc_cleared_alloc<edge_def> ();
  n_edges_for_fn (cfun)++;

  e->src = src;
  e->dest = dst;
  e->flags = flags;

  connect_src (e);
  connect_dest (e);

  execute_on_growing_pred (e);
  return e;
}

/* Create an edge connecting SRC and DST with FLAGS, optionally using
   edge cache EDGE_CACHE.  Return the new edge, or NULL if it already
   exists.  */

edge
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
  if (edge_cache == NULL
      || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return make_edge (src, dst, flags);

  /* Does the requested edge already exist?  */
  if (! bitmap_bit_p (edge_cache, dst->index))
    {
      /* The edge does not exist.  Create one and update the
	 cache.  */
      bitmap_set_bit (edge_cache, dst->index);
      return unchecked_make_edge (src, dst, flags);
    }

  /* At this point, we know that the requested edge exists.  Adjust
     flags if necessary.  */
  if (flags)
    {
      edge e = find_edge (src, dst);
      e->flags |= flags;
    }

  return NULL;
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return newly
   created edge, or NULL if it already exists.  */

edge
make_edge (basic_block src, basic_block dest, int flags)
{
  edge e = find_edge (src, dest);

  /* Make sure we don't add duplicate edges.  */
  if (e)
    {
      e->flags |= flags;
      return NULL;
    }

  return unchecked_make_edge (src, dest, flags);
}
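
/* As an illustrative sketch (not part of this file's interface), a caller
   that splits control flow typically pairs make_edge with an explicit
   probability update; COND_BB and THEN_BB are hypothetical blocks:

     edge e = make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
     if (e)
       e->probability = REG_BR_PROB_BASE / 2;

   Since make_edge returns NULL when the edge already existed, callers must
   be prepared for either outcome.  */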

/* Create an edge connecting SRC to DEST and set probability by knowing
   that it is the single edge leaving SRC.  */

edge
make_single_succ_edge (basic_block src, basic_block dest, int flags)
{
  edge e = make_edge (src, dest, flags);

  e->probability = REG_BR_PROB_BASE;
  e->count = src->count;
  return e;
}

/* This function will remove an edge from the flow graph.  */

void
remove_edge_raw (edge e)
{
  remove_predictions_associated_with_edge (e);
  execute_on_shrinking_pred (e);

  disconnect_src (e);
  disconnect_dest (e);

  free_edge (e);
}

/* Redirect an edge's successor from one block to another.  */

void
redirect_edge_succ (edge e, basic_block new_succ)
{
  execute_on_shrinking_pred (e);

  disconnect_dest (e);

  e->dest = new_succ;

  /* Reconnect the edge to the new successor block.  */
  connect_dest (e);

  execute_on_growing_pred (e);
}

/* Redirect an edge's predecessor from one block to another.  */

void
redirect_edge_pred (edge e, basic_block new_pred)
{
  disconnect_src (e);

  e->src = new_pred;

  /* Reconnect the edge to the new predecessor block.  */
  connect_src (e);
}

/* Clear all basic block flags that do not have to be preserved.  */
void
clear_bb_flags (void)
{
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    bb->flags &= BB_FLAGS_TO_PRESERVE;
}

/* Check the consistency of profile information.  We can't do that
   in verify_flow_info, as the counts may become invalid for incompletely
   solved graphs, later elimination of conditionals, or roundoff errors.
   It is still practical to have them reported for debugging of simple
   testcases.  */
static void
check_bb_profile (basic_block bb, FILE * file, int indent, int flags)
{
  edge e;
  int sum = 0;
  gcov_type lsum;
  edge_iterator ei;
  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    return;

  if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	sum += e->probability;
      if (EDGE_COUNT (bb->succs) && abs (sum - REG_BR_PROB_BASE) > 100)
	fprintf (file, "%s%sInvalid sum of outgoing probabilities %.1f%%\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 sum * 100.0 / REG_BR_PROB_BASE);
      lsum = 0;
      FOR_EACH_EDGE (e, ei, bb->succs)
	lsum += e->count;
      if (EDGE_COUNT (bb->succs)
	  && (lsum - bb->count > 100 || lsum - bb->count < -100))
	fprintf (file, "%s%sInvalid sum of outgoing counts %i, should be %i\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 (int) lsum, (int) bb->count);
    }
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
    {
      sum = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
	sum += EDGE_FREQUENCY (e);
      if (abs (sum - bb->frequency) > 100)
	fprintf (file,
		 "%s%sInvalid sum of incoming frequencies %i, should be %i\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 sum, bb->frequency);
      lsum = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
	lsum += e->count;
      if (lsum - bb->count > 100 || lsum - bb->count < -100)
	fprintf (file, "%s%sInvalid sum of incoming counts %i, should be %i\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 (int) lsum, (int) bb->count);
    }
  if (BB_PARTITION (bb) == BB_COLD_PARTITION)
    {
      /* Warn about inconsistencies in the partitioning that are
         currently caused by profile insanities created via optimization.  */
      if (!probably_never_executed_bb_p (fun, bb))
        fprintf (file, "%s%sBlock in cold partition with hot count\n",
                 (flags & TDF_COMMENT) ? ";; " : "", s_indent);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (!probably_never_executed_edge_p (fun, e))
            fprintf (file,
                     "%s%sBlock in cold partition with incoming hot edge\n",
                     (flags & TDF_COMMENT) ? ";; " : "", s_indent);
        }
    }
}

void
dump_edge_info (FILE *file, edge e, int flags, int do_succ)
{
  basic_block side = (do_succ ? e->dest : e->src);
  bool do_details = false;
  
  if ((flags & TDF_DETAILS) != 0
      && (flags & TDF_SLIM) == 0)
    do_details = true;

  if (side->index == ENTRY_BLOCK)
    fputs (" ENTRY", file);
  else if (side->index == EXIT_BLOCK)
    fputs (" EXIT", file);
  else
    fprintf (file, " %d", side->index);

  if (e->probability && do_details)
    fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE);

  if (e->count && do_details)
    {
      fputs (" count:", file);
      fprintf (file, "%" PRId64, e->count);
    }

  if (e->flags && do_details)
    {
      static const char * const bitnames[] =
	{
#define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
	  NULL
#undef DEF_EDGE_FLAG
	};
      bool comma = false;
      int i, flags = e->flags;

      gcc_assert (e->flags <= EDGE_ALL_FLAGS);
      fputs (" (", file);
      for (i = 0; flags; i++)
	if (flags & (1 << i))
	  {
	    flags &= ~(1 << i);

	    if (comma)
	      fputc (',', file);
	    fputs (bitnames[i], file);
	    comma = true;
	  }

      fputc (')', file);
    }
}

DEBUG_FUNCTION void
debug (edge_def &ref)
{
  /* FIXME (crowl): Is this desirable?  */
  dump_edge_info (stderr, &ref, 0, false);
  dump_edge_info (stderr, &ref, 0, true);
}

DEBUG_FUNCTION void
debug (edge_def *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Simple routines to easily allocate AUX fields of basic blocks.  */

static struct obstack block_aux_obstack;
static void *first_block_aux_obj = 0;
static struct obstack edge_aux_obstack;
static void *first_edge_aux_obj = 0;

/* Allocate a memory block of SIZE as BB->aux.  The obstack must
   be first initialized by alloc_aux_for_blocks.  */

static void
alloc_aux_for_block (basic_block bb, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!bb->aux && first_block_aux_obj);
  bb->aux = obstack_alloc (&block_aux_obstack, size);
  memset (bb->aux, 0, size);
}

/* Initialize the block_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_block for each basic block.  */

void
alloc_aux_for_blocks (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&block_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_block_aux_obj);

  first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_ALL_BB_FN (bb, cfun)
	alloc_aux_for_block (bb, size);
    }
}

/* Clear AUX pointers of all blocks.  */

void
clear_aux_for_blocks (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->aux = NULL;
}

/* Free data allocated in block_aux_obstack and clear AUX pointers
   of all blocks.  */

void
free_aux_for_blocks (void)
{
  gcc_assert (first_block_aux_obj);
  obstack_free (&block_aux_obstack, first_block_aux_obj);
  first_block_aux_obj = NULL;

  clear_aux_for_blocks ();
}
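
/* A minimal usage sketch of the AUX helpers above (illustrative only;
   compute_something stands for whatever per-block analysis a pass needs
   and is not a real function):

     basic_block bb;
     alloc_aux_for_blocks (sizeof (int));
     FOR_EACH_BB_FN (bb, cfun)
       *(int *) bb->aux = compute_something (bb);
     ...
     free_aux_for_blocks ();  */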

/* Allocate a memory block of SIZE as E->aux.  The obstack must
   be first initialized by alloc_aux_for_edges.  */

void
alloc_aux_for_edge (edge e, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!e->aux && first_edge_aux_obj);
  e->aux = obstack_alloc (&edge_aux_obstack, size);
  memset (e->aux, 0, size);
}

/* Initialize the edge_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_edge for each edge in the CFG.  */

void
alloc_aux_for_edges (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&edge_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_edge_aux_obj);

  first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    alloc_aux_for_edge (e, size);
	}
    }
}

/* Clear AUX pointers of all edges.  */

void
clear_aux_for_edges (void)
{
  basic_block bb;
  edge e;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
}

/* Free data allocated in edge_aux_obstack and clear AUX pointers
   of all edges.  */

void
free_aux_for_edges (void)
{
  gcc_assert (first_edge_aux_obj);
  obstack_free (&edge_aux_obstack, first_edge_aux_obj);
  first_edge_aux_obj = NULL;

  clear_aux_for_edges ();
}

DEBUG_FUNCTION void
debug_bb (basic_block bb)
{
  dump_bb (stderr, bb, 0, dump_flags);
}

DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
  debug_bb (bb);
  return bb;
}
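
/* Both debug_bb and debug_bb_n are intended to be called from the debugger;
   for example (illustrative), "call debug_bb_n (3)" from gdb dumps basic
   block 3 of the current function to stderr.  */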

/* Dumps cfg related information about basic block BB to OUTF.
   If HEADER is true, dump things that appear before the instructions
   contained in BB.  If FOOTER is true, dump things that appear after.
   Flags are the TDF_* masks as documented in dumpfile.h.
   NB: With TDF_DETAILS, it is assumed that cfun is available, so
   that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE.  */

void
dump_bb_info (FILE *outf, basic_block bb, int indent, int flags,
	      bool do_header, bool do_footer)
{
  edge_iterator ei;
  edge e;
  static const char * const bb_bitnames[] =
    {
#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
      NULL
#undef DEF_BASIC_BLOCK_FLAG
    };
  const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
  bool first;
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  gcc_assert (bb->flags <= BB_ALL_FLAGS);

  if (do_header)
    {
      unsigned i;

      if (flags & TDF_COMMENT)
	fputs (";; ", outf);
      fprintf (outf, "%sbasic block %d, loop depth %d",
	       s_indent, bb->index, bb_loop_depth (bb));
      if (flags & TDF_DETAILS)
	{
	  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
	  fprintf (outf, ", count " "%" PRId64,
		   (int64_t) bb->count);
	  fprintf (outf, ", freq %i", bb->frequency);
	  if (maybe_hot_bb_p (fun, bb))
	    fputs (", maybe hot", outf);
	  if (probably_never_executed_bb_p (fun, bb))
	    fputs (", probably never executed", outf);
	}
      fputc ('\n', outf);

      if (flags & TDF_DETAILS)
	{
	  check_bb_profile (bb, outf, indent, flags);
	  if (flags & TDF_COMMENT)
	    fputs (";; ", outf);
	  fprintf (outf, "%s prev block ", s_indent);
	  if (bb->prev_bb)
	    fprintf (outf, "%d", bb->prev_bb->index);
	  else
	    fprintf (outf, "(nil)");
	  fprintf (outf, ", next block ");
	  if (bb->next_bb)
	    fprintf (outf, "%d", bb->next_bb->index);
	  else
	    fprintf (outf, "(nil)");

	  fputs (", flags:", outf);
	  first = true;
	  for (i = 0; i < n_bitnames; i++)
	    if (bb->flags & (1 << i))
	      {
		if (first)
		  fputs (" (", outf);
		else
		  fputs (", ", outf);
		first = false;
		fputs (bb_bitnames[i], outf);
	      }
	  if (!first)
	    fputc (')', outf);
	  fputc ('\n', outf);
	}

      if (flags & TDF_COMMENT)
	fputs (";; ", outf);
      fprintf (outf, "%s pred:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (! first)
	    {
	      if (flags & TDF_COMMENT)
		fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 0);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }

  if (do_footer)
    {
      if (flags & TDF_COMMENT)
	fputs (";; ", outf);
      fprintf (outf, "%s succ:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
	  if (! first)
	    {
	      if (flags & TDF_COMMENT)
		fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 1);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }
}

/* Dumps a brief description of cfg to FILE.  */

void
brief_dump_cfg (FILE *file, int flags)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      dump_bb_info (file, bb, 0,
		    flags & (TDF_COMMENT | TDF_DETAILS),
		    true, true);
    }
}

/* An edge with frequency EDGE_FREQUENCY and count COUNT that originally
   entered BB has been proved to leave the block through TAKEN_EDGE.
   Update the profile of BB so that the edge can be redirected to the
   destination of TAKEN_EDGE.

   This function may leave the profile inconsistent in the case TAKEN_EDGE's
   frequency or count is believed to be lower than EDGE_FREQUENCY or COUNT
   respectively.  */
void
update_bb_profile_for_threading (basic_block bb, int edge_frequency,
				 gcov_type count, edge taken_edge)
{
  edge c;
  int prob;
  edge_iterator ei;

  bb->count -= count;
  if (bb->count < 0)
    {
      if (dump_file)
	fprintf (dump_file, "bb %i count became negative after threading",
		 bb->index);
      bb->count = 0;
    }

  /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
     Watch for overflows.  */
  if (bb->frequency)
    prob = GCOV_COMPUTE_SCALE (edge_frequency, bb->frequency);
  else
    prob = 0;
  if (prob > taken_edge->probability)
    {
      if (dump_file)
	fprintf (dump_file, "Jump threading proved probability of edge "
		 "%i->%i too small (it is %i, should be %i).\n",
		 taken_edge->src->index, taken_edge->dest->index,
		 taken_edge->probability, prob);
      prob = taken_edge->probability;
    }

  /* Now rescale the probabilities.  */
  taken_edge->probability -= prob;
  prob = REG_BR_PROB_BASE - prob;
  bb->frequency -= edge_frequency;
  if (bb->frequency < 0)
    bb->frequency = 0;
  if (prob <= 0)
    {
      if (dump_file)
	fprintf (dump_file, "Edge frequencies of bb %i have been reset, "
		 "frequency of block should end up being 0, it is %i\n",
		 bb->index, bb->frequency);
      EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
      ei = ei_start (bb->succs);
      ei_next (&ei);
      for (; (c = ei_safe_edge (ei)); ei_next (&ei))
	c->probability = 0;
    }
  else if (prob != REG_BR_PROB_BASE)
    {
      int scale = RDIV (65536 * REG_BR_PROB_BASE, prob);

      FOR_EACH_EDGE (c, ei, bb->succs)
	{
	  /* Protect from overflow due to additional scaling.  */
	  if (c->probability > prob)
	    c->probability = REG_BR_PROB_BASE;
	  else
	    {
	      c->probability = RDIV (c->probability * scale, 65536);
	      if (c->probability > REG_BR_PROB_BASE)
		c->probability = REG_BR_PROB_BASE;
	    }
	}
    }

  gcc_assert (bb == taken_edge->src);
  taken_edge->count -= count;
  if (taken_edge->count < 0)
    {
      if (dump_file)
	fprintf (dump_file, "edge %i->%i count became negative after threading",
		 taken_edge->src->index, taken_edge->dest->index);
      taken_edge->count = 0;
    }
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in int arithmetic.  May lose some accuracy.  */
void
scale_bbs_frequencies_int (basic_block *bbs, int nbbs, int num, int den)
{
  int i;
  edge e;
  if (num < 0)
    num = 0;

  /* Scale NUM and DEN to avoid overflows.  Frequencies are of the order of
     10^4; if we make DEN <= 10^3, we can afford to upscale by 100
     and still safely fit in an int during the calculations.  */
  if (den > 1000)
    {
      if (num > 1000000)
	return;

      num = RDIV (1000 * num, den);
      den = 1000;
    }
  if (num > 100 * den)
    return;

  for (i = 0; i < nbbs; i++)
    {
      edge_iterator ei;
      bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
      /* Make sure the frequencies do not grow over BB_FREQ_MAX.  */
      if (bbs[i]->frequency > BB_FREQ_MAX)
	bbs[i]->frequency = BB_FREQ_MAX;
      bbs[i]->count = RDIV (bbs[i]->count * num, den);
      FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	e->count = RDIV (e->count * num, den);
    }
}
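
/* For example (illustrative), scale_bbs_frequencies_int (bbs, nbbs, 1, 2)
   halves the frequency and count of every block in BBS and of its outgoing
   edges, e.g. after proving that the region is entered half as often.  */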

/* Numbers smaller than this value are safe to multiply without getting
   a 64-bit overflow: the constant evaluates to 1 << 31, so the product of
   two such values stays below 2^62 and fits in a signed 64-bit gcov_type.  */
#define MAX_SAFE_MULTIPLIER (1 << (sizeof (int64_t) * 4 - 1))

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in gcov_type arithmetic.  More accurate than the previous
   function, but considerably slower.  */
void
scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num,
				 gcov_type den)
{
  int i;
  edge e;
  gcov_type fraction = RDIV (num * 65536, den);

  gcc_assert (fraction >= 0);

  if (num < MAX_SAFE_MULTIPLIER)
    for (i = 0; i < nbbs; i++)
      {
	edge_iterator ei;
	bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
	  bbs[i]->count = RDIV (bbs[i]->count * num, den);
	else
	  bbs[i]->count = RDIV (bbs[i]->count * fraction, 65536);
	FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	  if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
	    e->count = RDIV (e->count * num, den);
	  else
	    e->count = RDIV (e->count * fraction, 65536);
      }
   else
    for (i = 0; i < nbbs; i++)
      {
	edge_iterator ei;
	if (sizeof (gcov_type) > sizeof (int))
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	else
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * fraction, 65536);
	bbs[i]->count = RDIV (bbs[i]->count * fraction, 65536);
	FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	  e->count = RDIV (e->count * fraction, 65536);
      }
}

/* Helper types for hash tables.  */

struct htab_bb_copy_original_entry
{
  /* Block we are attaching info to.  */
  int index1;
  /* Index of original or copy (depending on the hashtable).  */
  int index2;
};

struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
  static inline hashval_t hash (const htab_bb_copy_original_entry *);
  static inline bool equal (const htab_bb_copy_original_entry *existing,
			    const htab_bb_copy_original_entry * candidate);
};

inline hashval_t
bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
{
  return data->index1;
}

inline bool
bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
		       const htab_bb_copy_original_entry *data2)
{
  return data->index1 == data2->index1;
}

/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;

/* Initialize the data structures to maintain mapping between blocks
   and their copies.  */
void
initialize_original_copy_tables (void)
{
  original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
    ("original_copy", 10);
  bb_original = new hash_table<bb_copy_hasher> (10);
  bb_copy = new hash_table<bb_copy_hasher> (10);
  loop_copy = new hash_table<bb_copy_hasher> (10);
}

/* Free the data structures to maintain mapping between blocks and
   their copies.  */
void
free_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  delete bb_copy;
  bb_copy = NULL;
  delete bb_original;
  bb_original = NULL;
  delete loop_copy;
  loop_copy = NULL;
  delete original_copy_bb_pool;
  original_copy_bb_pool = NULL;
}
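
/* A typical (illustrative) lifetime of these tables in a pass that
   duplicates blocks; the exact call sequence is a sketch, not a
   prescription:

     initialize_original_copy_tables ();
     new_bb = duplicate_block (bb, e, after);
     gcc_assert (get_bb_original (new_bb) == bb);
     ...
     free_original_copy_tables ();

   duplicate_block (a cfghooks helper) records the mapping through
   set_bb_original and set_bb_copy; the assert above is only meant to show
   how the mapping is queried.  */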

/* Removes the value associated with OBJ from table TAB.  */

static void
copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
  htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key, *elt;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, NO_INSERT);
  if (!slot)
    return;

  elt = *slot;
  tab->clear_slot (slot);
  original_copy_bb_pool->remove (elt);
}

/* Sets the value associated with OBJ in table TAB to VAL.
   Do nothing when data structures are not initialized.  */

static void
copy_original_table_set (hash_table<bb_copy_hasher> *tab,
			 unsigned obj, unsigned val)
{
  struct htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
1112
  slot = tab->find_slot (&key, INSERT);
1113 1114
  if (!*slot)
    {
1115
      *slot = original_copy_bb_pool->allocate ();
1116 1117 1118 1119 1120
      (*slot)->index1 = obj;
    }
  (*slot)->index2 = val;
}

/* Set original for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_original (basic_block bb, basic_block original)
{
  copy_original_table_set (bb_original, bb->index, original->index);
}

/* Get the original basic block.  */
basic_block
get_bb_original (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_original->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_copy (basic_block bb, basic_block copy)
{
  copy_original_table_set (bb_copy, bb->index, copy->index);
}

/* Get the copy of basic block.  */
basic_block
get_bb_copy (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_copy->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for LOOP to COPY.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */

void
set_loop_copy (struct loop *loop, struct loop *copy)
{
  if (!copy)
    copy_original_table_clear (loop_copy, loop->num);
  else
    copy_original_table_set (loop_copy, loop->num, copy->num);
}

/* Get the copy of LOOP.  */

struct loop *
get_loop_copy (struct loop *loop)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = loop->num;
  entry = loop_copy->find (&key);
  if (entry)
    return get_loop (cfun, entry->index2);
  else
    return NULL;
}