/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and
   analyze it.  All other modules should not transform the data structure
   directly and use abstraction instead.  The file is supposed to be
   ordered bottom-up and should not contain any code dependent on a
   particular intermediate language (RTL or trees).

   Available functionality:
     - Initialization/deallocation
	 init_flow, clear_edges
     - Low level basic block manipulation
	 alloc_block, expunge_block
     - Edge manipulation
	 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
	 - Low level edge redirection (without updating instruction chain)
	     redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
     - Dumping and debugging
	 dump_flow_info, debug_flow_info, dump_edge_info
     - Allocation of AUX fields for basic blocks
	 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
     - clear_bb_flags
     - Consistency checking
	 verify_flow_info
     - Dumping and debugging
	 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n

   TODO: Document these "Available functionality" functions in the files
   that implement them.
 */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "obstack.h"
#include "ggc.h"
#include "hash-table.h"
#include "alloc-pool.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "df.h"
#include "cfgloop.h" /* FIXME: For struct loop.  */
#include "dumpfile.h"


#define RDIV(X,Y) (((X) + (Y) / 2) / (Y))
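/* For example, RDIV (7, 2) == 4 and RDIV (5, 3) == 2: the +Y/2 term rounds
   the quotient to the nearest integer for nonnegative operands.  */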

/* Called once at initialization time.  */

void
init_flow (struct function *the_fun)
{
  if (!the_fun->cfg)
    the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
  n_edges_for_fn (the_fun) = 0;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)
    = ggc_cleared_alloc<basic_block_def> ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
  EXIT_BLOCK_PTR_FOR_FN (the_fun)
    = ggc_cleared_alloc<basic_block_def> ();
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (the_fun);
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}

/* Helper function for remove_edge and clear_edges.  Frees edge structure
   without actually removing it from the pred/succ arrays.  */

static void
free_edge (edge e)
{
  n_edges_for_fn (cfun)--;
  ggc_free (e);
}

/* Free the memory associated with the edge structures.  */

void
clear_edges (void)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	free_edge (e);
      vec_safe_truncate (bb->succs, 0);
      vec_safe_truncate (bb->preds, 0);
    }

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    free_edge (e);
  vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, 0);
  vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs, 0);

  gcc_assert (!n_edges_for_fn (cfun));
}

/* Allocate memory for basic_block.  */

basic_block
alloc_block (void)
{
  basic_block bb;
  bb = ggc_cleared_alloc<basic_block_def> ();
  return bb;
}

/* Link block B to chain after AFTER.  */
void
link_block (basic_block b, basic_block after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}

/* Unlink block B from chain.  */
void
unlink_block (basic_block b)
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
  b->prev_bb = NULL;
  b->next_bb = NULL;
}

/* Sequentially order blocks and compact the arrays.  */
void
compact_blocks (void)
{
  int i;

  SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
  SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));

  if (df)
    df_compact_blocks ();
  else
    {
      basic_block bb;

      i = NUM_FIXED_BLOCKS;
      FOR_EACH_BB_FN (bb, cfun)
	{
	  SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
	  bb->index = i;
	  i++;
	}
      gcc_assert (i == n_basic_blocks_for_fn (cfun));

      for (; i < last_basic_block_for_fn (cfun); i++)
	SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
    }
  last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}

/* Remove block B from the basic block array.  */

void
expunge_block (basic_block b)
{
  unlink_block (b);
  SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
  n_basic_blocks_for_fn (cfun)--;
  /* We should be able to ggc_free here, but we are not.
     Dead SSA_NAMEs are left pointing to dead statements that point to dead
     basic blocks, which makes the garbage collector crash.
     We should release all dead SSA_NAMEs and at the same time clear out the
     BB pointer of dead statements consistently.  */
}

/* Connect E to E->src.  */

static inline void
connect_src (edge e)
{
  vec_safe_push (e->src->succs, e);
  df_mark_solutions_dirty ();
}

/* Connect E to E->dest.  */

static inline void
connect_dest (edge e)
{
  basic_block dest = e->dest;
  vec_safe_push (dest->preds, e);
  e->dest_idx = EDGE_COUNT (dest->preds) - 1;
  df_mark_solutions_dirty ();
}

/* Disconnect edge E from E->src.  */

static inline void
disconnect_src (edge e)
{
  basic_block src = e->src;
  edge_iterator ei;
  edge tmp;

  for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
    {
      if (tmp == e)
	{
	  src->succs->unordered_remove (ei.index);
	  df_mark_solutions_dirty ();
	  return;
	}
      else
	ei_next (&ei);
    }

  gcc_unreachable ();
}

/* Disconnect edge E from E->dest.  */

static inline void
disconnect_dest (edge e)
{
  basic_block dest = e->dest;
  unsigned int dest_idx = e->dest_idx;

  dest->preds->unordered_remove (dest_idx);

  /* If we removed an edge in the middle of the edge vector, we need
     to update dest_idx of the edge that moved into the "hole".  */
  if (dest_idx < EDGE_COUNT (dest->preds))
    EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
  df_mark_solutions_dirty ();
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return newly
   created edge.  Use this only if you are sure that this edge can't
   possibly already exist.  */

edge
unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
  edge e;
  e = ggc_cleared_alloc<edge_def> ();
  n_edges_for_fn (cfun)++;

  e->src = src;
  e->dest = dst;
  e->flags = flags;

  connect_src (e);
  connect_dest (e);

  execute_on_growing_pred (e);
  return e;
}

/* Create an edge connecting SRC and DST with FLAGS, optionally using edge
   cache EDGE_CACHE, a bitmap of destination block indices that already have
   an edge from SRC.  Return the new edge, or NULL if it already exists.  */

edge
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
  if (edge_cache == NULL
      || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return make_edge (src, dst, flags);

  /* Does the requested edge already exist?  */
  if (! bitmap_bit_p (edge_cache, dst->index))
    {
      /* The edge does not exist.  Create one and update the
	 cache.  */
      bitmap_set_bit (edge_cache, dst->index);
      return unchecked_make_edge (src, dst, flags);
    }

  /* At this point, we know that the requested edge exists.  Adjust
     flags if necessary.  */
  if (flags)
    {
      edge e = find_edge (src, dst);
      e->flags |= flags;
    }

  return NULL;
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return the newly
   created edge, or NULL if it already exists.  */

edge
make_edge (basic_block src, basic_block dest, int flags)
{
  edge e = find_edge (src, dest);

  /* Make sure we don't add duplicate edges.  */
  if (e)
    {
      e->flags |= flags;
      return NULL;
    }

  return unchecked_make_edge (src, dest, flags);
}

/* Create an edge connecting SRC to DEST and set probability by knowing
   that it is the single edge leaving SRC.  */

edge
make_single_succ_edge (basic_block src, basic_block dest, int flags)
{
  edge e = make_edge (src, dest, flags);

  e->probability = REG_BR_PROB_BASE;
  e->count = src->count;
  return e;
}

/* This function will remove an edge from the flow graph.  */

void
remove_edge_raw (edge e)
{
  remove_predictions_associated_with_edge (e);
  execute_on_shrinking_pred (e);

  disconnect_src (e);
  disconnect_dest (e);

  free_edge (e);
}

/* Redirect an edge's successor from one block to another.  */

void
redirect_edge_succ (edge e, basic_block new_succ)
{
  execute_on_shrinking_pred (e);

  disconnect_dest (e);

  e->dest = new_succ;

  /* Reconnect the edge to the new successor block.  */
  connect_dest (e);

  execute_on_growing_pred (e);
}

/* Redirect an edge's predecessor from one block to another.  */

void
redirect_edge_pred (edge e, basic_block new_pred)
{
  disconnect_src (e);

  e->src = new_pred;

  /* Reconnect the edge to the new predecessor block.  */
  connect_src (e);
}

/* Clear all basic block flags that do not have to be preserved.  */
void
clear_bb_flags (void)
{
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    bb->flags &= BB_FLAGS_TO_PRESERVE;
}

/* Check the consistency of profile information.  We can't do that
   in verify_flow_info, as the counts may become invalid for incompletely
   solved graphs, later elimination of conditionals or roundoff errors.
   It is still practical to have them reported for debugging of simple
   testcases.  */
static void
check_bb_profile (basic_block bb, FILE * file, int indent, int flags)
{
  edge e;
  int sum = 0;
  gcov_type lsum;
  edge_iterator ei;
  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    return;

  if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	sum += e->probability;
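      /* A slack of 100 out of REG_BR_PROB_BASE absorbs accumulated roundoff
         error in the probabilities.  */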
      if (EDGE_COUNT (bb->succs) && abs (sum - REG_BR_PROB_BASE) > 100)
	fprintf (file, "%s%sInvalid sum of outgoing probabilities %.1f%%\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 sum * 100.0 / REG_BR_PROB_BASE);
      lsum = 0;
      FOR_EACH_EDGE (e, ei, bb->succs)
	lsum += e->count;
      if (EDGE_COUNT (bb->succs)
	  && (lsum - bb->count > 100 || lsum - bb->count < -100))
	fprintf (file, "%s%sInvalid sum of outgoing counts %i, should be %i\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 (int) lsum, (int) bb->count);
    }
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
    {
      sum = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
	sum += EDGE_FREQUENCY (e);
      if (abs (sum - bb->frequency) > 100)
	fprintf (file,
		 "%s%sInvalid sum of incoming frequencies %i, should be %i\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 sum, bb->frequency);
      lsum = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
	lsum += e->count;
      if (lsum - bb->count > 100 || lsum - bb->count < -100)
	fprintf (file, "%s%sInvalid sum of incoming counts %i, should be %i\n",
		 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
		 (int) lsum, (int) bb->count);
    }
  if (BB_PARTITION (bb) == BB_COLD_PARTITION)
    {
      /* Warn about inconsistencies in the partitioning that are
         currently caused by profile insanities created via optimization.  */
      if (!probably_never_executed_bb_p (fun, bb))
        fprintf (file, "%s%sBlock in cold partition with hot count\n",
                 (flags & TDF_COMMENT) ? ";; " : "", s_indent);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (!probably_never_executed_edge_p (fun, e))
            fprintf (file,
                     "%s%sBlock in cold partition with incoming hot edge\n",
                     (flags & TDF_COMMENT) ? ";; " : "", s_indent);
        }
    }
}

void
dump_edge_info (FILE *file, edge e, int flags, int do_succ)
{
  basic_block side = (do_succ ? e->dest : e->src);
  bool do_details = false;
  
  if ((flags & TDF_DETAILS) != 0
      && (flags & TDF_SLIM) == 0)
    do_details = true;

  if (side->index == ENTRY_BLOCK)
    fputs (" ENTRY", file);
  else if (side->index == EXIT_BLOCK)
    fputs (" EXIT", file);
  else
    fprintf (file, " %d", side->index);

  if (e->probability && do_details)
    fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE);

  if (e->count && do_details)
    {
      fputs (" count:", file);
      fprintf (file, "%"PRId64, e->count);
    }

  if (e->flags && do_details)
    {
      static const char * const bitnames[] =
	{
#define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
	  NULL
#undef DEF_EDGE_FLAG
	};
      bool comma = false;
      int i, flags = e->flags;

      gcc_assert (e->flags <= EDGE_ALL_FLAGS);
      fputs (" (", file);
      for (i = 0; flags; i++)
	if (flags & (1 << i))
	  {
	    flags &= ~(1 << i);

	    if (comma)
	      fputc (',', file);
	    fputs (bitnames[i], file);
	    comma = true;
	  }

      fputc (')', file);
    }
}

DEBUG_FUNCTION void
debug (edge_def &ref)
{
  /* FIXME (crowl): Is this desirable?  */
  dump_edge_info (stderr, &ref, 0, false);
  dump_edge_info (stderr, &ref, 0, true);
}

DEBUG_FUNCTION void
debug (edge_def *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Simple routines to easily allocate AUX fields of basic blocks.  */

static struct obstack block_aux_obstack;
static void *first_block_aux_obj = 0;
static struct obstack edge_aux_obstack;
static void *first_edge_aux_obj = 0;

/* Allocate a memory block of SIZE as BB->aux.  The obstack must
   be first initialized by alloc_aux_for_blocks.  */

static void
alloc_aux_for_block (basic_block bb, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!bb->aux && first_block_aux_obj);
  bb->aux = obstack_alloc (&block_aux_obstack, size);
  memset (bb->aux, 0, size);
}

/* Initialize the block_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_block for each basic block.  */

void
alloc_aux_for_blocks (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&block_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_block_aux_obj);

  first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_ALL_BB_FN (bb, cfun)
	alloc_aux_for_block (bb, size);
    }
}
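
/* A typical use, as a sketch: a pass calls alloc_aux_for_blocks with the
   size of its per-block record, reads and writes that data through bb->aux
   while it runs, and finishes with free_aux_for_blocks.  */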

/* Clear AUX pointers of all blocks.  */

void
clear_aux_for_blocks (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->aux = NULL;
}

/* Free data allocated in block_aux_obstack and clear AUX pointers
   of all blocks.  */

void
free_aux_for_blocks (void)
{
  gcc_assert (first_block_aux_obj);
  obstack_free (&block_aux_obstack, first_block_aux_obj);
  first_block_aux_obj = NULL;

  clear_aux_for_blocks ();
}

/* Allocate a memory block of SIZE as E->aux.  The obstack must
   be first initialized by alloc_aux_for_edges.  */

void
alloc_aux_for_edge (edge e, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!e->aux && first_edge_aux_obj);
  e->aux = obstack_alloc (&edge_aux_obstack, size);
  memset (e->aux, 0, size);
}

/* Initialize the edge_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_edge for each edge.  */

void
alloc_aux_for_edges (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&edge_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_edge_aux_obj);

  first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    alloc_aux_for_edge (e, size);
	}
    }
}

/* Clear AUX pointers of all edges.  */

void
clear_aux_for_edges (void)
{
  basic_block bb;
  edge e;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
}

/* Free data allocated in edge_aux_obstack and clear AUX pointers
   of all edges.  */

void
free_aux_for_edges (void)
{
  gcc_assert (first_edge_aux_obj);
  obstack_free (&edge_aux_obstack, first_edge_aux_obj);
  first_edge_aux_obj = NULL;

  clear_aux_for_edges ();
}

DEBUG_FUNCTION void
debug_bb (basic_block bb)
{
  dump_bb (stderr, bb, 0, dump_flags);
}

DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
  debug_bb (bb);
  return bb;
}

/* Dumps cfg related information about basic block BB to OUTF.
   If HEADER is true, dump things that appear before the instructions
   contained in BB.  If FOOTER is true, dump things that appear after.
   Flags are the TDF_* masks as documented in dumpfile.h.
   NB: With TDF_DETAILS, it is assumed that cfun is available, so
   that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE.  */

void
dump_bb_info (FILE *outf, basic_block bb, int indent, int flags,
	      bool do_header, bool do_footer)
{
  edge_iterator ei;
  edge e;
  static const char * const bb_bitnames[] =
    {
#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
      NULL
#undef DEF_BASIC_BLOCK_FLAG
    };
  const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
  bool first;
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  gcc_assert (bb->flags <= BB_ALL_FLAGS);

  if (do_header)
    {
      unsigned i;

      if (flags & TDF_COMMENT)
	fputs (";; ", outf);
      fprintf (outf, "%sbasic block %d, loop depth %d",
	       s_indent, bb->index, bb_loop_depth (bb));
      if (flags & TDF_DETAILS)
	{
	  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
	  fprintf (outf, ", count " "%"PRId64,
		   (int64_t) bb->count);
	  fprintf (outf, ", freq %i", bb->frequency);
	  if (maybe_hot_bb_p (fun, bb))
	    fputs (", maybe hot", outf);
	  if (probably_never_executed_bb_p (fun, bb))
	    fputs (", probably never executed", outf);
	}
      fputc ('\n', outf);

      if (flags & TDF_DETAILS)
	{
	  check_bb_profile (bb, outf, indent, flags);
	  if (flags & TDF_COMMENT)
	    fputs (";; ", outf);
	  fprintf (outf, "%s prev block ", s_indent);
	  if (bb->prev_bb)
	    fprintf (outf, "%d", bb->prev_bb->index);
	  else
	    fprintf (outf, "(nil)");
	  fprintf (outf, ", next block ");
	  if (bb->next_bb)
	    fprintf (outf, "%d", bb->next_bb->index);
	  else
	    fprintf (outf, "(nil)");

	  fputs (", flags:", outf);
	  first = true;
	  for (i = 0; i < n_bitnames; i++)
	    if (bb->flags & (1 << i))
	      {
		if (first)
		  fputs (" (", outf);
		else
		  fputs (", ", outf);
		first = false;
		fputs (bb_bitnames[i], outf);
	      }
	  if (!first)
	    fputc (')', outf);
	  fputc ('\n', outf);
	}

      if (flags & TDF_COMMENT)
	fputs (";; ", outf);
      fprintf (outf, "%s pred:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (! first)
	    {
	      if (flags & TDF_COMMENT)
		fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 0);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }

  if (do_footer)
    {
      if (flags & TDF_COMMENT)
	fputs (";; ", outf);
      fprintf (outf, "%s succ:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
	  if (! first)
	    {
	      if (flags & TDF_COMMENT)
		fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 1);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }
}

/* Dumps a brief description of cfg to FILE.  */

void
brief_dump_cfg (FILE *file, int flags)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      dump_bb_info (file, bb, 0,
		    flags & (TDF_COMMENT | TDF_DETAILS),
		    true, true);
    }
}

/* An edge originally destined for BB, with frequency FREQUENCY and count
   COUNT, has been proved to leave the block by TAKEN_EDGE.  Update the
   profile of BB so that the edge can be redirected to the destination of
   TAKEN_EDGE.

   This function may leave the profile inconsistent in the case TAKEN_EDGE
   frequency or count is believed to be lower than FREQUENCY or COUNT
   respectively.  */
void
update_bb_profile_for_threading (basic_block bb, int edge_frequency,
				 gcov_type count, edge taken_edge)
{
  edge c;
  int prob;
  edge_iterator ei;

  bb->count -= count;
  if (bb->count < 0)
    {
      if (dump_file)
	fprintf (dump_file, "bb %i count became negative after threading",
		 bb->index);
      bb->count = 0;
    }

  /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
     Watch for overflows.  */
  if (bb->frequency)
    prob = GCOV_COMPUTE_SCALE (edge_frequency, bb->frequency);
  else
    prob = 0;
  if (prob > taken_edge->probability)
    {
      if (dump_file)
	fprintf (dump_file, "Jump threading proved probability of edge "
		 "%i->%i too small (it is %i, should be %i).\n",
		 taken_edge->src->index, taken_edge->dest->index,
		 taken_edge->probability, prob);
      prob = taken_edge->probability;
    }

  /* Now rescale the probabilities.  */
  taken_edge->probability -= prob;
  prob = REG_BR_PROB_BASE - prob;
  bb->frequency -= edge_frequency;
  if (bb->frequency < 0)
    bb->frequency = 0;
  if (prob <= 0)
    {
      if (dump_file)
	fprintf (dump_file, "Edge frequencies of bb %i has been reset, "
		 "frequency of block should end up being 0, it is %i\n",
		 bb->index, bb->frequency);
      EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
      ei = ei_start (bb->succs);
      ei_next (&ei);
      for (; (c = ei_safe_edge (ei)); ei_next (&ei))
	c->probability = 0;
    }
  else if (prob != REG_BR_PROB_BASE)
    {
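      /* SCALE is REG_BR_PROB_BASE / PROB as a 16.16 fixed-point factor;
         each remaining edge probability is multiplied by it and divided
         by 65536 below.  */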
      int scale = RDIV (65536 * REG_BR_PROB_BASE, prob);

      FOR_EACH_EDGE (c, ei, bb->succs)
	{
	  /* Protect from overflow due to additional scaling.  */
	  if (c->probability > prob)
	    c->probability = REG_BR_PROB_BASE;
	  else
	    {
	      c->probability = RDIV (c->probability * scale, 65536);
	      if (c->probability > REG_BR_PROB_BASE)
		c->probability = REG_BR_PROB_BASE;
	    }
	}
    }

  gcc_assert (bb == taken_edge->src);
  taken_edge->count -= count;
  if (taken_edge->count < 0)
    {
      if (dump_file)
	fprintf (dump_file, "edge %i->%i count became negative after threading",
		 taken_edge->src->index, taken_edge->dest->index);
      taken_edge->count = 0;
    }
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in int arithmetic.  May lose some accuracy.  */
void
scale_bbs_frequencies_int (basic_block *bbs, int nbbs, int num, int den)
{
  int i;
  edge e;
  if (num < 0)
    num = 0;

  /* Scale NUM and DEN to avoid overflows.  Frequencies are in order of
     10^4, if we make DEN <= 10^3, we can afford to upscale by 100
     and still safely fit in int during calculations.  */
  if (den > 1000)
    {
      if (num > 1000000)
	return;

      num = RDIV (1000 * num, den);
      den = 1000;
    }
  if (num > 100 * den)
    return;

  for (i = 0; i < nbbs; i++)
    {
      edge_iterator ei;
      bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
      /* Make sure the frequencies do not grow over BB_FREQ_MAX.  */
      if (bbs[i]->frequency > BB_FREQ_MAX)
	bbs[i]->frequency = BB_FREQ_MAX;
      bbs[i]->count = RDIV (bbs[i]->count * num, den);
      FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	e->count = RDIV (e->count * num, den);
    }
}

/* Numbers smaller than this value are safe to multiply without getting
   64-bit overflow.  */
#define MAX_SAFE_MULTIPLIER (1 << (sizeof (int64_t) * 4 - 1))
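/* With 64-bit int64_t this is 1 << 31: the product of two values below that
   bound stays under 1 << 62 and still fits in int64_t.  */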

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in gcov_type arithmetic.  More accurate than previous
   function but considerably slower.  */
void
scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num,
				 gcov_type den)
{
  int i;
  edge e;
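  /* NUM/DEN expressed as a 16.16 fixed-point fraction.  */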
  gcov_type fraction = RDIV (num * 65536, den);

  gcc_assert (fraction >= 0);

  if (num < MAX_SAFE_MULTIPLIER)
    for (i = 0; i < nbbs; i++)
      {
	edge_iterator ei;
	bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
	  bbs[i]->count = RDIV (bbs[i]->count * num, den);
	else
	  bbs[i]->count = RDIV (bbs[i]->count * fraction, 65536);
	FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	  if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
	    e->count = RDIV (e->count * num, den);
	  else
	    e->count = RDIV (e->count * fraction, 65536);
      }
  else
    for (i = 0; i < nbbs; i++)
      {
	edge_iterator ei;
	if (sizeof (gcov_type) > sizeof (int))
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	else
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * fraction, 65536);
	bbs[i]->count = RDIV (bbs[i]->count * fraction, 65536);
	FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	  e->count = RDIV (e->count * fraction, 65536);
      }
}

/* Helper types for hash tables.  */

struct htab_bb_copy_original_entry
{
  /* Block we are attaching info to.  */
  int index1;
  /* Index of original or copy (depending on the hashtable) */
  int index2;
};

struct bb_copy_hasher : typed_noop_remove <htab_bb_copy_original_entry>
{
  typedef htab_bb_copy_original_entry value_type;
  typedef htab_bb_copy_original_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *existing,
			    const compare_type * candidate);
};

inline hashval_t
bb_copy_hasher::hash (const value_type *data)
{
  return data->index1;
}

inline bool
bb_copy_hasher::equal (const value_type *data, const compare_type *data2)
{
  return data->index1 == data2->index1;
}

/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
static alloc_pool original_copy_bb_pool;


/* Initialize the data structures to maintain mapping between blocks
   and their copies.  */
void
initialize_original_copy_tables (void)
{
  gcc_assert (!original_copy_bb_pool);
  original_copy_bb_pool
    = create_alloc_pool ("original_copy",
			 sizeof (struct htab_bb_copy_original_entry), 10);
  bb_original = new hash_table<bb_copy_hasher> (10);
  bb_copy = new hash_table<bb_copy_hasher> (10);
  loop_copy = new hash_table<bb_copy_hasher> (10);
}
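
/* A typical use, as a sketch: a pass that duplicates basic blocks brackets
   its work with initialize_original_copy_tables and free_original_copy_tables,
   records each duplication with set_bb_original and set_bb_copy, and later
   queries get_bb_original / get_bb_copy (or set_loop_copy / get_loop_copy for
   loops) to relate the blocks to their copies.  */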

/* Free the data structures to maintain mapping between blocks and
   their copies.  */
void
free_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  delete bb_copy;
  bb_copy = NULL;
  delete bb_original;
  bb_original = NULL;
  delete loop_copy;
  loop_copy = NULL;
  free_alloc_pool (original_copy_bb_pool);
  original_copy_bb_pool = NULL;
}

/* Removes the value associated with OBJ from table TAB.  */

static void
copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
  htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key, *elt;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, NO_INSERT);
  if (!slot)
    return;

  elt = *slot;
  tab->clear_slot (slot);
  pool_free (original_copy_bb_pool, elt);
}

/* Sets the value associated with OBJ in table TAB to VAL.
   Do nothing when data structures are not initialized.  */

static void
copy_original_table_set (hash_table<bb_copy_hasher> *tab,
			 unsigned obj, unsigned val)
{
  struct htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, INSERT);
  if (!*slot)
    {
      *slot = (struct htab_bb_copy_original_entry *)
		pool_alloc (original_copy_bb_pool);
      (*slot)->index1 = obj;
    }
  (*slot)->index2 = val;
}

/* Set original for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_original (basic_block bb, basic_block original)
{
  copy_original_table_set (bb_original, bb->index, original->index);
}

/* Get the original basic block.  */
basic_block
get_bb_original (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_original->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_copy (basic_block bb, basic_block copy)
{
  copy_original_table_set (bb_copy, bb->index, copy->index);
}

/* Get the copy of basic block.  */
basic_block
get_bb_copy (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_copy->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for LOOP to COPY.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */

void
set_loop_copy (struct loop *loop, struct loop *copy)
{
  if (!copy)
    copy_original_table_clear (loop_copy, loop->num);
  else
    copy_original_table_set (loop_copy, loop->num, copy->num);
}

/* Get the copy of LOOP.  */

struct loop *
get_loop_copy (struct loop *loop)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = loop->num;
  entry = loop_copy->find (&key);
  if (entry)
    return get_loop (cfun, entry->index2);
  else
    return NULL;
}