Commit cd280abb authored by Paolo Bonzini, committed by Paolo Bonzini

common.opt (-fnew-ra): Remove.

2005-01-17  Paolo Bonzini  <bonzini@gnu.org>

	* common.opt (-fnew-ra): Remove.
	* ra*.*: Remove.
	* toplev.h (flag_new_regalloc): Remove.
	* Makefile.in (ra*.*): Don't mention.
	* passes.c (rest_of_handle_new_regalloc): Remove.
	(rest_of_handle_combine, rest_of_compilation): Always consider
	flag_new_regalloc as false.
	* doc/invoke.texi: Don't document -fnew-ra.

From-SVN: r93759
parent c80a0f26
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -2,7 +2,7 @@
 # Run 'configure' to generate Makefile from Makefile.in
 # Copyright (C) 1987, 1988, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
-# 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+# 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
 #This file is part of GCC.
@@ -704,7 +704,6 @@ FLAGS_H = flags.h options.h
 EXPR_H = expr.h insn-config.h function.h $(RTL_H) $(FLAGS_H) $(TREE_H) $(MACHMODE_H) $(EMIT_RTL_H)
 OPTABS_H = optabs.h insn-codes.h
 REGS_H = regs.h varray.h $(MACHMODE_H) $(OBSTACK_H) $(BASIC_BLOCK_H)
-RA_H = ra.h bitmap.h sbitmap.h hard-reg-set.h insn-modes.h
 RESOURCE_H = resource.h hard-reg-set.h
 SCHED_INT_H = sched-int.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H)
 INTEGRATE_H = integrate.h varray.h
@@ -920,8 +919,7 @@ OBJS-common = \
 loop.o modulo-sched.o optabs.o options.o opts.o \
 params.o postreload.o postreload-gcse.o predict.o \
 insn-preds.o pointer-set.o postreload.o \
-print-rtl.o print-tree.o value-prof.o var-tracking.o \
-profile.o ra.o ra-build.o ra-colorize.o ra-debug.o ra-rewrite.o \
+print-rtl.o print-tree.o profile.o value-prof.o var-tracking.o \
 real.o recog.o reg-stack.o regclass.o regmove.o regrename.o \
 reload.o reload1.o reorg.o resource.o rtl.o rtlanal.o rtl-error.o \
 sbitmap.o sched-deps.o sched-ebb.o sched-rgn.o sched-vis.o sdbout.o \
@@ -2067,20 +2065,6 @@ global.o : global.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(FLAGS
 varray.o : varray.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) varray.h $(GGC_H) errors.h \
    $(HASHTAB_H)
 vec.o : vec.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H) coretypes.h vec.h $(GGC_H) errors.h
-ra.o : ra.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TM_P_H) insn-config.h \
-   $(RECOG_H) $(INTEGRATE_H) function.h $(REGS_H) $(OBSTACK_H) hard-reg-set.h \
-   $(BASIC_BLOCK_H) $(DF_H) $(EXPR_H) output.h toplev.h $(FLAGS_H) reload.h $(RA_H)
-ra-build.o : ra-build.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TM_P_H) \
-   insn-config.h $(RECOG_H) function.h $(REGS_H) hard-reg-set.h \
-   $(BASIC_BLOCK_H) $(DF_H) output.h $(GGC_H) $(RA_H) gt-ra-build.h reload.h
-ra-colorize.o : ra-colorize.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
-   $(TM_P_H) function.h $(REGS_H) hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) output.h $(RA_H)
-ra-debug.o : ra-debug.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
-   insn-config.h $(RECOG_H) function.h hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) output.h \
-   $(RA_H) $(TM_P_H) $(REGS_H)
-ra-rewrite.o : ra-rewrite.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
-   $(TM_P_H) function.h $(REGS_H) hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) $(EXPR_H) \
-   output.h except.h $(RA_H) reload.h insn-config.h
 reload.o : reload.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(FLAGS_H) output.h \
    $(EXPR_H) $(OPTABS_H) reload.h $(RECOG_H) hard-reg-set.h insn-config.h \
    $(REGS_H) function.h real.h toplev.h $(TM_P_H) $(PARAMS_H)
@@ -2432,7 +2416,7 @@ GTFILES = $(srcdir)/input.h $(srcdir)/coretypes.h \
 $(srcdir)/emit-rtl.c $(srcdir)/except.c $(srcdir)/explow.c $(srcdir)/expr.c \
 $(srcdir)/function.c \
 $(srcdir)/gcse.c $(srcdir)/integrate.c $(srcdir)/lists.c $(srcdir)/optabs.c \
-$(srcdir)/profile.c $(srcdir)/ra-build.c $(srcdir)/regclass.c \
+$(srcdir)/profile.c $(srcdir)/regclass.c \
 $(srcdir)/reg-stack.c $(srcdir)/cfglayout.c \
 $(srcdir)/sdbout.c $(srcdir)/stor-layout.c \
 $(srcdir)/stringpool.c $(srcdir)/tree.c $(srcdir)/varasm.c \
@@ -2458,7 +2442,7 @@ gt-function.h gt-integrate.h gt-tree.h gt-varasm.h \
 gt-emit-rtl.h gt-explow.h gt-stor-layout.h gt-regclass.h \
 gt-lists.h gt-alias.h gt-cselib.h gt-gcse.h \
 gt-expr.h gt-sdbout.h gt-optabs.h gt-bitmap.h gt-dojump.h \
-gt-dwarf2out.h gt-ra-build.h gt-reg-stack.h gt-dwarf2asm.h \
+gt-dwarf2out.h gt-reg-stack.h gt-dwarf2asm.h \
 gt-dbxout.h gt-c-common.h gt-c-decl.h gt-c-parse.h \
 gt-c-pragma.h gtype-c.h gt-cfglayout.h \
 gt-tree-mudflap.h gt-tree-complex.h \
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1,5 +1,5 @@
 ; Options for the language- and target-independent parts of the compiler.
-; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+; Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
 ;
 ; This file is part of GCC.
 ;
@@ -539,10 +539,6 @@ fmudflapir
 Common RejectNegative Report Var(flag_mudflap_ignore_reads)
 Ignore read operations when inserting mudflap instrumentation.
 
-fnew-ra
-Common Report Var(flag_new_regalloc)
-Use graph-coloring register allocation
-
 freschedule-modulo-scheduled-loops
 Common Report Var(flag_resched_modulo_sched)
 Enable/Disable the traditional scheduling in loops that already passed modulo scheduling
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -294,7 +294,7 @@ Objective-C and Objective-C++ Dialects}.
 -floop-optimize -fcrossjumping -fif-conversion -fif-conversion2 @gol
 -finline-functions -finline-limit=@var{n} -fkeep-inline-functions @gol
 -fkeep-static-consts -fmerge-constants -fmerge-all-constants @gol
--fmodulo-sched -fnew-ra -fno-branch-count-reg @gol
+-fmodulo-sched -fno-branch-count-reg @gol
 -fno-default-inline -fno-defer-pop -floop-optimize2 -fmove-loop-invariants @gol
 -fno-function-cse -fno-guess-branch-probability @gol
 -fno-inline -fno-math-errno -fno-peephole -fno-peephole2 @gol
@@ -4282,12 +4282,6 @@ Perform swing modulo scheduling immediately before the first scheduling
 pass.  This pass looks at innermost loops and reorders their
 instructions by overlapping different iterations.
 
-@item -fnew-ra
-@opindex fnew-ra
-Use a graph coloring register allocator.  Currently this option is meant
-only for testing.  Users should not specify this option, since it is not
-yet ready for production use.
-
 @item -fno-branch-count-reg
 @opindex fno-branch-count-reg
 Do not use ``decrement and branch'' instructions on a count register,
@@ -5211,12 +5205,6 @@ a ``home register''.
 
 Not enabled by default at any level because it has known bugs.
 
-@item -fnew-ra
-@opindex fnew-ra
-Use a graph coloring register allocator.  Currently this option is meant
-for testing, so we are interested to hear about miscompilations with
-@option{-fnew-ra}.
-
 @item -ftracer
 @opindex ftracer
 Perform tail duplication to enlarge superblock size.  This transformation
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -1,5 +1,5 @@
 /* Top level of GCC compilers (cc1, cc1plus, etc.)
    Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
-   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
 
 This file is part of GCC.
@@ -425,49 +425,6 @@ rest_of_handle_machine_reorg (void)
 }
 
-/* Run new register allocator.  Return TRUE if we must exit
-   rest_of_compilation upon return.  */
-static bool
-rest_of_handle_new_regalloc (void)
-{
-  int failure;
-
-  timevar_push (TV_LOCAL_ALLOC);
-  open_dump_file (DFI_lreg, current_function_decl);
-
-  delete_trivially_dead_insns (get_insns (), max_reg_num ());
-  reg_alloc ();
-
-  timevar_pop (TV_LOCAL_ALLOC);
-  close_dump_file (DFI_lreg, NULL, NULL);
-
-  /* XXX clean up the whole mess to bring live info in shape again.  */
-  timevar_push (TV_GLOBAL_ALLOC);
-  open_dump_file (DFI_greg, current_function_decl);
-
-  build_insn_chain (get_insns ());
-  failure = reload (get_insns (), 0);
-
-  timevar_pop (TV_GLOBAL_ALLOC);
-
-  ggc_collect ();
-
-  if (dump_enabled_p (DFI_greg))
-    {
-      timevar_push (TV_DUMP);
-      dump_global_regs (dump_file);
-      timevar_pop (TV_DUMP);
-
-      close_dump_file (DFI_greg, print_rtl_with_bb, get_insns ());
-    }
-
-  if (failure)
-    return true;
-
-  reload_completed = 1;
-  return false;
-}
-
 /* Run old register allocator.  Return TRUE if we must exit
    rest_of_compilation upon return.  */
 static bool
@@ -970,7 +927,7 @@ rest_of_handle_life (void)
 
   if (optimize)
     {
-      if (!flag_new_regalloc && initialize_uninitialized_subregs ())
+      if (initialize_uninitialized_subregs ())
 	{
 	  /* Insns were inserted, and possibly pseudos created, so
 	     things might look a bit different.  */
@@ -1706,16 +1663,8 @@ rest_of_compilation (void)
      epilogue thus changing register elimination offsets.  */
   current_function_is_leaf = leaf_function_p ();
 
-  if (flag_new_regalloc)
-    {
-      if (rest_of_handle_new_regalloc ())
-	goto exit_rest_of_compilation;
-    }
-  else
-    {
-      if (rest_of_handle_old_regalloc ())
-	goto exit_rest_of_compilation;
-    }
+  if (rest_of_handle_old_regalloc ())
+    goto exit_rest_of_compilation;
 
   if (optimize > 0)
     rest_of_handle_postreload ();
This source diff could not be displayed because it is too large. The contents of the deleted file, gcc/ra-colorize.c, follow.
/* Graph coloring register allocator
Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with GCC; see the file COPYING. If not, write to the Free Software
Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "df.h"
#include "output.h"
#include "ra.h"
/* This file is part of the graph coloring register allocator.
   It contains the graph colorizer.  Given an interference graph
   as set up in ra-build.c, the toplevel function in this file
   (ra_colorize_graph) colorizes the graph, leaving a list
   of colored, coalesced and spilled nodes.

   The algorithm used is a merge of George & Appel's iterative
   coalescing and optimistic coalescing, switchable at runtime.
   The current default is "optimistic coalescing +", which is based
   on the normal Briggs/Cooper framework.  We can also use biased
   coloring.  Most of the structure here follows the different papers.

   Additionally there is a custom step to locally improve the overall
   spill cost of the colored graph (recolor_spills).  */
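/* In outline (a sketch of the flow implemented below, not original
   documentation): build_worklists () classifies every web as a SPILL,
   FREEZE or SIMPLIFY candidate.  The toplevel driver
   (ra_colorize_graph) then repeatedly runs simplify (), coalesce (),
   freeze () and select_spill () until all worklists are empty,
   pushing webs onto the select stack, and finally assign_colors ()
   pops that stack and gives each web a hardreg, marking webs as
   SPILLED where no suitable block of free hardregs remains.  */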
static void push_list (struct dlist *, struct dlist **);
static void push_list_end (struct dlist *, struct dlist **);
static void free_dlist (struct dlist **);
static void put_web_at_end (struct web *, enum ra_node_type);
static void put_move (struct move *, enum move_type);
static void build_worklists (struct df *);
static void enable_move (struct web *);
static void decrement_degree (struct web *, int);
static void simplify (void);
static void remove_move_1 (struct web *, struct move *);
static void remove_move (struct web *, struct move *);
static void add_worklist (struct web *);
static int ok (struct web *, struct web *);
static int conservative (struct web *, struct web *);
static inline unsigned int simplify_p (enum ra_node_type);
static void combine (struct web *, struct web *);
static void coalesce (void);
static void freeze_moves (struct web *);
static void freeze (void);
static void select_spill (void);
static int color_usable_p (int, HARD_REG_SET, HARD_REG_SET,
enum machine_mode);
int get_free_reg (HARD_REG_SET, HARD_REG_SET, enum machine_mode);
static int get_biased_reg (HARD_REG_SET, HARD_REG_SET, HARD_REG_SET,
HARD_REG_SET, enum machine_mode);
static int count_long_blocks (HARD_REG_SET, int);
static char * hardregset_to_string (HARD_REG_SET);
static void calculate_dont_begin (struct web *, HARD_REG_SET *);
static void colorize_one_web (struct web *, int);
static void assign_colors (void);
static void try_recolor_web (struct web *);
static void insert_coalesced_conflicts (void);
static int comp_webs_maxcost (const void *, const void *);
static void recolor_spills (void);
static void check_colors (void);
static void restore_conflicts_from_coalesce (struct web *);
static void break_coalesced_spills (void);
static void unalias_web (struct web *);
static void break_aliases_to_web (struct web *);
static void break_precolored_alias (struct web *);
static void init_web_pairs (void);
static void add_web_pair_cost (struct web *, struct web *,
unsigned HOST_WIDE_INT, unsigned int);
static int comp_web_pairs (const void *, const void *);
static void sort_and_combine_web_pairs (int);
static int ok_class (struct web *, struct web *);
static void aggressive_coalesce (void);
static void extended_coalesce_2 (void);
static void check_uncoalesced_moves (void);
static struct dlist *mv_worklist, *mv_coalesced, *mv_constrained;
static struct dlist *mv_frozen, *mv_active;
/* Push a node onto the front of the list. */
static void
push_list (struct dlist *x, struct dlist **list)
{
gcc_assert (!x->next);
gcc_assert (!x->prev);
x->next = *list;
if (*list)
(*list)->prev = x;
*list = x;
}
static void
push_list_end (struct dlist *x, struct dlist **list)
{
gcc_assert (!x->prev);
gcc_assert (!x->next);
if (!*list)
{
*list = x;
return;
}
while ((*list)->next)
list = &((*list)->next);
x->prev = *list;
(*list)->next = x;
}
/* Remove a node from the list. */
void
remove_list (struct dlist *x, struct dlist **list)
{
struct dlist *y = x->prev;
if (y)
y->next = x->next;
else
*list = x->next;
y = x->next;
if (y)
y->prev = x->prev;
x->next = x->prev = NULL;
}
/* Pop the front of the list. */
struct dlist *
pop_list (struct dlist **list)
{
struct dlist *r = *list;
if (r)
remove_list (r, list);
return r;
}
/* Free the given double linked list. */
static void
free_dlist (struct dlist **list)
{
*list = NULL;
}
/* The web WEB should get the given new TYPE. Put it onto the
appropriate list.
Inline, because it's called with constant TYPE every time. */
inline void
put_web (struct web *web, enum ra_node_type type)
{
switch (type)
{
case INITIAL:
case FREE:
case FREEZE:
case SPILL:
case SPILLED:
case COALESCED:
case COLORED:
case SELECT:
push_list (web->dlink, &WEBS(type));
break;
case PRECOLORED:
push_list (web->dlink, &WEBS(INITIAL));
break;
case SIMPLIFY:
if (web->spill_temp)
push_list (web->dlink, &WEBS(type = SIMPLIFY_SPILL));
else if (web->add_hardregs)
push_list (web->dlink, &WEBS(type = SIMPLIFY_FAT));
else
push_list (web->dlink, &WEBS(SIMPLIFY));
break;
default:
gcc_unreachable ();
}
web->type = type;
}
/* After we are done with the whole pass of coloring/spilling,
   we reset the lists of webs, in preparation for the next pass.
   The spilled webs become free, colored webs go to the initial list,
   coalesced webs become free or initial, according to what type of web
   they are coalesced to.  */
void
reset_lists (void)
{
struct dlist *d;
gcc_assert (!WEBS(SIMPLIFY));
gcc_assert (!WEBS(SIMPLIFY_SPILL));
gcc_assert (!WEBS(SIMPLIFY_FAT));
gcc_assert (!WEBS(FREEZE));
gcc_assert (!WEBS(SPILL));
gcc_assert (!WEBS(SELECT));
while ((d = pop_list (&WEBS(COALESCED))) != NULL)
{
struct web *web = DLIST_WEB (d);
struct web *aweb = alias (web);
/* Note how alias() becomes invalid through the two put_web()'s
   below.  It might set the type of a web to FREE (from COALESCED),
   which itself is a target of aliasing (i.e. in the middle of
   an alias chain).  We can handle this by also checking for
   type == FREE.  Note nevertheless that alias() is invalid
   henceforth.  */
if (aweb->type == SPILLED || aweb->type == FREE)
put_web (web, FREE);
else
put_web (web, INITIAL);
}
while ((d = pop_list (&WEBS(SPILLED))) != NULL)
put_web (DLIST_WEB (d), FREE);
while ((d = pop_list (&WEBS(COLORED))) != NULL)
put_web (DLIST_WEB (d), INITIAL);
/* All free webs have no conflicts anymore. */
for (d = WEBS(FREE); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
BITMAP_XFREE (web->useless_conflicts);
web->useless_conflicts = NULL;
}
#ifdef ENABLE_CHECKING
/* Sanity check, that we only have free, initial or precolored webs. */
{
unsigned int i;
for (i = 0; i < num_webs; i++)
{
struct web *web = ID2WEB (i);
gcc_assert (web->type == INITIAL || web->type == FREE
|| web->type == PRECOLORED);
}
}
#endif
free_dlist (&mv_worklist);
free_dlist (&mv_coalesced);
free_dlist (&mv_constrained);
free_dlist (&mv_frozen);
free_dlist (&mv_active);
}
/* Similar to put_web(), but add the web to the end of the appropriate
list. Additionally TYPE may not be SIMPLIFY. */
static void
put_web_at_end (struct web *web, enum ra_node_type type)
{
if (type == PRECOLORED)
type = INITIAL;
else
gcc_assert (type != SIMPLIFY);
push_list_end (web->dlink, &WEBS(type));
web->type = type;
}
/* Unlink WEB from the list it's currently on (which corresponds to
its current type). */
void
remove_web_from_list (struct web *web)
{
if (web->type == PRECOLORED)
remove_list (web->dlink, &WEBS(INITIAL));
else
remove_list (web->dlink, &WEBS(web->type));
}
/* Give MOVE the TYPE, and link it into the correct list. */
static inline void
put_move (struct move *move, enum move_type type)
{
switch (type)
{
case WORKLIST:
push_list (move->dlink, &mv_worklist);
break;
case MV_COALESCED:
push_list (move->dlink, &mv_coalesced);
break;
case CONSTRAINED:
push_list (move->dlink, &mv_constrained);
break;
case FROZEN:
push_list (move->dlink, &mv_frozen);
break;
case ACTIVE:
push_list (move->dlink, &mv_active);
break;
default:
gcc_unreachable ();
}
move->type = type;
}
/* Build the worklists we are going to process. */
static void
build_worklists (struct df *df ATTRIBUTE_UNUSED)
{
struct dlist *d, *d_next;
struct move_list *ml;
/* If we are not the first pass, put all stackwebs (which are still
   backed by a new pseudo, but conceptually can stand for a stackslot,
   i.e. it doesn't really matter if they get a color or not) on
   the SELECT stack first, those with lowest cost first.  This way
   they will be colored last, so they do not constrain the coloring of
   the normal webs.  But still those with the highest count are colored
   before, i.e. are more likely to get a color.  The use of stackregs is
   a pure optimization, and all would work if we used real stackslots
   from the beginning.  */
if (ra_pass > 1)
{
unsigned int i, num, max_num;
struct web **order2web;
max_num = num_webs - num_subwebs;
order2web = xmalloc (max_num * sizeof (order2web[0]));
for (i = 0, num = 0; i < max_num; i++)
if (id2web[i]->regno >= max_normal_pseudo)
order2web[num++] = id2web[i];
if (num)
{
qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost);
for (i = num - 1;; i--)
{
struct web *web = order2web[i];
struct conflict_link *wl;
remove_list (web->dlink, &WEBS(INITIAL));
put_web (web, SELECT);
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *pweb = wl->t;
pweb->num_conflicts -= 1 + web->add_hardregs;
}
if (i == 0)
break;
}
}
free (order2web);
}
/* For all remaining initial webs, classify them. */
for (d = WEBS(INITIAL); d; d = d_next)
{
struct web *web = DLIST_WEB (d);
d_next = d->next;
if (web->type == PRECOLORED)
continue;
remove_list (d, &WEBS(INITIAL));
if (web->num_conflicts >= NUM_REGS (web))
put_web (web, SPILL);
else if (web->moves)
put_web (web, FREEZE);
else
put_web (web, SIMPLIFY);
}
/* And put all moves on the worklist for iterated coalescing.
   Note that if iterated coalescing is off, then wl_moves doesn't
   contain any moves.  */
for (ml = wl_moves; ml; ml = ml->next)
if (ml->move)
{
struct move *m = ml->move;
d = ra_calloc (sizeof (struct dlist));
DLIST_MOVE (d) = m;
m->dlink = d;
put_move (m, WORKLIST);
}
}
/* Enable the active moves, in which WEB takes part, to be processed. */
static void
enable_move (struct web *web)
{
struct move_list *ml;
for (ml = web->moves; ml; ml = ml->next)
if (ml->move->type == ACTIVE)
{
remove_list (ml->move->dlink, &mv_active);
put_move (ml->move, WORKLIST);
}
}
/* Decrement the degree of node WEB by the amount DEC.
Possibly change the type of WEB, if the number of conflicts is
now smaller than its freedom. */
static void
decrement_degree (struct web *web, int dec)
{
int before = web->num_conflicts;
web->num_conflicts -= dec;
if (web->num_conflicts < NUM_REGS (web) && before >= NUM_REGS (web))
{
struct conflict_link *a;
enable_move (web);
for (a = web->conflict_list; a; a = a->next)
{
struct web *aweb = a->t;
if (aweb->type != SELECT && aweb->type != COALESCED)
enable_move (aweb);
}
if (web->type != FREEZE)
{
remove_web_from_list (web);
if (web->moves)
put_web (web, FREEZE);
else
put_web (web, SIMPLIFY);
}
}
}
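/* Illustrative example: with NUM_REGS (web) == 4, a web whose
   num_conflicts falls from 4 to 3 ceases to be of significant degree:
   its moves (and those of its remaining neighbors) become candidates
   for coalescing again, and the web itself migrates to the FREEZE
   worklist if it still has moves, else to SIMPLIFY.  */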
/* Repeatedly simplify the nodes on the simplify worklists. */
static void
simplify (void)
{
struct dlist *d;
struct web *web;
struct conflict_link *wl;
while (1)
{
/* We try hard to color all the webs resulting from spills first.
   Without that, on register-starved machines (e.g. x86) with some live
   DImode pseudos, -fPIC, and an asm requiring %edx, it can happen that
   we go round after round, because the conflict graph says we can
   simplify those short webs, but later, due to irregularities, we can't
   color those pseudos.  So we have to spill them, which in later rounds
   leads to other spills.  */
d = pop_list (&WEBS(SIMPLIFY));
if (!d)
d = pop_list (&WEBS(SIMPLIFY_FAT));
if (!d)
d = pop_list (&WEBS(SIMPLIFY_SPILL));
if (!d)
break;
web = DLIST_WEB (d);
ra_debug_msg (DUMP_PROCESS, " simplifying web %3d, conflicts = %d\n",
web->id, web->num_conflicts);
put_web (web, SELECT);
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *pweb = wl->t;
if (pweb->type != SELECT && pweb->type != COALESCED)
{
decrement_degree (pweb, 1 + web->add_hardregs);
}
}
}
}
/* Helper function to remove a move from the movelist of the web. */
static void
remove_move_1 (struct web *web, struct move *move)
{
struct move_list *ml = web->moves;
if (!ml)
return;
if (ml->move == move)
{
web->moves = ml->next;
return;
}
for (; ml->next && ml->next->move != move; ml = ml->next) ;
if (!ml->next)
return;
ml->next = ml->next->next;
}
/* Remove a move from the movelist of the web. Actually this is just a
wrapper around remove_move_1(), making sure, the removed move really is
not in the list anymore. */
static void
remove_move (struct web *web, struct move *move)
{
struct move_list *ml;
remove_move_1 (web, move);
for (ml = web->moves; ml; ml = ml->next)
gcc_assert (ml->move != move);
}
/* Merge the moves for the two webs into the first web's movelist. */
void
merge_moves (struct web *u, struct web *v)
{
regset seen;
struct move_list *ml, *ml_next;
seen = BITMAP_XMALLOC ();
for (ml = u->moves; ml; ml = ml->next)
bitmap_set_bit (seen, INSN_UID (ml->move->insn));
for (ml = v->moves; ml; ml = ml_next)
{
ml_next = ml->next;
if (! bitmap_bit_p (seen, INSN_UID (ml->move->insn)))
{
ml->next = u->moves;
u->moves = ml;
}
}
BITMAP_XFREE (seen);
v->moves = NULL;
}
/* Add a web to the simplify worklist, from the freeze worklist. */
static void
add_worklist (struct web *web)
{
if (web->type != PRECOLORED && !web->moves
&& web->num_conflicts < NUM_REGS (web))
{
remove_list (web->dlink, &WEBS(FREEZE));
put_web (web, SIMPLIFY);
}
}
/* Precolored node coalescing heuristic. */
static int
ok (struct web *target, struct web *source)
{
struct conflict_link *wl;
int i;
int color = source->color;
int size;
/* Normally one would think the next test wouldn't be needed.
   We try to coalesce S and T, and S already has a color, and we checked
   when processing the insns that both have the same mode.  So naively
   we could conclude that of course that mode was valid for this color.
   Hah.  But there is sparc.  Before reload there are copy insns
   (e.g. the ones copying arguments to locals) which happily refer to
   colors in invalid modes.  We can't coalesce those things.  */
if (! HARD_REGNO_MODE_OK (source->color, GET_MODE (target->orig_x)))
return 0;
/* Sanity for funny modes. */
size = hard_regno_nregs[color][GET_MODE (target->orig_x)];
if (!size)
return 0;
/* We can't coalesce target with a precolored register which isn't in
usable_regs. */
for (i = size; i--;)
if (TEST_HARD_REG_BIT (never_use_colors, color + i)
|| !TEST_HARD_REG_BIT (target->usable_regs, color + i)
/* Before usually calling ok() at all, we already test, if the
candidates conflict in sup_igraph. But when wide webs are
coalesced to hardregs, we only test the hardweb coalesced into.
This is only the begin color. When actually coalescing both,
it will also take the following size colors, i.e. their webs.
We nowhere checked if the candidate possibly conflicts with
one of _those_, which is possible with partial conflicts,
so we simply do it here (this does one bit-test more than
necessary, the first color). Note, that if X is precolored
bit [X*num_webs + Y] can't be set (see add_conflict_edge()). */
|| TEST_BIT (sup_igraph,
target->id * num_webs + hardreg2web[color + i]->id))
return 0;
for (wl = target->conflict_list; wl; wl = wl->next)
{
struct web *pweb = wl->t;
if (pweb->type == SELECT || pweb->type == COALESCED)
continue;
/* Coalescing target (T) and source (S) is OK, if for
   all conflicts C of T it is true, that:
    1) C will be colored, or
    2) C is a hardreg (precolored), or
    3) C already conflicts with S too, or
    4) a web which contains C conflicts already with S.
   XXX: we handle here only the special case of 4), that C is
   a subreg, and the containing thing is the reg itself, i.e.
   we don't handle the situation where T conflicts with
   (subreg:SI x 1), and S conflicts with (subreg:DI x 0), which
   would also be allowed, as the S-conflict overlaps
   the T-conflict.
   So, we first test the whole web for any of these conditions, and
   continue with the next C, if 1, 2 or 3 is true.  */
if (pweb->num_conflicts < NUM_REGS (pweb)
|| pweb->type == PRECOLORED
|| TEST_BIT (igraph, igraph_index (source->id, pweb->id)) )
continue;
/* This is reached, if not one of 1, 2 or 3 was true. In the case C has
no subwebs, 4 can't be true either, so we can't coalesce S and T. */
if (wl->sub == NULL)
return 0;
else
{
/* The main webs do _not_ conflict, only some parts of both. This
means, that 4 is possibly true, so we need to check this too.
For this we go through all sub conflicts between T and C, and see if
the target part of C already conflicts with S. When this is not
the case we disallow coalescing. */
struct sub_conflict *sl;
for (sl = wl->sub; sl; sl = sl->next)
{
if (!TEST_BIT (igraph, igraph_index (source->id, sl->t->id)))
return 0;
}
}
}
return 1;
}
/* Non-precolored node coalescing heuristic. */
static int
conservative (struct web *target, struct web *source)
{
unsigned int k;
unsigned int loop;
regset seen;
struct conflict_link *wl;
unsigned int num_regs = NUM_REGS (target); /* XXX */
/* k counts the resulting conflict weight, if target and source
would be merged, and all low-degree neighbors would be
removed. */
k = 0 * MAX (target->add_hardregs, source->add_hardregs);
seen = BITMAP_XMALLOC ();
for (loop = 0; loop < 2; loop++)
for (wl = ((loop == 0) ? target : source)->conflict_list;
wl; wl = wl->next)
{
struct web *pweb = wl->t;
if (pweb->type != SELECT && pweb->type != COALESCED
&& pweb->num_conflicts >= NUM_REGS (pweb)
&& ! REGNO_REG_SET_P (seen, pweb->id))
{
SET_REGNO_REG_SET (seen, pweb->id);
k += 1 + pweb->add_hardregs;
}
}
BITMAP_XFREE (seen);
if (k >= num_regs)
return 0;
return 1;
}
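/* Illustrative numbers for the above test: with NUM_REGS (target) == 8,
   if the union of TARGET's and SOURCE's neighbors contains five webs of
   significant degree, each needing a single hardreg
   (add_hardregs == 0), then k == 5 < 8 and the coalesce is considered
   safe; with eight or more such neighbors it is rejected.  */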
/* If the web is coalesced, return its alias.  Otherwise, return what
   was passed in.  */
struct web *
alias (struct web *web)
{
while (web->type == COALESCED)
web = web->alias;
return web;
}
/* Returns nonzero if the TYPE belongs to one of those representing
   SIMPLIFY types.  */
static inline unsigned int
simplify_p (enum ra_node_type type)
{
return type == SIMPLIFY || type == SIMPLIFY_SPILL || type == SIMPLIFY_FAT;
}
/* Actually combine two webs, that can be coalesced. */
static void
combine (struct web *u, struct web *v)
{
int i;
struct conflict_link *wl;
gcc_assert (u != v);
gcc_assert (v->type != COALESCED);
gcc_assert ((u->regno >= max_normal_pseudo)
== (v->regno >= max_normal_pseudo));
remove_web_from_list (v);
put_web (v, COALESCED);
v->alias = u;
u->is_coalesced = 1;
v->is_coalesced = 1;
u->num_aliased += 1 + v->num_aliased;
if (flag_ra_merge_spill_costs && u->type != PRECOLORED)
u->spill_cost += v->spill_cost;
/*u->spill_cost = MAX (u->spill_cost, v->spill_cost);*/
merge_moves (u, v);
/* combine add_hardregs's of U and V. */
for (wl = v->conflict_list; wl; wl = wl->next)
{
struct web *pweb = wl->t;
/* We don't strictly need to move conflicts between webs which are
   already coalesced or selected, if we do iterated coalescing, or
   better, if we don't need to be able to break aliases again.
   I.e. normally we would use the condition
   (pweb->type != SELECT && pweb->type != COALESCED).
   But for now we simply merge all conflicts.  It doesn't take that
   much time.  */
if (1)
{
struct web *web = u;
int nregs = 1 + v->add_hardregs;
if (u->type == PRECOLORED)
nregs = hard_regno_nregs[u->color][GET_MODE (v->orig_x)];
/* For precolored U's we need to make conflicts between V's
neighbors and as many hardregs from U as V needed if it gets
color U. For now we approximate this by V->add_hardregs, which
could be too much in multi-length classes. We should really
count how many hardregs are needed for V with color U. When U
isn't precolored this loop breaks out after one iteration. */
for (i = 0; i < nregs; i++)
{
if (u->type == PRECOLORED)
web = hardreg2web[i + u->color];
if (wl->sub == NULL)
record_conflict (web, pweb);
else
{
struct sub_conflict *sl;
/* So, between V and PWEB there are sub_conflicts. We
need to relocate those conflicts to be between WEB (==
U when it wasn't precolored) and PWEB. In the case
only a part of V conflicted with (part of) PWEB we
nevertheless make the new conflict between the whole U
and the (part of) PWEB. Later we might try to find in
U the correct subpart corresponding (by size and
offset) to the part of V (sl->s) which was the source
of the conflict. */
for (sl = wl->sub; sl; sl = sl->next)
{
/* Beware: sl->s is no subweb of web (== U) but of V.
We try to search a corresponding subpart of U.
If we found none we let it conflict with the whole U.
Note that find_subweb() only looks for mode and
subreg_byte of the REG rtx but not for the pseudo
reg number (otherwise it would be guaranteed to
_not_ find any subpart). */
struct web *sweb = NULL;
if (SUBWEB_P (sl->s))
sweb = find_subweb (web, sl->s->orig_x);
if (!sweb)
sweb = web;
record_conflict (sweb, sl->t);
}
}
if (u->type != PRECOLORED)
break;
}
if (pweb->type != SELECT && pweb->type != COALESCED)
decrement_degree (pweb, 1 + v->add_hardregs);
}
}
/* Now merge the usable_regs together. */
/* XXX That merging might normally make it necessary to
   adjust add_hardregs, which also means to adjust neighbors.  This can
   result in making some more webs trivially colorable (or the opposite,
   if this increases our add_hardregs).  Because we intersect the
   usable_regs it should only be possible to decrease add_hardregs.  So a
   conservative solution for now is to simply not change it.  */
u->use_my_regs = 1;
AND_HARD_REG_SET (u->usable_regs, v->usable_regs);
u->regclass = reg_class_subunion[u->regclass][v->regclass];
/* Count number of possible hardregs. This might make U a spillweb,
but that could also happen, if U and V together had too many
conflicts. */
u->num_freedom = hard_regs_count (u->usable_regs);
u->num_freedom -= u->add_hardregs;
/* The next checks for an invalid coalesced move (both webs must have
possible hardregs in common). */
gcc_assert (u->num_freedom);
if (u->num_conflicts >= NUM_REGS (u)
&& (u->type == FREEZE || simplify_p (u->type)))
{
remove_web_from_list (u);
put_web (u, SPILL);
}
/* We want the most relaxed combination of spill_temp state.
   I.e. if any was no spilltemp or a spilltemp2, the result is so too,
   otherwise if any is short, the result is too.  It remains
   unchanged when both are normal spilltemps.  */
if (v->spill_temp == 0)
u->spill_temp = 0;
else if (v->spill_temp == 2 && u->spill_temp != 0)
u->spill_temp = 2;
else if (v->spill_temp == 3 && u->spill_temp == 1)
u->spill_temp = 3;
}
/* Attempt to coalesce the first thing on the move worklist.
This is used only for iterated coalescing. */
static void
coalesce (void)
{
struct dlist *d = pop_list (&mv_worklist);
struct move *m = DLIST_MOVE (d);
struct web *source = alias (m->source_web);
struct web *target = alias (m->target_web);
if (target->type == PRECOLORED)
{
struct web *h = source;
source = target;
target = h;
}
if (source == target)
{
remove_move (source, m);
put_move (m, MV_COALESCED);
add_worklist (source);
}
else if (target->type == PRECOLORED
|| TEST_BIT (sup_igraph, source->id * num_webs + target->id)
|| TEST_BIT (sup_igraph, target->id * num_webs + source->id)
|| !ok_class (target, source))
{
remove_move (source, m);
remove_move (target, m);
put_move (m, CONSTRAINED);
add_worklist (source);
add_worklist (target);
}
else if ((source->type == PRECOLORED && ok (target, source))
|| (source->type != PRECOLORED
&& conservative (target, source)))
{
remove_move (source, m);
remove_move (target, m);
put_move (m, MV_COALESCED);
combine (source, target);
add_worklist (source);
}
else
put_move (m, ACTIVE);
}
/* Freeze the moves associated with the web. Used for iterated coalescing. */
static void
freeze_moves (struct web *web)
{
struct move_list *ml, *ml_next;
for (ml = web->moves; ml; ml = ml_next)
{
struct move *m = ml->move;
struct web *src, *dest;
ml_next = ml->next;
if (m->type == ACTIVE)
remove_list (m->dlink, &mv_active);
else
remove_list (m->dlink, &mv_worklist);
put_move (m, FROZEN);
remove_move (web, m);
src = alias (m->source_web);
dest = alias (m->target_web);
src = (src == web) ? dest : src;
remove_move (src, m);
/* XXX GA use the original v, instead of alias(v) */
if (!src->moves && src->num_conflicts < NUM_REGS (src))
{
remove_list (src->dlink, &WEBS(FREEZE));
put_web (src, SIMPLIFY);
}
}
}
/* Freeze the first thing on the freeze worklist (only for iterated
coalescing). */
static void
freeze (void)
{
struct dlist *d = pop_list (&WEBS(FREEZE));
put_web (DLIST_WEB (d), SIMPLIFY);
freeze_moves (DLIST_WEB (d));
}
/* The current spill heuristic. Returns a number for a WEB.
Webs with higher numbers are selected later. */
static unsigned HOST_WIDE_INT (*spill_heuristic) (struct web *);
static unsigned HOST_WIDE_INT default_spill_heuristic (struct web *);
/* Our default heuristic is similar to spill_cost / num_conflicts.
Just scaled for integer arithmetic, and it favors coalesced webs,
and webs which span more insns with deaths. */
static unsigned HOST_WIDE_INT
default_spill_heuristic (struct web *web)
{
unsigned HOST_WIDE_INT ret;
unsigned int divisor = 1;
/* Make coalesce targets cheaper to spill, because they will be broken
up again into smaller parts. */
if (flag_ra_break_aliases)
divisor += web->num_aliased;
divisor += web->num_conflicts;
ret = ((web->spill_cost << 8) + divisor - 1) / divisor;
/* It is better to spill webs that span more insns (deaths in our
case) than other webs with the otherwise same spill_cost. So make
them a little bit cheaper. Remember that spill_cost is unsigned. */
if (web->span_deaths < ret)
ret -= web->span_deaths;
return ret;
}
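/* Worked example (made-up numbers): a web with spill_cost 10,
   num_conflicts 4, no aliases and span_deaths 12 gets
   divisor == 1 + 4 == 5, ret == ((10 << 8) + 5 - 1) / 5 == 2564 / 5
   == 512, and after the span_deaths bonus ret == 512 - 12 == 500.  */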
/* Select the cheapest spill to be potentially spilled (we don't
*actually* spill until we need to). */
static void
select_spill (void)
{
unsigned HOST_WIDE_INT best = (unsigned HOST_WIDE_INT) -1;
struct dlist *bestd = NULL;
unsigned HOST_WIDE_INT best2 = (unsigned HOST_WIDE_INT) -1;
struct dlist *bestd2 = NULL;
struct dlist *d;
for (d = WEBS(SPILL); d; d = d->next)
{
struct web *w = DLIST_WEB (d);
unsigned HOST_WIDE_INT cost = spill_heuristic (w);
if ((!w->spill_temp) && cost < best)
{
best = cost;
bestd = d;
}
/* Specially marked spill temps can be spilled.  So can coalesce
   targets.  Eventually they will be broken up later in the
   colorizing process, so if we have nothing better, take that.  */
else if ((w->spill_temp == 2 || w->is_coalesced) && cost < best2)
{
best2 = cost;
bestd2 = d;
}
}
if (!bestd)
{
bestd = bestd2;
best = best2;
}
gcc_assert (bestd);
/* Note the potential spill. */
DLIST_WEB (bestd)->was_spilled = 1;
remove_list (bestd, &WEBS(SPILL));
put_web (DLIST_WEB (bestd), SIMPLIFY);
freeze_moves (DLIST_WEB (bestd));
ra_debug_msg (DUMP_PROCESS, " potential spill web %3d, conflicts = %d\n",
DLIST_WEB (bestd)->id, DLIST_WEB (bestd)->num_conflicts);
}
/* Given a set of forbidden colors to begin at, and a set of still
   free colors, and MODE, returns nonzero if color C is still usable.  */
static int
color_usable_p (int c, HARD_REG_SET dont_begin_colors,
HARD_REG_SET free_colors, enum machine_mode mode)
{
if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
&& TEST_HARD_REG_BIT (free_colors, c)
&& HARD_REGNO_MODE_OK (c, mode))
{
int i, size;
size = hard_regno_nregs[c][mode];
for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
if (i == size)
return 1;
}
return 0;
}
/* I don't want to clutter up the actual code with ifdef's. */
#ifdef REG_ALLOC_ORDER
#define INV_REG_ALLOC_ORDER(c) inv_reg_alloc_order[c]
#else
#define INV_REG_ALLOC_ORDER(c) c
#endif
/* Searches in FREE_COLORS for a block of hardregs of the right length
for MODE, which doesn't begin at a hardreg mentioned in DONT_BEGIN_COLORS.
If it needs more than one hardreg it prefers blocks beginning
at an even hardreg, and only gives an odd begin reg if no other
block could be found. */
int
get_free_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET free_colors,
enum machine_mode mode)
{
int c;
int last_resort_reg = -1;
int pref_reg = -1;
int pref_reg_order = INT_MAX;
int last_resort_reg_order = INT_MAX;
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
&& TEST_HARD_REG_BIT (free_colors, c)
&& HARD_REGNO_MODE_OK (c, mode))
{
int i, size;
size = hard_regno_nregs[c][mode];
for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
if (i != size)
{
c += i;
continue;
}
if (i == size)
{
if (size < 2 || (c & 1) == 0)
{
if (INV_REG_ALLOC_ORDER (c) < pref_reg_order)
{
pref_reg = c;
pref_reg_order = INV_REG_ALLOC_ORDER (c);
}
}
else if (INV_REG_ALLOC_ORDER (c) < last_resort_reg_order)
{
last_resort_reg = c;
last_resort_reg_order = INV_REG_ALLOC_ORDER (c);
}
}
else
c += i;
}
return pref_reg >= 0 ? pref_reg : last_resort_reg;
}
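/* Example (assuming no REG_ALLOC_ORDER, i.e. INV_REG_ALLOC_ORDER is
   the identity): for a mode needing two hardregs with free_colors
   {1, 2, 3, 4} and an empty dont_begin set, the candidate blocks begin
   at 1, 2 and 3; reg 2 is the only even begin and becomes pref_reg,
   reg 1 becomes last_resort_reg, and 2 is returned.  */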
/* Similar to get_free_reg(), but first search in colors provided
by BIAS _and_ PREFER_COLORS, then in BIAS alone, then in PREFER_COLORS
alone, and only then for any free color. If flag_ra_biased is zero
only do the last two steps. */
static int
get_biased_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET bias,
HARD_REG_SET prefer_colors, HARD_REG_SET free_colors,
enum machine_mode mode)
{
int c = -1;
HARD_REG_SET s;
if (flag_ra_biased)
{
COPY_HARD_REG_SET (s, dont_begin_colors);
IOR_COMPL_HARD_REG_SET (s, bias);
IOR_COMPL_HARD_REG_SET (s, prefer_colors);
c = get_free_reg (s, free_colors, mode);
if (c >= 0)
return c;
COPY_HARD_REG_SET (s, dont_begin_colors);
IOR_COMPL_HARD_REG_SET (s, bias);
c = get_free_reg (s, free_colors, mode);
if (c >= 0)
return c;
}
COPY_HARD_REG_SET (s, dont_begin_colors);
IOR_COMPL_HARD_REG_SET (s, prefer_colors);
c = get_free_reg (s, free_colors, mode);
if (c >= 0)
return c;
c = get_free_reg (dont_begin_colors, free_colors, mode);
return c;
}
/* Counts the number of non-overlapping bitblocks of length LEN
in FREE_COLORS. */
static int
count_long_blocks (HARD_REG_SET free_colors, int len)
{
int i, j;
int count = 0;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
if (!TEST_HARD_REG_BIT (free_colors, i))
continue;
for (j = 1; j < len; j++)
if (!TEST_HARD_REG_BIT (free_colors, i + j))
break;
/* Bits [i .. i+j-1] are free. */
if (j == len)
count++;
i += j - 1;
}
return count;
}
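/* E.g. for FREE_COLORS with bits {0, 1, 2, 4, 5} set and LEN == 2,
   this counts [0,1] and [4,5]: the pair [1,2] is also free but
   overlaps [0,1] and is therefore skipped, giving a result of 2.  */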
/* Given a hardreg set S, return a string representing it.
Either as 0/1 string, or as hex value depending on the implementation
of hardreg sets. Note that this string is statically allocated. */
static char *
hardregset_to_string (HARD_REG_SET s)
{
static char string[/*FIRST_PSEUDO_REGISTER + 30*/1024];
#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDEST_FAST_INT
sprintf (string, HOST_WIDE_INT_PRINT_HEX, (HOST_WIDE_INT) s);
#else
char *c = string;
int i,j;
c += sprintf (c, "{ ");
for (i = 0;i < HARD_REG_SET_LONGS; i++)
{
for (j = 0; j < HOST_BITS_PER_WIDEST_FAST_INT; j++)
c += sprintf (c, "%s", ( 1 << j) & s[i] ? "1" : "0");
c += sprintf (c, "%s", i ? ", " : "");
}
c += sprintf (c, " }");
#endif
return string;
}
/* For WEB, look at its already colored neighbors, and calculate
   the set of hardregs which is not allowed as color for WEB.  Place
   that set in *RESULT.  Note that the set of forbidden begin colors
   is not the same as all colors taken up by neighbors.  E.g. suppose
   two DImode webs, but only the lo-part from one conflicts with the
   hi-part from the other, and suppose the other gets colors 2 and 3
   (it needs two SImode hardregs).  Now the first can also take color
   1 or 2, although in those cases there's a partial overlap.  Only
   3 can't be used as begin color.  */
static void
calculate_dont_begin (struct web *web, HARD_REG_SET *result)
{
struct conflict_link *wl;
HARD_REG_SET dont_begin;
/* The bits set in dont_begin correspond to the hardregs, at which
WEB may not begin. This differs from the set of _all_ hardregs which
are taken by WEB's conflicts in the presence of wide webs, where only
some parts conflict with others. */
CLEAR_HARD_REG_SET (dont_begin);
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *w;
struct web *ptarget = alias (wl->t);
struct sub_conflict *sl = wl->sub;
w = sl ? sl->t : wl->t;
while (w)
{
if (ptarget->type == COLORED || ptarget->type == PRECOLORED)
{
struct web *source = (sl) ? sl->s : web;
unsigned int tsize = hard_regno_nregs[ptarget->color]
[GET_MODE (w->orig_x)];
/* ssize is only a first guess for the size. */
unsigned int ssize = hard_regno_nregs[ptarget->color][GET_MODE
(source->orig_x)];
unsigned int tofs = 0;
unsigned int sofs = 0;
/* C1 and C2 can become negative, so unsigned
would be wrong. */
int c1, c2;
if (SUBWEB_P (w)
&& GET_MODE_SIZE (GET_MODE (w->orig_x)) >= UNITS_PER_WORD)
tofs = (SUBREG_BYTE (w->orig_x) / UNITS_PER_WORD);
if (SUBWEB_P (source)
&& GET_MODE_SIZE (GET_MODE (source->orig_x))
>= UNITS_PER_WORD)
sofs = (SUBREG_BYTE (source->orig_x) / UNITS_PER_WORD);
c1 = ptarget->color + tofs - sofs - ssize + 1;
c2 = ptarget->color + tofs + tsize - 1 - sofs;
if (c2 >= 0)
{
if (c1 < 0)
c1 = 0;
/* Because ssize was only guessed above, which influenced our
   begin color (c1), we need adjustment if for that color
   another size would be needed.  This is done by moving
   c1 to a place where the last of the source's hardregs does not
   overlap the first of the target's colors.  */
while (c1 + sofs
+ hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1
< ptarget->color + tofs)
c1++;
while (c1 > 0 && c1 + sofs
+ hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1
> ptarget->color + tofs)
c1--;
for (; c1 <= c2; c1++)
SET_HARD_REG_BIT (dont_begin, c1);
}
}
/* The next if() only gets true if there was no wl->sub at all, in
   which case we make only one pass through this loop, with W being
   a whole web.  */
if (!sl)
break;
sl = sl->next;
w = sl ? sl->t : NULL;
}
}
COPY_HARD_REG_SET (*result, dont_begin);
}
/* Try to assign a color to WEB.  If HARD is nonzero, we try many
   tricks to get it one color, including respilling already colored
   neighbors.

   We also try very hard not to constrain the uncolored non-spill
   neighbors, which need more hardregs than we do.  Consider a situation: 2
   hardregs free for us (0 and 1), and one of our neighbors needs 2
   hardregs, and only conflicts with us.  There are 3 hardregs in all.  Now
   a simple-minded method might choose 1 as color for us.  Then our neighbor
   has two free colors (0 and 2) as it should, but they are not consecutive,
   so coloring it later would fail.  This leads to nasty problems on
   register-starved machines, so we try to avoid this.  */
static void
colorize_one_web (struct web *web, int hard)
{
struct conflict_link *wl;
HARD_REG_SET colors, dont_begin;
int c = -1;
int bestc = -1;
int neighbor_needs= 0;
struct web *fats_parent = NULL;
int num_fat = 0;
int long_blocks = 0;
int best_long_blocks = -1;
HARD_REG_SET fat_colors;
HARD_REG_SET bias;
CLEAR_HARD_REG_SET (fat_colors);
if (web->regno >= max_normal_pseudo)
hard = 0;
/* First we want to know the colors at which we can't begin. */
calculate_dont_begin (web, &dont_begin);
CLEAR_HARD_REG_SET (bias);
/* Now set up the set of colors used by our neighbors' neighbors,
   and search for the biggest noncolored neighbor.  */
neighbor_needs = web->add_hardregs + 1;
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *w;
struct web *ptarget = alias (wl->t);
struct sub_conflict *sl = wl->sub;
IOR_HARD_REG_SET (bias, ptarget->bias_colors);
w = sl ? sl->t : wl->t;
if (ptarget->type != COLORED && ptarget->type != PRECOLORED
&& !ptarget->was_spilled)
while (w)
{
if (find_web_for_subweb (w)->type != COALESCED
&& w->add_hardregs >= neighbor_needs)
{
neighbor_needs = w->add_hardregs;
fats_parent = ptarget;
num_fat++;
}
if (!sl)
break;
sl = sl->next;
w = sl ? sl->t : NULL;
}
}
ra_debug_msg (DUMP_COLORIZE, "colorize web %d [don't begin at %s]", web->id,
hardregset_to_string (dont_begin));
/* If there are some fat neighbors, remember their usable regs,
and how many blocks are free in it for that neighbor. */
if (num_fat)
{
COPY_HARD_REG_SET (fat_colors, fats_parent->usable_regs);
long_blocks = count_long_blocks (fat_colors, neighbor_needs + 1);
}
/* We break out, if we found a color which doesn't constrain
neighbors, or if we can't find any colors. */
while (1)
{
HARD_REG_SET call_clobbered;
/* Here we choose a hard-reg for the current web.  For non-spill
   temporaries we first search in the hardregs for its preferred
class, then, if we found nothing appropriate, in those of the
alternate class. For spill temporaries we only search in
usable_regs of this web (which is probably larger than that of
the preferred or alternate class). All searches first try to
find a non-call-clobbered hard-reg.
XXX this should be more fine grained... First look into preferred
non-callclobbered hardregs, then _if_ the web crosses calls, in
alternate non-cc hardregs, and only _then_ also in preferred cc
hardregs (and alternate ones). Currently we don't track the number
of calls crossed for webs. We should. */
if (web->use_my_regs)
{
COPY_HARD_REG_SET (colors, web->usable_regs);
AND_HARD_REG_SET (colors,
usable_regs[reg_preferred_class (web->regno)]);
}
else
COPY_HARD_REG_SET (colors,
usable_regs[reg_preferred_class (web->regno)]);
#ifdef CANNOT_CHANGE_MODE_CLASS
if (web->mode_changed)
AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs);
#endif
COPY_HARD_REG_SET (call_clobbered, colors);
AND_HARD_REG_SET (call_clobbered, call_used_reg_set);
/* If this web got a color in the last pass, try to give it the
   same color again.  This will lead to much better colorization
   down the line, as we spilled for a certain coloring last time.  */
if (web->old_color)
{
c = web->old_color - 1;
if (!color_usable_p (c, dont_begin, colors,
PSEUDO_REGNO_MODE (web->regno)))
c = -1;
}
else
c = -1;
if (c < 0)
c = get_biased_reg (dont_begin, bias, web->prefer_colors,
call_clobbered, PSEUDO_REGNO_MODE (web->regno));
if (c < 0)
c = get_biased_reg (dont_begin, bias, web->prefer_colors,
colors, PSEUDO_REGNO_MODE (web->regno));
if (c < 0)
{
if (web->use_my_regs)
IOR_HARD_REG_SET (colors, web->usable_regs);
else
IOR_HARD_REG_SET (colors, usable_regs
[reg_alternate_class (web->regno)]);
#ifdef CANNOT_CHANGE_MODE_CLASS
if (web->mode_changed)
AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs);
#endif
COPY_HARD_REG_SET (call_clobbered, colors);
AND_HARD_REG_SET (call_clobbered, call_used_reg_set);
c = get_biased_reg (dont_begin, bias, web->prefer_colors,
call_clobbered, PSEUDO_REGNO_MODE (web->regno));
if (c < 0)
c = get_biased_reg (dont_begin, bias, web->prefer_colors,
colors, PSEUDO_REGNO_MODE (web->regno));
}
if (c < 0)
break;
if (bestc < 0)
bestc = c;
/* If one of the yet uncolored neighbors, which is not a potential
   spill, needs a block of hardregs, be sure not to destroy such a block
   by coloring one reg in the middle.  */
if (num_fat)
{
int i;
int new_long;
HARD_REG_SET colors1;
COPY_HARD_REG_SET (colors1, fat_colors);
for (i = 0; i < 1 + web->add_hardregs; i++)
CLEAR_HARD_REG_BIT (colors1, c + i);
new_long = count_long_blocks (colors1, neighbor_needs + 1);
/* If we changed the number of long blocks, and it's now smaller
than needed, we try to avoid this color. */
if (long_blocks != new_long && new_long < num_fat)
{
if (new_long > best_long_blocks)
{
best_long_blocks = new_long;
bestc = c;
}
SET_HARD_REG_BIT (dont_begin, c);
ra_debug_msg (DUMP_COLORIZE, " avoid %d", c);
}
else
/* We found a color which doesn't destroy a block. */
break;
}
/* If we have no fat neighbors, the current color won't become
   "better", so we've found it.  */
else
break;
}
ra_debug_msg (DUMP_COLORIZE, " --> got %d", c < 0 ? bestc : c);
if (bestc >= 0 && c < 0 && !web->was_spilled)
{
/* This is a non-potential-spill web, which got a color that did
   destroy a hardreg block for one of its neighbors.  We color
   this web anyway and hope for the best for the neighbor, if we are
   a spill temp.  */
if (1 || web->spill_temp)
c = bestc;
ra_debug_msg (DUMP_COLORIZE, " [constrains neighbors]");
}
ra_debug_msg (DUMP_COLORIZE, "\n");
if (c < 0)
{
/* Guard against a simplified node being spilled. */
/* Don't assert.  This can happen, when e.g. enough registers
   are available in colors, but they are not consecutive.  This is a
   very serious issue if this web is a short-lived one, because
   even if we spill this one here, the situation won't become better
   in the next iteration.  It probably will have the same conflicts,
   those will have the same colors, and we would come here again, for
   all parts into which this one gets split by the spill.  This
   can result in endless iteration spilling the same register again and
   again.  That's why we try to find a neighbor which spans more
   instructions than we do, and got a color, and try to spill _that_.
   gcc_assert (DLIST_WEB (d)->was_spilled >= 0); */
if (hard && (!web->was_spilled || web->spill_temp))
{
unsigned int loop;
struct web *try = NULL;
struct web *candidates[8];
ra_debug_msg (DUMP_COLORIZE, " *** %d spilled, although %s ***\n",
web->id, web->spill_temp ? "spilltemp" : "non-spill");
/* We make multiple passes over our conflicts, first trying to
   spill those webs which only got a color by chance but
   were potential spill ones, and if that isn't enough, in a second
   pass also spilling normal colored webs.  If we still didn't find
   a candidate, but we are a spill-temp, we make a third pass
   and also include webs which were targets for coalescing, and
   spill those.  */
memset (candidates, 0, sizeof candidates);
#define set_cand(i, w) \
do { \
if (!candidates[(i)] \
|| (candidates[(i)]->spill_cost < (w)->spill_cost)) \
candidates[(i)] = (w); \
} while (0)
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *w = wl->t;
struct web *aw = alias (w);
/* If we are a spill-temp, we also look at webs coalesced
to precolored ones. Otherwise we only look at webs which
themselves were colored, or coalesced to one. */
if (aw->type == PRECOLORED && w != aw && web->spill_temp
&& flag_ra_optimistic_coalescing)
{
if (!w->spill_temp)
set_cand (4, w);
else if (web->spill_temp == 2
&& w->spill_temp == 2
&& w->spill_cost < web->spill_cost)
set_cand (5, w);
else if (web->spill_temp != 2
&& (w->spill_temp == 2
|| w->spill_cost < web->spill_cost))
set_cand (6, w);
continue;
}
if (aw->type != COLORED)
continue;
if (w->type == COLORED && !w->spill_temp && !w->is_coalesced
&& w->was_spilled)
{
if (w->spill_cost < web->spill_cost)
set_cand (0, w);
else if (web->spill_temp)
set_cand (1, w);
}
if (w->type == COLORED && !w->spill_temp && !w->is_coalesced
&& !w->was_spilled)
{
if (w->spill_cost < web->spill_cost)
set_cand (2, w);
else if (web->spill_temp && web->spill_temp != 2)
set_cand (3, w);
}
if (web->spill_temp)
{
if (w->type == COLORED && w->spill_temp == 2
&& !w->is_coalesced
&& (w->spill_cost < web->spill_cost
|| web->spill_temp != 2))
set_cand (4, w);
if (!aw->spill_temp)
set_cand (5, aw);
if (aw->spill_temp == 2
&& (aw->spill_cost < web->spill_cost
|| web->spill_temp != 2))
set_cand (6, aw);
/* For boehm-gc/misc.c.  If we are a difficult spilltemp,
   coalesced neighbors are also a chance, _even_ if they
   too are spilltemps.  At least their coalescing can be
   broken up, which may reset usable_regs, and makes
   it easier to color.  */
if (web->spill_temp != 2 && aw->is_coalesced
&& flag_ra_optimistic_coalescing)
set_cand (7, aw);
}
}
for (loop = 0; try == NULL && loop < 8; loop++)
if (candidates[loop])
try = candidates[loop];
#undef set_cand
if (try)
{
int old_c = try->color;
if (try->type == COALESCED)
{
gcc_assert (alias (try)->type == PRECOLORED);
ra_debug_msg (DUMP_COLORIZE, " breaking alias %d -> %d\n",
try->id, alias (try)->id);
break_precolored_alias (try);
colorize_one_web (web, hard);
}
else
{
remove_list (try->dlink, &WEBS(COLORED));
put_web (try, SPILLED);
/* Now try to colorize us again. Can recursively make other
webs also spill, until there are no more unspilled
neighbors. */
ra_debug_msg (DUMP_COLORIZE, " trying to spill %d\n", try->id);
colorize_one_web (web, hard);
if (web->type != COLORED)
{
/* We tried recursively to spill all already colored
neighbors, but we are still uncolorable. So it made
no sense to spill those neighbors. Recolor them. */
remove_list (try->dlink, &WEBS(SPILLED));
put_web (try, COLORED);
try->color = old_c;
ra_debug_msg (DUMP_COLORIZE,
" spilling %d was useless\n", try->id);
}
else
{
ra_debug_msg (DUMP_COLORIZE,
" to spill %d was a good idea\n",
try->id);
remove_list (try->dlink, &WEBS(SPILLED));
if (try->was_spilled)
colorize_one_web (try, 0);
else
colorize_one_web (try, hard - 1);
}
}
}
else
/* No more chances to get a color, so give up hope and
spill us. */
put_web (web, SPILLED);
}
else
put_web (web, SPILLED);
}
else
{
put_web (web, COLORED);
web->color = c;
if (flag_ra_biased)
{
int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *ptarget = alias (wl->t);
int i;
for (i = 0; i < nregs; i++)
SET_HARD_REG_BIT (ptarget->bias_colors, c + i);
}
}
}
if (web->regno >= max_normal_pseudo && web->type == SPILLED)
{
web->color = an_unusable_color;
remove_list (web->dlink, &WEBS(SPILLED));
put_web (web, COLORED);
}
if (web->type == SPILLED && flag_ra_optimistic_coalescing
&& web->is_coalesced)
{
ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id);
restore_conflicts_from_coalesce (web);
break_aliases_to_web (web);
insert_coalesced_conflicts ();
ra_debug_msg (DUMP_COLORIZE, "\n");
remove_list (web->dlink, &WEBS(SPILLED));
put_web (web, SELECT);
web->color = -1;
}
}
/* Assign the colors to all nodes on the select stack. And update the
colors of coalesced webs. */
static void
assign_colors (void)
{
struct dlist *d;
while (WEBS(SELECT))
{
d = pop_list (&WEBS(SELECT));
colorize_one_web (DLIST_WEB (d), 1);
}
for (d = WEBS(COALESCED); d; d = d->next)
{
struct web *a = alias (DLIST_WEB (d));
DLIST_WEB (d)->color = a->color;
}
}
/* WEB is a spilled web.  Look if we can improve the cost of the graph,
   by coloring WEB, even if we then need to spill some of its neighbors.
   For this we calculate the cost for each color C that results when we
   _would_ give WEB color C (i.e. the cost of the then spilled neighbors).
   If the lowest cost among them is smaller than the spillcost of WEB, we
   do that recoloring, and instead spill the neighbors.

   This can sometimes help, when due to irregularities in the register
   file and due to multi-word pseudos, the colorization is suboptimal.
   But be aware that currently this pass is quite slow.  */
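/* An illustrative scenario: suppose WEB (spill_cost 20) was spilled,
   but there is a begin color C at which the only colored conflict that
   could not be moved elsewhere has spill_cost 5.  Then
   cost_neighbors[C] == 5 < 20, so the code below gives WEB color C and
   spills that cheaper neighbor instead.  */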
static void
try_recolor_web (struct web *web)
{
struct conflict_link *wl;
unsigned HOST_WIDE_INT *cost_neighbors;
unsigned int *min_color;
int newcol, c;
HARD_REG_SET precolored_neighbors, spill_temps;
HARD_REG_SET possible_begin, wide_seen;
cost_neighbors = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (cost_neighbors[0]));
/* For each hard-reg, count the number of preceding hardregs which
   would overlap this color if used in WEB's mode.  */
min_color = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (int));
CLEAR_HARD_REG_SET (possible_begin);
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
{
int i, nregs;
if (!HARD_REGNO_MODE_OK (c, GET_MODE (web->orig_x)))
continue;
nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
for (i = 0; i < nregs; i++)
if (!TEST_HARD_REG_BIT (web->usable_regs, c + i))
break;
if (i < nregs || nregs == 0)
continue;
SET_HARD_REG_BIT (possible_begin, c);
for (; nregs--;)
if (!min_color[c + nregs])
min_color[c + nregs] = 1 + c;
}
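  /* Illustrative example (hypothetical two-register mode): if hard reg
     3 is the lowest valid begin covering regs 3 and 4, the loop above
     sets min_color[3] = min_color[4] = 1 + 3 = 4.  So min_color[r] - 1
     is the smallest begin color whose span includes r; the lookups
     below rely on exactly that.  */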
CLEAR_HARD_REG_SET (precolored_neighbors);
CLEAR_HARD_REG_SET (spill_temps);
CLEAR_HARD_REG_SET (wide_seen);
for (wl = web->conflict_list; wl; wl = wl->next)
{
HARD_REG_SET dont_begin;
struct web *web2 = alias (wl->t);
struct conflict_link *nn;
int c1, c2;
int wide_p = 0;
if (wl->t->type == COALESCED || web2->type != COLORED)
{
if (web2->type == PRECOLORED)
{
c1 = min_color[web2->color];
c1 = (c1 == 0) ? web2->color : (c1 - 1);
c2 = web2->color;
for (; c1 <= c2; c1++)
SET_HARD_REG_BIT (precolored_neighbors, c1);
}
continue;
}
      /* Mark colors for which some wide webs are involved.  For
	 those the independent sets are not simply one-node graphs, so
	 they can't be recolored independently of their neighborhood.
	 This means that our cost calculation can be incorrect (it may
	 assume it can avoid spilling a web because it thinks some colors
	 are available, although its neighbors, which themselves need
	 recoloring, might take away exactly those colors).  */
if (web2->add_hardregs)
wide_p = 1;
for (nn = web2->conflict_list; nn && !wide_p; nn = nn->next)
if (alias (nn->t)->add_hardregs)
wide_p = 1;
calculate_dont_begin (web2, &dont_begin);
c1 = min_color[web2->color];
/* Note that min_color[] contains 1-based values (zero means
undef). */
c1 = c1 == 0 ? web2->color : (c1 - 1);
c2 = web2->color + hard_regno_nregs[web2->color][GET_MODE
(web2->orig_x)] - 1;
for (; c1 <= c2; c1++)
if (TEST_HARD_REG_BIT (possible_begin, c1))
{
int nregs;
HARD_REG_SET colors;
nregs = hard_regno_nregs[c1][GET_MODE (web->orig_x)];
COPY_HARD_REG_SET (colors, web2->usable_regs);
for (; nregs--;)
CLEAR_HARD_REG_BIT (colors, c1 + nregs);
if (wide_p)
SET_HARD_REG_BIT (wide_seen, c1);
if (get_free_reg (dont_begin, colors,
GET_MODE (web2->orig_x)) < 0)
{
if (web2->spill_temp)
SET_HARD_REG_BIT (spill_temps, c1);
else
cost_neighbors[c1] += web2->spill_cost;
}
}
}
newcol = -1;
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
if (TEST_HARD_REG_BIT (possible_begin, c)
&& !TEST_HARD_REG_BIT (precolored_neighbors, c)
&& !TEST_HARD_REG_BIT (spill_temps, c)
&& (newcol == -1
|| cost_neighbors[c] < cost_neighbors[newcol]))
newcol = c;
if (newcol >= 0 && cost_neighbors[newcol] < web->spill_cost)
{
int nregs = hard_regno_nregs[newcol][GET_MODE (web->orig_x)];
unsigned HOST_WIDE_INT cost = 0;
int *old_colors;
struct conflict_link *wl_next;
ra_debug_msg (DUMP_COLORIZE, "try to set web %d to color %d\n", web->id,
newcol);
remove_list (web->dlink, &WEBS(SPILLED));
put_web (web, COLORED);
web->color = newcol;
old_colors = xcalloc (num_webs, sizeof (int));
for (wl = web->conflict_list; wl; wl = wl_next)
{
struct web *web2 = alias (wl->t);
	  /* If web2 is a coalesce-target, and will become spilled
	     below in colorize_one_web(), and the current conflict wl
	     between web and web2 was only the result of that coalescing,
	     then this conflict will be deleted, making wl invalid.  So
	     save the next conflict right now.  Note that if web2 indeed
	     has such state, then wl->next cannot be deleted in this
	     iteration.  */
wl_next = wl->next;
if (web2->type == COLORED)
{
int nregs2 = hard_regno_nregs[web2->color][GET_MODE
(web2->orig_x)];
if (web->color >= web2->color + nregs2
|| web2->color >= web->color + nregs)
continue;
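	      /* Remember the previous color 1-based, so that a zero in
		 old_colors[] means "not saved".  */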
old_colors[web2->id] = web2->color + 1;
web2->color = -1;
remove_list (web2->dlink, &WEBS(COLORED));
web2->type = SELECT;
/* Allow webs to be spilled. */
if (web2->spill_temp == 0 || web2->spill_temp == 2)
web2->was_spilled = 1;
colorize_one_web (web2, 1);
if (web2->type == SPILLED)
cost += web2->spill_cost;
}
}
      /* The actual cost may be smaller than the guessed one, because
	 partial conflicts could result in some conflicting webs getting
	 a color where we assumed they must be spilled.  See the comment
	 above about what happens when wide webs are involved, and why in
	 that case some webs might actually be spilled although they were
	 thought to be colorable.  */
gcc_assert (cost <= cost_neighbors[newcol]
|| nregs != 1 || TEST_HARD_REG_BIT (wide_seen, newcol));
      /* But if the new spill-cost is higher than our own, then we really
	 lose.  Respill us and recolor the neighbors as before.  */
if (cost > web->spill_cost)
{
ra_debug_msg (DUMP_COLORIZE,
"reset coloring of web %d, too expensive\n", web->id);
remove_list (web->dlink, &WEBS(COLORED));
web->color = -1;
put_web (web, SPILLED);
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *web2 = alias (wl->t);
if (old_colors[web2->id])
{
switch (web2->type)
{
case SPILLED:
remove_list (web2->dlink, &WEBS(SPILLED));
web2->color = old_colors[web2->id] - 1;
put_web (web2, COLORED);
break;
case COLORED:
web2->color = old_colors[web2->id] - 1;
break;
case SELECT:
		  /* This means that WEB2 was once part of a coalesced
		     web, which got spilled in the above colorize_one_web()
		     call, and whose parts then got split and put back
		     onto the SELECT stack.  As the cause for that splitting
		     (the coloring of WEB) was worthless, we should again
		     coalesce the parts, as they were before.  For now we
		     simply leave them SELECTed, for our caller to take
		     care of.  */
break;
default:
gcc_unreachable ();
}
}
}
}
free (old_colors);
}
free (min_color);
free (cost_neighbors);
}
/* This ensures that all conflicts of coalesced webs are seen from
   the webs coalesced into.  combine() only adds those conflicts which
   at the time of combining were not already SELECTed or COALESCED, so
   as not to destroy num_conflicts.  Here we add all remaining conflicts
   and thereby destroy num_conflicts.  This should be used when
   num_conflicts isn't used anymore, e.g. on a completely colored
   graph.  */
static void
insert_coalesced_conflicts (void)
{
struct dlist *d;
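  /* Note that the "0 &&" in the loop condition below disables the
     whole loop body, including the consistency asserts; the actual
     conflict re-recording is likewise commented out further down.  */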
for (d = WEBS(COALESCED); 0 && d; d = d->next)
{
struct web *web = DLIST_WEB (d);
struct web *aweb = alias (web);
struct conflict_link *wl;
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *tweb = aweb;
int i;
int nregs = 1 + web->add_hardregs;
if (aweb->type == PRECOLORED)
nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
for (i = 0; i < nregs; i++)
{
if (aweb->type == PRECOLORED)
tweb = hardreg2web[i + aweb->color];
	  /* There might be some conflict edges lying around
	     where the usable_regs don't intersect.  This can happen
	     when first some webs were coalesced and conflicts
	     propagated, then some combining narrowed usable_regs and
	     further coalescing ignored those conflicts.  Now there are
	     some edges to COALESCED webs but not to their alias.
	     So assert that they really do not conflict.  */
gcc_assert (((tweb->type == PRECOLORED
|| TEST_BIT (sup_igraph,
tweb->id * num_webs + wl->t->id))
&& (wl->t->type == PRECOLORED
|| TEST_BIT (sup_igraph,
wl->t->id * num_webs + tweb->id)))
|| !hard_regs_intersect_p (&tweb->usable_regs,
&wl->t->usable_regs));
/*if (wl->sub == NULL)
record_conflict (tweb, wl->t);
else
{
struct sub_conflict *sl;
for (sl = wl->sub; sl; sl = sl->next)
record_conflict (tweb, sl->t);
}*/
if (aweb->type != PRECOLORED)
break;
}
}
}
}
/* A function suitable to pass to qsort().  Compare the spill costs
   of webs W1 and W2.  When used by qsort, this orders webs with the
   largest cost first.  */
static int
comp_webs_maxcost (const void *w1, const void *w2)
{
struct web *web1 = *(struct web **)w1;
struct web *web2 = *(struct web **)w2;
if (web1->spill_cost > web2->spill_cost)
return -1;
else if (web1->spill_cost < web2->spill_cost)
return 1;
else
return 0;
}
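/* A minimal usage sketch (illustrative only, not part of the
   allocator): sorting an array of webs by descending spill cost with
   the comparator above, as recolor_spills() does for order2web.  */
#if 0
static void
example_sort_webs_by_cost (struct web **webs, unsigned int n)
{
  /* comp_webs_maxcost() returns -1 when its first argument has the
     larger spill_cost, so qsort() puts the most expensive webs first.  */
  qsort (webs, n, sizeof (webs[0]), comp_webs_maxcost);
}
#endif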
/* This tries to recolor all spilled webs.  See try_recolor_web()
   for how this is done.  This just calls it for each spilled web.  */
static void
recolor_spills (void)
{
unsigned int i, num;
struct web **order2web;
num = num_webs - num_subwebs;
order2web = xmalloc (num * sizeof (order2web[0]));
for (i = 0; i < num; i++)
{
order2web[i] = id2web[i];
      /* If we aren't breaking aliases, combine() didn't merge the
	 spill_costs, so do that here to have sane measures.  */
if (!flag_ra_merge_spill_costs && id2web[i]->type == COALESCED)
alias (id2web[i])->spill_cost += id2web[i]->spill_cost;
}
qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost);
insert_coalesced_conflicts ();
dump_graph_cost (DUMP_COSTS, "before spill-recolor");
for (i = 0; i < num; i++)
{
struct web *web = order2web[i];
if (web->type == SPILLED)
try_recolor_web (web);
}
/* It might have been decided in try_recolor_web() (in colorize_one_web())
that a coalesced web should be spilled, so it was put on the
select stack. Those webs need recoloring again, and all remaining
coalesced webs might need their color updated, so simply call
assign_colors() again. */
assign_colors ();
free (order2web);
}
/* This checks the current color assignment for obvious errors,
like two conflicting webs overlapping in colors, or the used colors
not being in usable regs. */
static void
check_colors (void)
{
unsigned int i;
for (i = 0; i < num_webs - num_subwebs; i++)
{
struct web *web = id2web[i];
struct web *aweb = alias (web);
struct conflict_link *wl;
int nregs;
if (web->regno >= max_normal_pseudo)
continue;
switch (aweb->type)
{
case SPILLED:
continue;
case COLORED:
nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
break;
case PRECOLORED:
nregs = 1;
break;
default:
gcc_unreachable ();
}
#ifdef ENABLE_CHECKING
/* The color must be valid for the original usable_regs. */
{
int c;
for (c = 0; c < nregs; c++)
gcc_assert (TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c));
}
#endif
      /* Search the original (pre-coalesce) conflict list.  In the current
	 one some imprecise conflicts may be noted (due to combine() or
	 insert_coalesced_conflicts() relocating partial conflicts), making
	 it look like some wide webs are in conflict and have the same
	 color.  */
wl = (web->have_orig_conflicts ? web->orig_conflict_list
: web->conflict_list);
for (; wl; wl = wl->next)
if (wl->t->regno >= max_normal_pseudo)
continue;
else if (!wl->sub)
{
struct web *web2 = alias (wl->t);
int nregs2;
if (web2->type == COLORED)
nregs2 = hard_regno_nregs[web2->color][GET_MODE (web2->orig_x)];
else if (web2->type == PRECOLORED)
nregs2 = 1;
else
continue;
gcc_assert (aweb->color >= web2->color + nregs2
|| web2->color >= aweb->color + nregs);
continue;
}
else
{
struct sub_conflict *sl;
int scol = aweb->color;
int tcol = alias (wl->t)->color;
if (alias (wl->t)->type == SPILLED)
continue;
for (sl = wl->sub; sl; sl = sl->next)
{
int ssize = hard_regno_nregs[scol][GET_MODE (sl->s->orig_x)];
int tsize = hard_regno_nregs[tcol][GET_MODE (sl->t->orig_x)];
int sofs = 0, tofs = 0;
if (SUBWEB_P (sl->t)
&& GET_MODE_SIZE (GET_MODE (sl->t->orig_x)) >= UNITS_PER_WORD)
tofs = (SUBREG_BYTE (sl->t->orig_x) / UNITS_PER_WORD);
if (SUBWEB_P (sl->s)
&& GET_MODE_SIZE (GET_MODE (sl->s->orig_x))
>= UNITS_PER_WORD)
sofs = (SUBREG_BYTE (sl->s->orig_x) / UNITS_PER_WORD);
gcc_assert ((tcol + tofs >= scol + sofs + ssize)
|| (scol + sofs >= tcol + tofs + tsize));
continue;
}
}
}
}
/* WEB was a coalesced web.  Make it unaliased again, and put it
   back onto the SELECT stack.  */
static void
unalias_web (struct web *web)
{
web->alias = NULL;
web->is_coalesced = 0;
web->color = -1;
  /* Well, initially everything was spilled, so it isn't incorrect
     that the individual parts can also be spilled.
     XXX this isn't entirely correct, as we also relaxed the
     spill_temp flag in combine(), which might have made components
     spill although they were a short or spilltemp web.  */
web->was_spilled = 1;
remove_list (web->dlink, &WEBS(COALESCED));
/* Spilltemps must be colored right now (i.e. as early as possible),
other webs can be deferred to the end (the code building the
stack assumed that in this stage only one web was colored). */
if (web->spill_temp && web->spill_temp != 2)
put_web (web, SELECT);
else
put_web_at_end (web, SELECT);
}
/* WEB is a _target_ for coalescing which got spilled.
   Break all aliases to WEB, and restore some of its members to the
   state they were in before coalescing.  Due to the suboptimal
   structure of the interference graph we need to go through all
   coalesced webs.  At some point we'll change this to be more sane.  */
static void
break_aliases_to_web (struct web *web)
{
struct dlist *d, *d_next;
gcc_assert (web->type == SPILLED);
for (d = WEBS(COALESCED); d; d = d_next)
{
struct web *other = DLIST_WEB (d);
d_next = d->next;
/* Beware: Don't use alias() here. We really want to check only
one level of aliasing, i.e. only break up webs directly
aliased to WEB, not also those aliased through other webs. */
if (other->alias == web)
{
unalias_web (other);
ra_debug_msg (DUMP_COLORIZE, " %d", other->id);
}
}
web->spill_temp = web->orig_spill_temp;
web->spill_cost = web->orig_spill_cost;
/* Beware: The following possibly widens usable_regs again. While
it was narrower there might have been some conflicts added which got
ignored because of non-intersecting hardregsets. All those conflicts
would now matter again. Fortunately we only add conflicts when
coalescing, which is also the time of narrowing. And we remove all
those added conflicts again now that we unalias this web.
Therefore this is safe to do. */
COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs);
web->is_coalesced = 0;
web->num_aliased = 0;
web->was_spilled = 1;
  /* Reset the is_coalesced flag for webs which are themselves targets
     of coalescing.  It was cleared above if they were coalesced to WEB.  */
for (d = WEBS(COALESCED); d; d = d->next)
DLIST_WEB (d)->alias->is_coalesced = 1;
}
/* WEB is a web coalesced into a precolored one.  Break that alias,
   making WEB SELECTed again.  Also restore the conflicts which resulted
   from initially coalescing both.  */
static void
break_precolored_alias (struct web *web)
{
struct web *pre = web->alias;
struct conflict_link *wl;
unsigned int c = pre->color;
unsigned int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
gcc_assert (pre->type == PRECOLORED);
unalias_web (web);
  /* Now we need to look at each conflict X of WEB, check if it conflicts
     with [PRE, PRE+nregs), and remove such conflicts if X has no other
     conflicts which are coalesced into those precolored webs.  */
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *x = wl->t;
struct web *y;
unsigned int i;
struct conflict_link *wl2;
struct conflict_link **pcl;
HARD_REG_SET regs;
if (!x->have_orig_conflicts)
continue;
      /* First look at which colors cannot go away, due to other coalesces
	 still existing.  */
CLEAR_HARD_REG_SET (regs);
for (i = 0; i < nregs; i++)
SET_HARD_REG_BIT (regs, c + i);
for (wl2 = x->conflict_list; wl2; wl2 = wl2->next)
if (wl2->t->type == COALESCED && alias (wl2->t)->type == PRECOLORED)
CLEAR_HARD_REG_BIT (regs, alias (wl2->t)->color);
/* Now also remove the colors of those conflicts which already
were there before coalescing at all. */
for (wl2 = x->orig_conflict_list; wl2; wl2 = wl2->next)
if (wl2->t->type == PRECOLORED)
CLEAR_HARD_REG_BIT (regs, wl2->t->color);
/* The colors now still set are those for which WEB was the last
cause, i.e. those which can be removed. */
y = NULL;
for (i = 0; i < nregs; i++)
if (TEST_HARD_REG_BIT (regs, c + i))
{
struct web *sub;
y = hardreg2web[c + i];
RESET_BIT (sup_igraph, x->id * num_webs + y->id);
RESET_BIT (sup_igraph, y->id * num_webs + x->id);
RESET_BIT (igraph, igraph_index (x->id, y->id));
for (sub = x->subreg_next; sub; sub = sub->subreg_next)
RESET_BIT (igraph, igraph_index (sub->id, y->id));
}
if (!y)
continue;
pcl = &(x->conflict_list);
while (*pcl)
{
struct web *y = (*pcl)->t;
if (y->type != PRECOLORED || !TEST_HARD_REG_BIT (regs, y->color))
pcl = &((*pcl)->next);
else
*pcl = (*pcl)->next;
}
}
}
/* WEB is a spilled web which was target for coalescing.
Delete all interference edges which were added due to that coalescing,
and break up the coalescing. */
static void
restore_conflicts_from_coalesce (struct web *web)
{
struct conflict_link **pcl;
struct conflict_link *wl;
pcl = &(web->conflict_list);
/* No original conflict list means no conflict was added at all
after building the graph. So neither we nor any neighbors have
conflicts due to this coalescing. */
if (!web->have_orig_conflicts)
return;
while (*pcl)
{
struct web *other = (*pcl)->t;
for (wl = web->orig_conflict_list; wl; wl = wl->next)
if (wl->t == other)
break;
if (wl)
{
/* We found this conflict also in the original list, so this
was no new conflict. */
pcl = &((*pcl)->next);
}
else
{
/* This is a new conflict, so delete it from us and
the neighbor. */
struct conflict_link **opcl;
struct conflict_link *owl;
struct sub_conflict *sl;
wl = *pcl;
*pcl = wl->next;
gcc_assert (other->have_orig_conflicts
|| other->type == PRECOLORED);
for (owl = other->orig_conflict_list; owl; owl = owl->next)
if (owl->t == web)
break;
gcc_assert (!owl);
opcl = &(other->conflict_list);
while (*opcl)
{
if ((*opcl)->t == web)
{
owl = *opcl;
*opcl = owl->next;
break;
}
else
{
opcl = &((*opcl)->next);
}
}
gcc_assert (owl || other->type == PRECOLORED);
/* wl and owl contain the edge data to be deleted. */
RESET_BIT (sup_igraph, web->id * num_webs + other->id);
RESET_BIT (sup_igraph, other->id * num_webs + web->id);
RESET_BIT (igraph, igraph_index (web->id, other->id));
for (sl = wl->sub; sl; sl = sl->next)
RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
if (other->type != PRECOLORED)
{
for (sl = owl->sub; sl; sl = sl->next)
RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
}
}
}
/* We must restore usable_regs because record_conflict will use it. */
COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs);
  /* We might have deleted some conflicts above which really are still
     there (diamond pattern coalescing).  This is because we don't
     reference-count interference edges, but some of them were the result
     of different coalesces.  */
for (wl = web->conflict_list; wl; wl = wl->next)
if (wl->t->type == COALESCED)
{
struct web *tweb;
for (tweb = wl->t->alias; tweb; tweb = tweb->alias)
{
if (wl->sub == NULL)
record_conflict (web, tweb);
else
{
struct sub_conflict *sl;
for (sl = wl->sub; sl; sl = sl->next)
{
struct web *sweb = NULL;
if (SUBWEB_P (sl->t))
sweb = find_subweb (tweb, sl->t->orig_x);
if (!sweb)
sweb = tweb;
record_conflict (sl->s, sweb);
}
}
if (tweb->type != COALESCED)
break;
}
}
}
/* Repeatedly break aliases for spilled webs which were targets
   of coalescing, and recolor the resulting parts.  Do this as long as
   there are any spilled coalesce targets.  */
static void
break_coalesced_spills (void)
{
int changed = 0;
while (1)
{
struct dlist *d;
struct web *web;
for (d = WEBS(SPILLED); d; d = d->next)
if (DLIST_WEB (d)->is_coalesced)
break;
if (!d)
break;
changed = 1;
web = DLIST_WEB (d);
ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id);
restore_conflicts_from_coalesce (web);
break_aliases_to_web (web);
      /* WEB was a spilled web and isn't anymore.  Everything coalesced
	 to WEB is now SELECTed and might potentially get a color.
	 If those other webs were themselves targets of coalescing, it
	 might be that there are still some conflicts from aliased webs
	 missing, because they were added in combine() right into the now
	 SELECTed web.  So we need to add those missing conflicts here.  */
insert_coalesced_conflicts ();
ra_debug_msg (DUMP_COLORIZE, "\n");
remove_list (d, &WEBS(SPILLED));
put_web (web, SELECT);
web->color = -1;
while (WEBS(SELECT))
{
d = pop_list (&WEBS(SELECT));
colorize_one_web (DLIST_WEB (d), 1);
}
}
if (changed)
{
struct dlist *d;
for (d = WEBS(COALESCED); d; d = d->next)
{
struct web *a = alias (DLIST_WEB (d));
DLIST_WEB (d)->color = a->color;
}
}
dump_graph_cost (DUMP_COSTS, "after alias-breaking");
}
/* A structure for fast hashing of a pair of webs.
   Used to accumulate savings (from removing copy insns) for coalesced
   webs.  All the pairs are also put into a singly-linked list.  */
struct web_pair
{
struct web_pair *next_hash;
struct web_pair *next_list;
struct web *smaller;
struct web *larger;
unsigned int conflicts;
unsigned HOST_WIDE_INT cost;
};
/* The actual hash table. */
#define WEB_PAIR_HASH_SIZE 8192
static struct web_pair *web_pair_hash[WEB_PAIR_HASH_SIZE];
static struct web_pair *web_pair_list;
static unsigned int num_web_pairs;
/* Clear the hash table of web pairs. */
static void
init_web_pairs (void)
{
memset (web_pair_hash, 0, sizeof web_pair_hash);
num_web_pairs = 0;
web_pair_list = NULL;
}
/* Given two webs connected by a move with cost COST, which together
   have CONFLICTS conflicts, add that pair to the hash table, or, if it
   is already there, accumulate the costs and the conflict number.  */
static void
add_web_pair_cost (struct web *web1, struct web *web2,
unsigned HOST_WIDE_INT cost, unsigned int conflicts)
{
unsigned int hash;
struct web_pair *p;
if (web1->id > web2->id)
{
struct web *h = web1;
web1 = web2;
web2 = h;
}
hash = (web1->id * num_webs + web2->id) % WEB_PAIR_HASH_SIZE;
for (p = web_pair_hash[hash]; p; p = p->next_hash)
if (p->smaller == web1 && p->larger == web2)
{
p->cost += cost;
p->conflicts += conflicts;
return;
}
p = ra_alloc (sizeof *p);
p->next_hash = web_pair_hash[hash];
p->next_list = web_pair_list;
p->smaller = web1;
p->larger = web2;
p->conflicts = conflicts;
p->cost = cost;
web_pair_hash[hash] = p;
web_pair_list = p;
num_web_pairs++;
}
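/* A hypothetical lookup helper, sketched here only to illustrate
   the hashing scheme of add_web_pair_cost() above: pairs are stored
   with the smaller id first, so (A,B) and (B,A) map to the same
   entry.  */
#if 0
static struct web_pair *
example_find_web_pair (struct web *web1, struct web *web2)
{
  unsigned int hash;
  struct web_pair *p;
  /* Canonicalize the pair: the web with the smaller id goes first.  */
  if (web1->id > web2->id)
    {
      struct web *h = web1;
      web1 = web2;
      web2 = h;
    }
  hash = (web1->id * num_webs + web2->id) % WEB_PAIR_HASH_SIZE;
  for (p = web_pair_hash[hash]; p; p = p->next_hash)
    if (p->smaller == web1 && p->larger == web2)
      return p;
  return NULL;
}
#endif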
/* Suitable to be passed to qsort().  Sort web pairs so that those
   with more conflicts and higher cost (which is actually a saving
   when the moves are removed) come first.  */
static int
comp_web_pairs (const void *w1, const void *w2)
{
struct web_pair *p1 = *(struct web_pair **)w1;
struct web_pair *p2 = *(struct web_pair **)w2;
if (p1->conflicts > p2->conflicts)
return -1;
else if (p1->conflicts < p2->conflicts)
return 1;
else if (p1->cost > p2->cost)
return -1;
else if (p1->cost < p2->cost)
return 1;
else
return 0;
}
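/* For example (illustrative values): a pair with (conflicts=5,
   cost=10) sorts before (conflicts=5, cost=7), which in turn sorts
   before (conflicts=3, cost=100); conflicts are compared first, and
   cost only breaks ties.  */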
/* Given the list of web pairs, begin to combine them from the one
with the most savings. */
static void
sort_and_combine_web_pairs (int for_move)
{
unsigned int i;
struct web_pair **sorted;
struct web_pair *p;
if (!num_web_pairs)
return;
sorted = xmalloc (num_web_pairs * sizeof (sorted[0]));
for (p = web_pair_list, i = 0; p; p = p->next_list)
sorted[i++] = p;
gcc_assert (i == num_web_pairs);
qsort (sorted, num_web_pairs, sizeof (sorted[0]), comp_web_pairs);
/* After combining one pair, we actually should adjust the savings
of the other pairs, if they are connected to one of the just coalesced
pair. Later. */
for (i = 0; i < num_web_pairs; i++)
{
struct web *w1, *w2;
p = sorted[i];
w1 = alias (p->smaller);
w2 = alias (p->larger);
if (!for_move && (w1->type == PRECOLORED || w2->type == PRECOLORED))
continue;
else if (w2->type == PRECOLORED)
{
struct web *h = w1;
w1 = w2;
w2 = h;
}
if (w1 != w2
&& !TEST_BIT (sup_igraph, w1->id * num_webs + w2->id)
&& !TEST_BIT (sup_igraph, w2->id * num_webs + w1->id)
&& w2->type != PRECOLORED
&& hard_regs_intersect_p (&w1->usable_regs, &w2->usable_regs))
{
if (w1->type != PRECOLORED
|| (w1->type == PRECOLORED && ok (w2, w1)))
combine (w1, w2);
else if (w1->type == PRECOLORED)
SET_HARD_REG_BIT (w2->prefer_colors, w1->color);
}
}
free (sorted);
}
/* Returns nonzero if source/target reg classes are ok for coalesce. */
static int
ok_class (struct web *target, struct web *source)
{
/* Don't coalesce if preferred classes are different and at least one
of them has a size of 1. This was preventing things such as the
branch on count transformation (i.e. DoLoop) since the target, which
prefers the CTR, was being coalesced with a source which preferred
GENERAL_REGS. If only one web has a preferred class with 1 free reg
then set it as the preferred color of the other web. */
enum reg_class t_class, s_class;
t_class = reg_preferred_class (target->regno);
s_class = reg_preferred_class (source->regno);
if (t_class != s_class)
{
if (num_free_regs[t_class] == 1)
{
if (num_free_regs[s_class] != 1)
SET_HARD_REG_BIT (source->prefer_colors,
single_reg_in_regclass[t_class]);
return 0;
}
else if (num_free_regs[s_class] == 1)
{
SET_HARD_REG_BIT (target->prefer_colors,
single_reg_in_regclass[s_class]);
return 0;
}
}
return 1;
}
/* Greedily coalesce all possible moves.  Begin with the web pair
   giving the most saving if coalesced.  */
static void
aggressive_coalesce (void)
{
struct dlist *d;
struct move *m;
init_web_pairs ();
while ((d = pop_list (&mv_worklist)) != NULL)
if ((m = DLIST_MOVE (d)))
{
struct web *s = alias (m->source_web);
struct web *t = alias (m->target_web);
if (t->type == PRECOLORED)
{
struct web *h = s;
s = t;
t = h;
}
if (s != t
&& t->type != PRECOLORED
&& !TEST_BIT (sup_igraph, s->id * num_webs + t->id)
&& !TEST_BIT (sup_igraph, t->id * num_webs + s->id)
&& ok_class (t, s))
{
if ((s->type == PRECOLORED && ok (t, s))
|| s->type != PRECOLORED)
{
put_move (m, MV_COALESCED);
add_web_pair_cost (s, t, BLOCK_FOR_INSN (m->insn)->frequency,
0);
}
else if (s->type == PRECOLORED)
	  /* It is !ok(t, s).  But later, when coloring the graph, it
	     might be possible to take that color.  So we remember the
	     preferred color to try it first.  */
{
put_move (m, CONSTRAINED);
SET_HARD_REG_BIT (t->prefer_colors, s->color);
}
}
else
{
put_move (m, CONSTRAINED);
}
}
sort_and_combine_web_pairs (1);
}
/* This is the difference between optimistic coalescing and
   optimistic coalescing+.  Extended coalescing also tries to coalesce
   non-conflicting nodes which are not related by a move.  The criterion
   here is that one web must be a source and the other a destination of
   the same insn.  This actually makes sense, as (because they are in
   the same insn) they share many of their neighbors, and coalescing
   them reduces the number of conflicts of those neighbors by one.  For
   this we sort the candidate pairs again according to savings (and this
   time also the conflict number).
   This is also a comparatively slow operation, as we need to go through
   all insns, and for each insn, through all defs and uses.  */
static void
extended_coalesce_2 (void)
{
rtx insn;
struct ra_insn_info info;
unsigned int n;
init_web_pairs ();
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn) && (info = insn_df[INSN_UID (insn)]).num_defs)
for (n = 0; n < info.num_defs; n++)
{
struct web *dest = def2web[DF_REF_ID (info.defs[n])];
dest = alias (find_web_for_subweb (dest));
if (dest->type != PRECOLORED && dest->regno < max_normal_pseudo)
{
unsigned int n2;
for (n2 = 0; n2 < info.num_uses; n2++)
{
struct web *source = use2web[DF_REF_ID (info.uses[n2])];
source = alias (find_web_for_subweb (source));
if (source->type != PRECOLORED
&& source != dest
&& source->regno < max_normal_pseudo
/* Coalesced webs end up using the same REG rtx in
emit_colors(). So we can only coalesce something
of equal modes. */
&& GET_MODE (source->orig_x) == GET_MODE (dest->orig_x)
&& !TEST_BIT (sup_igraph,
dest->id * num_webs + source->id)
&& !TEST_BIT (sup_igraph,
source->id * num_webs + dest->id)
&& ok_class (dest, source)
&& hard_regs_intersect_p (&source->usable_regs,
&dest->usable_regs))
add_web_pair_cost (dest, source,
BLOCK_FOR_INSN (insn)->frequency,
dest->num_conflicts
+ source->num_conflicts);
}
}
}
sort_and_combine_web_pairs (0);
}
/* Check if we forgot to coalesce some moves. */
static void
check_uncoalesced_moves (void)
{
struct move_list *ml;
struct move *m;
for (ml = wl_moves; ml; ml = ml->next)
if ((m = ml->move))
{
struct web *s = alias (m->source_web);
struct web *t = alias (m->target_web);
if (t->type == PRECOLORED)
{
struct web *h = s;
s = t;
t = h;
}
gcc_assert (s == t
|| m->type == CONSTRAINED
		  /* The following can happen when a move was coalesced,
		     but later broken up again.  Then s!=t, but m is still
		     MV_COALESCED.  */
|| m->type == MV_COALESCED
|| t->type == PRECOLORED
|| (s->type == PRECOLORED && !ok (t, s))
|| TEST_BIT (sup_igraph, s->id * num_webs + t->id)
|| TEST_BIT (sup_igraph, t->id * num_webs + s->id));
}
}
/* The toplevel function in this file.  The precondition is that
   the interference graph has been built completely by ra-build.c.  This
   produces a list of spilled, colored and coalesced nodes.  */
void
ra_colorize_graph (struct df *df)
{
if (dump_file)
dump_igraph (df);
build_worklists (df);
/* With optimistic coalescing we coalesce everything we can. */
if (flag_ra_optimistic_coalescing)
{
aggressive_coalesce ();
extended_coalesce_2 ();
}
/* Now build the select stack. */
do
{
simplify ();
if (mv_worklist)
coalesce ();
else if (WEBS(FREEZE))
freeze ();
else if (WEBS(SPILL))
select_spill ();
}
while (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_FAT) || WEBS(SIMPLIFY_SPILL)
|| mv_worklist || WEBS(FREEZE) || WEBS(SPILL));
if (flag_ra_optimistic_coalescing)
check_uncoalesced_moves ();
/* Actually colorize the webs from the select stack. */
assign_colors ();
check_colors ();
dump_graph_cost (DUMP_COSTS, "initially");
if (flag_ra_break_aliases)
break_coalesced_spills ();
check_colors ();
/* And try to improve the cost by recoloring spilled webs. */
recolor_spills ();
dump_graph_cost (DUMP_COSTS, "after spill-recolor");
check_colors ();
}
/* Initialize this module. */
void ra_colorize_init (void)
{
/* FIXME: Choose spill heuristic for platform if we have one */
spill_heuristic = default_spill_heuristic;
}
/* Free all memory. (Note that we don't need to free any per pass
memory). */
void
ra_colorize_free_all (void)
{
struct dlist *d;
while ((d = pop_list (&WEBS(FREE))) != NULL)
put_web (DLIST_WEB (d), INITIAL);
while ((d = pop_list (&WEBS(INITIAL))) != NULL)
{
struct web *web = DLIST_WEB (d);
struct web *wnext;
web->orig_conflict_list = NULL;
web->conflict_list = NULL;
for (web = web->subreg_next; web; web = wnext)
{
wnext = web->subreg_next;
free (web);
}
free (DLIST_WEB (d));
}
}
/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/
/* Graph coloring register allocator
Copyright (C) 2001, 2002, 2004, 2005 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with GCC; see the file COPYING. If not, write to the Free Software
Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "df.h"
#include "output.h"
#include "ra.h"
#include "tm_p.h"
/* This file contains various dumping and debug functions for
the graph coloring register allocator. */
static void ra_print_rtx_1op (FILE *, rtx);
static void ra_print_rtx_2op (FILE *, rtx);
static void ra_print_rtx_3op (FILE *, rtx);
static void ra_print_rtx_object (FILE *, rtx);
/* Print a message to the dump file, if debug_new_regalloc and LEVEL
have any bits in common. */
void
ra_debug_msg (unsigned int level, const char *format, ...)
{
va_list ap;
va_start (ap, format);
if ((debug_new_regalloc & level) != 0 && dump_file != NULL)
vfprintf (dump_file, format, ap);
va_end (ap);
}
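/* For example, with debug_new_regalloc set to
   (DUMP_COLORIZE | DUMP_COSTS), a call such as
     ra_debug_msg (DUMP_COLORIZE, " trying to spill %d\n", id);
   is written to the dump file, while DUMP_IGRAPH messages are
   suppressed.  (Illustration only; "id" stands for any web id.)  */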
/* The following ra_print_xxx() functions print RTL expressions
   in concise infix form.  If the mode can be seen from context it's
   left out.  Most operators are represented by their graphical
   characters, e.g. LE as "<=".  Unknown constructs are printed with
   print_inline_rtx(), which disrupts the nice layout.  Currently
   only the inline asm things are written this way.  */
/* Print rtx X, which is a one operand rtx (op:mode (Y)), as
"op(Y)" to FILE. */
static void
ra_print_rtx_1op (FILE *file, rtx x)
{
enum rtx_code code = GET_CODE (x);
rtx op0 = XEXP (x, 0);
switch (code)
{
case NEG:
case NOT:
fputs ((code == NEG) ? "-(" : "~(", file);
ra_print_rtx (file, op0, 0);
fputs (")", file);
break;
case HIGH:
fputs ("hi(", file);
ra_print_rtx (file, op0, 0);
fputs (")", file);
break;
default:
fprintf (file, "%s", GET_RTX_NAME (code));
if (GET_MODE (x) != VOIDmode)
fprintf (file, ":%s(", GET_MODE_NAME (GET_MODE (x)));
else
fputs ("(", file);
ra_print_rtx (file, op0, 0);
fputs (")", file);
break;
}
}
/* Print rtx X, which is a two operand rtx (op:mode (Y) (Z)),
   as "(Y op Z)" if the operator is known, or as "op(Y, Z)" if not,
   to FILE.  */
static void
ra_print_rtx_2op (FILE *file, rtx x)
{
int infix = 1;
  const char *opname = "unknown_op";
enum rtx_code code = GET_CODE (x);
rtx op0 = XEXP (x, 0);
rtx op1 = XEXP (x, 1);
switch (code)
{
/* class '2' */
case COMPARE: opname = "?"; break;
case MINUS: opname = "-"; break;
case DIV: opname = "/"; break;
case UDIV: opname = "u/"; break;
case MOD: opname = "%"; break;
case UMOD: opname = "u%"; break;
case ASHIFT: opname = "<<"; break;
case ASHIFTRT: opname = "a>>"; break;
case LSHIFTRT: opname = "l>>"; break;
/* class 'c' */
case PLUS: opname = "+"; break;
case MULT: opname = "*"; break;
case AND: opname = "&"; break;
case IOR: opname = "|"; break;
case XOR: opname = "^"; break;
/* class '=' */
case NE: opname = "!="; break;
case EQ: opname = "=="; break;
case LTGT: opname = "<>"; break;
/* class '<' */
case GE: opname = "s>="; break;
case GT: opname = "s>"; break;
case LE: opname = "s<="; break;
case LT: opname = "s<"; break;
case GEU: opname = "u>="; break;
case GTU: opname = "u>"; break;
case LEU: opname = "u<="; break;
case LTU: opname = "u<"; break;
default:
infix = 0;
opname = GET_RTX_NAME (code);
break;
}
if (infix)
{
fputs ("(", file);
ra_print_rtx (file, op0, 0);
fprintf (file, " %s ", opname);
ra_print_rtx (file, op1, 0);
fputs (")", file);
}
else
{
fprintf (file, "%s(", opname);
ra_print_rtx (file, op0, 0);
fputs (", ", file);
ra_print_rtx (file, op1, 0);
fputs (")", file);
}
}
/* Print rtx X, which is a three operand rtx, to FILE.
   I.e. X is either an IF_THEN_ELSE, or a bitmap operation.  */
static void
ra_print_rtx_3op (FILE *file, rtx x)
{
enum rtx_code code = GET_CODE (x);
rtx op0 = XEXP (x, 0);
rtx op1 = XEXP (x, 1);
rtx op2 = XEXP (x, 2);
if (code == IF_THEN_ELSE)
{
ra_print_rtx (file, op0, 0);
fputs (" ? ", file);
ra_print_rtx (file, op1, 0);
fputs (" : ", file);
ra_print_rtx (file, op2, 0);
}
else
{
/* Bitmap-operation */
fprintf (file, "%s:%s(", GET_RTX_NAME (code),
GET_MODE_NAME (GET_MODE (x)));
ra_print_rtx (file, op0, 0);
fputs (", ", file);
ra_print_rtx (file, op1, 0);
fputs (", ", file);
ra_print_rtx (file, op2, 0);
fputs (")", file);
}
}
/* Print rtx X, which represents an object (class 'o', 'C', or some
   constructs of class 'x' (e.g. subreg)), to FILE.
   A (reg XX) rtx is represented as "pXX" if XX is a pseudo, as "name"
   if name is the nonnull hardreg name, or as "hXX" if XX is a hardreg
   whose name is NULL or empty.  */
static void
ra_print_rtx_object (FILE *file, rtx x)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
switch (code)
{
case CONST_INT:
fprintf (file, HOST_WIDE_INT_PRINT_DEC, XWINT (x, 0));
break;
case CONST_DOUBLE:
{
int i, num = 0;
const char *fmt = GET_RTX_FORMAT (code);
fputs ("dbl(", file);
for (i = 0; i < GET_RTX_LENGTH (code); i++)
{
if (num)
fputs (", ", file);
if (fmt[i] == 'e' && XEXP (x, i))
/* The MEM or other stuff */
{
ra_print_rtx (file, XEXP (x, i), 0);
num++;
}
else if (fmt[i] == 'w')
{
fprintf (file, HOST_WIDE_INT_PRINT_HEX, XWINT (x, i));
num++;
}
}
break;
}
case CONST_STRING: fprintf (file, "\"%s\"", XSTR (x, 0)); break;
case CONST: fputs ("const(", file);
ra_print_rtx (file, XEXP (x, 0), 0);
fputs (")", file);
break;
case PC: fputs ("pc", file); break;
case REG:
{
int regno = REGNO (x);
if (regno < FIRST_PSEUDO_REGISTER)
{
int i, nregs = hard_regno_nregs[regno][mode];
if (nregs > 1)
fputs ("[", file);
for (i = 0; i < nregs; i++)
{
if (i)
fputs (", ", file);
if (reg_names[regno+i] && *reg_names[regno + i])
fprintf (file, "%s", reg_names[regno + i]);
else
fprintf (file, "h%d", regno + i);
}
if (nregs > 1)
fputs ("]", file);
}
else
fprintf (file, "p%d", regno);
break;
}
case SUBREG:
{
rtx sub = SUBREG_REG (x);
int ofs = SUBREG_BYTE (x);
if (REG_P (sub)
&& REGNO (sub) < FIRST_PSEUDO_REGISTER)
{
int regno = REGNO (sub);
int i, nregs = hard_regno_nregs[regno][mode];
regno += subreg_regno_offset (regno, GET_MODE (sub),
ofs, mode);
if (nregs > 1)
fputs ("[", file);
for (i = 0; i < nregs; i++)
{
if (i)
fputs (", ", file);
if (reg_names[regno+i])
fprintf (file, "%s", reg_names[regno + i]);
else
fprintf (file, "h%d", regno + i);
}
if (nregs > 1)
fputs ("]", file);
}
else
{
ra_print_rtx (file, sub, 0);
fprintf (file, ":[%s+%d]", GET_MODE_NAME (mode), ofs);
}
break;
}
case SCRATCH: fputs ("scratch", file); break;
case CONCAT: ra_print_rtx_2op (file, x); break;
case HIGH: ra_print_rtx_1op (file, x); break;
case LO_SUM:
fputs ("(", file);
ra_print_rtx (file, XEXP (x, 0), 0);
fputs (" + lo(", file);
ra_print_rtx (file, XEXP (x, 1), 0);
fputs ("))", file);
break;
case MEM: fputs ("[", file);
ra_print_rtx (file, XEXP (x, 0), 0);
fprintf (file, "]:%s", GET_MODE_NAME (GET_MODE (x)));
/* XXX print alias set too ?? */
break;
case LABEL_REF:
{
rtx sub = XEXP (x, 0);
if (NOTE_P (sub)
&& NOTE_LINE_NUMBER (sub) == NOTE_INSN_DELETED_LABEL)
fprintf (file, "(deleted uid=%d)", INSN_UID (sub));
else if (LABEL_P (sub))
fprintf (file, "L%d", CODE_LABEL_NUMBER (sub));
else
fprintf (file, "(nonlabel uid=%d)", INSN_UID (sub));
}
break;
case SYMBOL_REF:
fprintf (file, "sym(\"%s\")", XSTR (x, 0)); break;
case CC0: fputs ("cc0", file); break;
default: print_inline_rtx (file, x, 0); break;
}
}
/* Print a general rtx X to FILE in nice infix form.
   If WITH_PN is set, and X is one of the toplevel constructs
   (insns, notes, labels or barriers), then also print the UIDs of
   the preceding and following insns.  */
void
ra_print_rtx (FILE *file, rtx x, int with_pn)
{
enum rtx_code code;
int unhandled = 0;
if (!x)
return;
code = GET_CODE (x);
/* First handle the insn like constructs. */
if (INSN_P (x) || code == NOTE || code == CODE_LABEL || code == BARRIER)
{
if (INSN_P (x))
fputs (" ", file);
/* Non-insns are prefixed by a ';'. */
if (code == BARRIER)
fputs ("; ", file);
else if (code == NOTE)
/* But notes are indented very far right. */
fprintf (file, "\t\t\t\t\t; ");
else if (code == CODE_LABEL)
/* And labels have their Lxx name first, before the actual UID. */
{
fprintf (file, "L%d:\t; ", CODE_LABEL_NUMBER (x));
if (LABEL_NAME (x))
fprintf (file, "(%s) ", LABEL_NAME (x));
switch (LABEL_KIND (x))
{
case LABEL_NORMAL: break;
case LABEL_STATIC_ENTRY: fputs (" (entry)", file); break;
case LABEL_GLOBAL_ENTRY: fputs (" (global entry)", file); break;
case LABEL_WEAK_ENTRY: fputs (" (weak entry)", file); break;
default: abort();
}
fprintf (file, " [%d uses] uid=(", LABEL_NUSES (x));
}
fprintf (file, "%d", INSN_UID (x));
if (with_pn)
fprintf (file, " %d %d", PREV_INSN (x) ? INSN_UID (PREV_INSN (x)) : 0,
NEXT_INSN (x) ? INSN_UID (NEXT_INSN (x)) : 0);
if (code == BARRIER)
fputs (" -------- barrier ---------", file);
else if (code == CODE_LABEL)
fputs (")", file);
else if (code == NOTE)
{
int ln = NOTE_LINE_NUMBER (x);
if (ln >= (int) NOTE_INSN_BIAS && ln < (int) NOTE_INSN_MAX)
fprintf (file, " %s", GET_NOTE_INSN_NAME (ln));
else
{
expanded_location s;
NOTE_EXPANDED_LOCATION (s, x);
fprintf (file, " line %d", s.line);
if (s.file != NULL)
fprintf (file, ":%s", s.file);
}
}
else
{
fprintf (file, "\t");
ra_print_rtx (file, PATTERN (x), 0);
}
return;
}
switch (code)
{
/* Top-level stuff. */
case PARALLEL:
{
int j;
for (j = 0; j < XVECLEN (x, 0); j++)
{
if (j)
fputs ("\t;; ", file);
ra_print_rtx (file, XVECEXP (x, 0, j), 0);
}
break;
}
case UNSPEC: case UNSPEC_VOLATILE:
{
int j;
fprintf (file, "unspec%s(%d",
(code == UNSPEC) ? "" : "_vol", XINT (x, 1));
for (j = 0; j < XVECLEN (x, 0); j++)
{
fputs (", ", file);
ra_print_rtx (file, XVECEXP (x, 0, j), 0);
}
fputs (")", file);
break;
}
case SET:
if (GET_CODE (SET_DEST (x)) == PC)
{
if (GET_CODE (SET_SRC (x)) == IF_THEN_ELSE
&& GET_CODE (XEXP (SET_SRC(x), 2)) == PC)
{
fputs ("if ", file);
ra_print_rtx (file, XEXP (SET_SRC (x), 0), 0);
fputs (" jump ", file);
ra_print_rtx (file, XEXP (SET_SRC (x), 1), 0);
}
else
{
fputs ("jump ", file);
ra_print_rtx (file, SET_SRC (x), 0);
}
}
else
{
ra_print_rtx (file, SET_DEST (x), 0);
fputs (" <= ", file);
ra_print_rtx (file, SET_SRC (x), 0);
}
break;
case USE:
fputs ("use <= ", file);
ra_print_rtx (file, XEXP (x, 0), 0);
break;
case CLOBBER:
ra_print_rtx (file, XEXP (x, 0), 0);
fputs (" <= clobber", file);
break;
case CALL:
fputs ("call ", file);
ra_print_rtx (file, XEXP (x, 0), 0); /* Address */
fputs (" numargs=", file);
ra_print_rtx (file, XEXP (x, 1), 0); /* Num arguments */
break;
case RETURN:
fputs ("return", file);
break;
case TRAP_IF:
fputs ("if (", file);
ra_print_rtx (file, XEXP (x, 0), 0);
fputs (") trap ", file);
ra_print_rtx (file, XEXP (x, 1), 0);
break;
case RESX:
fprintf (file, "resx from region %d", XINT (x, 0));
break;
/* Different things of class 'x' */
case SUBREG: ra_print_rtx_object (file, x); break;
case STRICT_LOW_PART:
fputs ("low(", file);
ra_print_rtx (file, XEXP (x, 0), 0);
fputs (")", file);
break;
default:
unhandled = 1;
break;
}
if (!unhandled)
return;
switch (GET_RTX_CLASS (code))
{
case RTX_UNARY:
ra_print_rtx_1op (file, x);
break;
case RTX_BIN_ARITH:
case RTX_COMM_ARITH:
case RTX_COMPARE:
case RTX_COMM_COMPARE:
ra_print_rtx_2op (file, x);
break;
case RTX_TERNARY:
case RTX_BITFIELD_OPS:
ra_print_rtx_3op (file, x);
break;
case RTX_OBJ:
case RTX_CONST_OBJ:
ra_print_rtx_object (file, x);
break;
default:
print_inline_rtx (file, x, 0);
break;
}
}
/* This only calls ra_print_rtx(), but emits a final newline. */
void
ra_print_rtx_top (FILE *file, rtx x, int with_pn)
{
ra_print_rtx (file, x, with_pn);
fprintf (file, "\n");
}
/* Callable from gdb. This prints rtx X onto stderr. */
void
ra_debug_rtx (rtx x)
{
ra_print_rtx_top (stderr, x, 1);
}
/* This prints the content of the basic block with index BBI.
   The first and last insns are emitted with the UIDs of the prev and
   next insns.  */
void
ra_debug_bbi (int bbi)
{
basic_block bb = BASIC_BLOCK (bbi);
rtx insn;
for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
{
ra_print_rtx_top (stderr, insn,
(insn == BB_HEAD (bb) || insn == BB_END (bb)));
fprintf (stderr, "\n");
if (insn == BB_END (bb))
break;
}
}
/* Beginning at INSN, emit NUM insns to stderr (one insn if NUM is
   zero), or emit a window of -NUM insns around INSN if NUM is
   negative.  */
void
ra_debug_insns (rtx insn, int num)
{
int i, count = (num == 0 ? 1 : num < 0 ? -num : num);
if (num < 0)
for (i = count / 2; i > 0 && PREV_INSN (insn); i--)
insn = PREV_INSN (insn);
for (i = count; i > 0 && insn; insn = NEXT_INSN (insn), i--)
{
if (LABEL_P (insn))
fprintf (stderr, "\n");
ra_print_rtx_top (stderr, insn, (i == count || i == 1));
}
}
/* Beginning with INSN, emit the whole insn chain into FILE.
This also outputs comments when basic blocks start or end and omits
some notes, if flag_ra_dump_notes is zero. */
void
ra_print_rtl_with_bb (FILE *file, rtx insn)
{
basic_block last_bb, bb;
unsigned int num = 0;
if (!insn)
fputs ("nil", file);
last_bb = NULL;
for (; insn; insn = NEXT_INSN (insn))
{
if (BARRIER_P (insn))
bb = NULL;
else
bb = BLOCK_FOR_INSN (insn);
if (bb != last_bb)
{
if (last_bb)
fprintf (file, ";; End of basic block %d\n", last_bb->index);
if (bb)
fprintf (file, ";; Begin of basic block %d\n", bb->index);
last_bb = bb;
}
if (LABEL_P (insn))
fputc ('\n', file);
if (NOTE_P (insn))
{
/* Ignore basic block and maybe other notes not referencing
deleted things. */
if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK
&& (flag_ra_dump_notes
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL))
{
ra_print_rtx_top (file, insn, (num == 0 || !NEXT_INSN (insn)));
num++;
}
}
else
{
ra_print_rtx_top (file, insn, (num == 0 || !NEXT_INSN (insn)));
num++;
}
}
}
/* Count, for each number of times, how many insns were seen that often
   while building the interference graph, and print the findings.  */
void
dump_number_seen (void)
{
#define N 17
int num[N];
int i;
for (i = 0; i < N; i++)
num[i] = 0;
for (i = 0; i < get_max_uid (); i++)
if (number_seen[i] < N - 1)
num[number_seen[i]]++;
else
num[N - 1]++;
for (i = 0; i < N - 1; i++)
if (num[i])
ra_debug_msg (DUMP_PROCESS, "%d insns seen %d times\n", num[i], i);
if (num[N - 1])
ra_debug_msg (DUMP_PROCESS, "%d insns seen %d and more times\n", num[i],
N - 1);
ra_debug_msg (DUMP_PROCESS, "from overall %d insns\n", get_max_uid ());
#undef N
}
/* Dump the interference graph, the move list and the webs. */
void
dump_igraph (struct df *df ATTRIBUTE_UNUSED)
{
struct move_list *ml;
unsigned int def1, def2;
int num = 0;
int num2;
unsigned int i;
if (!dump_file || (debug_new_regalloc & (DUMP_IGRAPH | DUMP_WEBS)) == 0)
return;
ra_debug_msg (DUMP_IGRAPH, "conflicts:\n ");
for (def1 = 0; def1 < num_webs; def1++)
{
int num1 = num;
num2 = 0;
for (def2 = 0; def2 < num_webs; def2++)
if (def1 != def2 && TEST_BIT (igraph, igraph_index (def1, def2)))
{
if (num1 == num)
{
if (SUBWEB_P (ID2WEB (def1)))
ra_debug_msg (DUMP_IGRAPH, "%d (SUBREG %d, %d) with ", def1,
ID2WEB (def1)->regno,
SUBREG_BYTE (ID2WEB (def1)->orig_x));
else
ra_debug_msg (DUMP_IGRAPH, "%d (REG %d) with ", def1,
ID2WEB (def1)->regno);
}
if ((num2 % 9) == 8)
ra_debug_msg (DUMP_IGRAPH, "\n ");
num++;
num2++;
if (SUBWEB_P (ID2WEB (def2)))
ra_debug_msg (DUMP_IGRAPH, "%d(%d,%d) ", def2, ID2WEB (def2)->regno,
SUBREG_BYTE (ID2WEB (def2)->orig_x));
else
ra_debug_msg (DUMP_IGRAPH, "%d(%d) ", def2, ID2WEB (def2)->regno);
}
if (num1 != num)
ra_debug_msg (DUMP_IGRAPH, "\n ");
}
ra_debug_msg (DUMP_IGRAPH, "\n");
for (ml = wl_moves; ml; ml = ml->next)
if (ml->move)
{
ra_debug_msg (DUMP_IGRAPH, "move: insn %d: Web %d <-- Web %d\n",
INSN_UID (ml->move->insn), ml->move->target_web->id,
ml->move->source_web->id);
}
ra_debug_msg (DUMP_WEBS, "\nWebs:\n");
for (i = 0; i < num_webs; i++)
{
struct web *web = ID2WEB (i);
ra_debug_msg (DUMP_WEBS, " %4d : regno %3d", i, web->regno);
if (SUBWEB_P (web))
{
ra_debug_msg (DUMP_WEBS, " sub %d", SUBREG_BYTE (web->orig_x));
ra_debug_msg (DUMP_WEBS, " par %d", find_web_for_subweb (web)->id);
}
ra_debug_msg (DUMP_WEBS, " +%d (span %d, cost "
HOST_WIDE_INT_PRINT_DEC ") (%s)",
web->add_hardregs, web->span_deaths, web->spill_cost,
reg_class_names[web->regclass]);
if (web->spill_temp == 1)
ra_debug_msg (DUMP_WEBS, " (spilltemp)");
else if (web->spill_temp == 2)
ra_debug_msg (DUMP_WEBS, " (spilltem2)");
else if (web->spill_temp == 3)
ra_debug_msg (DUMP_WEBS, " (short)");
if (web->type == PRECOLORED)
ra_debug_msg (DUMP_WEBS, " (precolored, color=%d)", web->color);
else if (find_web_for_subweb (web)->num_uses == 0)
ra_debug_msg (DUMP_WEBS, " dead");
if (web->crosses_call)
ra_debug_msg (DUMP_WEBS, " xcall");
if (web->regno >= max_normal_pseudo)
ra_debug_msg (DUMP_WEBS, " stack");
ra_debug_msg (DUMP_WEBS, "\n");
}
}
/* Dump the interference graph and webs in a format easily
   parsable by programs.  Used to emit real-world interference graphs
   to my custom graph colorizer.  */
void
dump_igraph_machine (void)
{
unsigned int i;
if (!dump_file || (debug_new_regalloc & DUMP_IGRAPH_M) == 0)
return;
ra_debug_msg (DUMP_IGRAPH_M, "g %d %d\n", num_webs - num_subwebs,
FIRST_PSEUDO_REGISTER);
for (i = 0; i < num_webs - num_subwebs; i++)
{
struct web *web = ID2WEB (i);
struct conflict_link *cl;
int flags = 0;
int numc = 0;
int col = 0;
flags = web->spill_temp & 0xF;
flags |= ((web->type == PRECOLORED) ? 1 : 0) << 4;
flags |= (web->add_hardregs & 0xF) << 5;
for (cl = web->conflict_list; cl; cl = cl->next)
if (cl->t->id < web->id)
numc++;
ra_debug_msg (DUMP_IGRAPH_M, "n %d %d %d %d %d %d %d\n",
web->id, web->color, flags,
(unsigned int)web->spill_cost, web->num_defs, web->num_uses,
numc);
if (web->type != PRECOLORED)
{
ra_debug_msg (DUMP_IGRAPH_M, "s %d", web->id);
while (1)
{
unsigned int u = 0;
int n;
for (n = 0; n < 32 && col < FIRST_PSEUDO_REGISTER; n++, col++)
if (TEST_HARD_REG_BIT (web->usable_regs, col))
u |= 1 << n;
ra_debug_msg (DUMP_IGRAPH_M, " %u", u);
if (col >= FIRST_PSEUDO_REGISTER)
break;
}
ra_debug_msg (DUMP_IGRAPH_M, "\n");
}
if (numc)
{
ra_debug_msg (DUMP_IGRAPH_M, "c %d", web->id);
for (cl = web->conflict_list; cl; cl = cl->next)
{
if (cl->t->id < web->id)
ra_debug_msg (DUMP_IGRAPH_M, " %d", cl->t->id);
}
ra_debug_msg (DUMP_IGRAPH_M, "\n");
}
}
ra_debug_msg (DUMP_IGRAPH_M, "e\n");
}
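/* An illustrative decoder for the 's' lines emitted above (a sketch
   only, not used by the allocator): each word U printed there packs
   up to 32 hard registers of usable_regs, least significant bit
   first.  */
#if 0
static void
example_decode_usable_word (unsigned int u, int base_reg)
{
  int n;
  for (n = 0; n < 32; n++)
    if (u & (1u << n))
      fprintf (stderr, "hard reg %d usable\n", base_reg + n);
}
#endif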
/* This runs after colorization and changing the insn stream.
   It temporarily replaces all pseudo registers with their colors,
   and emits information about whether the resulting insns are
   strictly valid.  */
void
dump_constraints (void)
{
rtx insn;
int i;
if (!dump_file || (debug_new_regalloc & DUMP_CONSTRAINTS) == 0)
return;
for (i = FIRST_PSEUDO_REGISTER; i < ra_max_regno; i++)
if (regno_reg_rtx[i] && REG_P (regno_reg_rtx[i]))
REGNO (regno_reg_rtx[i])
= ra_reg_renumber[i] >= 0 ? ra_reg_renumber[i] : i;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
int code;
int uid = INSN_UID (insn);
int o;
      /* Don't simply force rerecognition, as combine might have left us
	 with some unrecognizable insns, which later leads to aborts
	 in regclass if we now destroy the remembered INSN_CODE().  */
/*INSN_CODE (insn) = -1;*/
code = recog_memoized (insn);
if (code < 0)
{
ra_debug_msg (DUMP_CONSTRAINTS,
"%d: asm insn or not recognizable.\n", uid);
continue;
}
ra_debug_msg (DUMP_CONSTRAINTS,
"%d: code %d {%s}, %d operands, constraints: ",
uid, code, insn_data[code].name, recog_data.n_operands);
extract_insn (insn);
/*preprocess_constraints ();*/
for (o = 0; o < recog_data.n_operands; o++)
{
ra_debug_msg (DUMP_CONSTRAINTS,
"%d:%s ", o, recog_data.constraints[o]);
}
if (constrain_operands (1))
ra_debug_msg (DUMP_CONSTRAINTS, "matches strictly alternative %d",
which_alternative);
else
ra_debug_msg (DUMP_CONSTRAINTS, "doesn't match strictly");
ra_debug_msg (DUMP_CONSTRAINTS, "\n");
}
for (i = FIRST_PSEUDO_REGISTER; i < ra_max_regno; i++)
if (regno_reg_rtx[i] && REG_P (regno_reg_rtx[i]))
REGNO (regno_reg_rtx[i]) = i;
}
/* This counts and emits the accumulated cost of all spilled webs,
   preceded by a custom message MSG, with debug level LEVEL.  */
void
dump_graph_cost (unsigned int level, const char *msg)
{
unsigned int i;
unsigned HOST_WIDE_INT cost;
if (!dump_file || (debug_new_regalloc & level) == 0)
return;
cost = 0;
for (i = 0; i < num_webs; i++)
{
struct web *web = id2web[i];
if (alias (web)->type == SPILLED)
cost += web->orig_spill_cost;
}
ra_debug_msg (level, " spill cost of graph (%s) = "
HOST_WIDE_INT_PRINT_UNSIGNED "\n",
msg ? msg : "", cost);
}
/* Dump the color assignment per web, the coalesced and spilled webs. */
void
dump_ra (struct df *df ATTRIBUTE_UNUSED)
{
struct web *web;
struct dlist *d;
if (!dump_file || (debug_new_regalloc & DUMP_RESULTS) == 0)
return;
ra_debug_msg (DUMP_RESULTS, "\nColored:\n");
for (d = WEBS(COLORED); d; d = d->next)
{
web = DLIST_WEB (d);
ra_debug_msg (DUMP_RESULTS, " %4d : color %d\n", web->id, web->color);
}
ra_debug_msg (DUMP_RESULTS, "\nCoalesced:\n");
for (d = WEBS(COALESCED); d; d = d->next)
{
web = DLIST_WEB (d);
ra_debug_msg (DUMP_RESULTS, " %4d : to web %d, color %d\n", web->id,
alias (web)->id, web->color);
}
ra_debug_msg (DUMP_RESULTS, "\nSpilled:\n");
for (d = WEBS(SPILLED); d; d = d->next)
{
web = DLIST_WEB (d);
ra_debug_msg (DUMP_RESULTS, " %4d\n", web->id);
}
ra_debug_msg (DUMP_RESULTS, "\n");
dump_cost (DUMP_RESULTS);
}
/* Calculate and dump the accumulated costs of certain types of insns
   (loads, stores and copies).  */
void
dump_static_insn_cost (FILE *file, const char *message, const char *prefix)
{
struct cost
{
unsigned HOST_WIDE_INT cost;
unsigned int count;
};
basic_block bb;
struct cost load, store, regcopy, selfcopy, overall;
memset (&load, 0, sizeof(load));
memset (&store, 0, sizeof(store));
memset (&regcopy, 0, sizeof(regcopy));
memset (&selfcopy, 0, sizeof(selfcopy));
memset (&overall, 0, sizeof(overall));
if (!file)
return;
FOR_EACH_BB (bb)
{
unsigned HOST_WIDE_INT block_cost = bb->frequency;
rtx insn, set;
for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
{
/* Yes, yes. We don't calculate the costs precisely.
Only for "simple enough" insns. Those containing single
sets only. */
if (INSN_P (insn) && ((set = single_set (insn)) != NULL))
{
rtx src = SET_SRC (set);
rtx dest = SET_DEST (set);
struct cost *pcost = NULL;
overall.cost += block_cost;
overall.count++;
if (rtx_equal_p (src, dest))
pcost = &selfcopy;
else if (GET_CODE (src) == GET_CODE (dest)
&& ((REG_P (src))
|| (GET_CODE (src) == SUBREG
&& REG_P (SUBREG_REG (src))
&& REG_P (SUBREG_REG (dest)))))
/* XXX is dest guaranteed to be a subreg? */
pcost = &regcopy;
else
{
if (GET_CODE (src) == SUBREG)
src = SUBREG_REG (src);
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
if (MEM_P (src) && !MEM_P (dest)
&& memref_is_stack_slot (src))
pcost = &load;
else if (!MEM_P (src) && MEM_P (dest)
&& memref_is_stack_slot (dest))
pcost = &store;
}
if (pcost)
{
pcost->cost += block_cost;
pcost->count++;
}
}
if (insn == BB_END (bb))
break;
}
}
if (!prefix)
prefix = "";
fprintf (file, "static insn cost %s\n", message ? message : "");
fprintf (file, " %soverall:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n",
prefix, overall.count, overall.cost);
fprintf (file, " %sloads:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n",
prefix, load.count, load.cost);
fprintf (file, " %sstores:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n",
prefix, store.count, store.cost);
fprintf (file, " %sregcopy:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n",
prefix, regcopy.count, regcopy.cost);
fprintf (file, " %sselfcpy:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n",
prefix, selfcopy.count, selfcopy.cost);
}
/* Returns nonzero if WEB1 and WEB2 have some possible
   hardregs in common.  */
int
web_conflicts_p (struct web *web1, struct web *web2)
{
if (web1->type == PRECOLORED && web2->type == PRECOLORED)
return 0;
if (web1->type == PRECOLORED)
return TEST_HARD_REG_BIT (web2->usable_regs, web1->regno);
if (web2->type == PRECOLORED)
return TEST_HARD_REG_BIT (web1->usable_regs, web2->regno);
return hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs);
}
/* Dump all uids of insns in which WEB is mentioned. */
void
dump_web_insns (struct web *web)
{
unsigned int i;
ra_debug_msg (DUMP_EVER, "Web: %i(%i)+%i class: %s freedom: %i degree %i\n",
web->id, web->regno, web->add_hardregs,
reg_class_names[web->regclass],
web->num_freedom, web->num_conflicts);
ra_debug_msg (DUMP_EVER, " def insns:");
for (i = 0; i < web->num_defs; ++i)
{
ra_debug_msg (DUMP_EVER, " %d ", INSN_UID (web->defs[i]->insn));
}
ra_debug_msg (DUMP_EVER, "\n use insns:");
for (i = 0; i < web->num_uses; ++i)
{
ra_debug_msg (DUMP_EVER, " %d ", INSN_UID (web->uses[i]->insn));
}
ra_debug_msg (DUMP_EVER, "\n");
}
/* Dump conflicts for web WEB. */
void
dump_web_conflicts (struct web *web)
{
int num = 0;
unsigned int def2;
ra_debug_msg (DUMP_EVER, "Web: %i(%i)+%i class: %s freedom: %i degree %i\n",
web->id, web->regno, web->add_hardregs,
reg_class_names[web->regclass],
web->num_freedom, web->num_conflicts);
for (def2 = 0; def2 < num_webs; def2++)
if (TEST_BIT (igraph, igraph_index (web->id, def2)) && web->id != def2)
{
if ((num % 9) == 5)
ra_debug_msg (DUMP_EVER, "\n ");
num++;
ra_debug_msg (DUMP_EVER, " %d(%d)", def2, id2web[def2]->regno);
if (id2web[def2]->add_hardregs)
ra_debug_msg (DUMP_EVER, "+%d", id2web[def2]->add_hardregs);
if (web_conflicts_p (web, id2web[def2]))
ra_debug_msg (DUMP_EVER, "/x");
if (id2web[def2]->type == SELECT)
ra_debug_msg (DUMP_EVER, "/s");
if (id2web[def2]->type == COALESCED)
ra_debug_msg (DUMP_EVER,"/c/%d", alias (id2web[def2])->id);
}
ra_debug_msg (DUMP_EVER, "\n");
{
struct conflict_link *wl;
num = 0;
ra_debug_msg (DUMP_EVER, "By conflicts: ");
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web* w = wl->t;
if ((num % 9) == 8)
ra_debug_msg (DUMP_EVER, "\n ");
num++;
ra_debug_msg (DUMP_EVER, "%d(%d)%s ", w->id, w->regno,
web_conflicts_p (web, w) ? "+" : "");
}
ra_debug_msg (DUMP_EVER, "\n");
}
}
/* Output HARD_REG_SET to stderr. */
void
debug_hard_reg_set (HARD_REG_SET set)
{
int i;
for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
{
if (TEST_HARD_REG_BIT (set, i))
{
fprintf (stderr, "%s ", reg_names[i]);
}
}
fprintf (stderr, "\n");
}
/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/
/* Graph coloring register allocator
Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with GCC; see the file COPYING. If not, write to the Free Software
Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "df.h"
#include "expr.h"
#include "output.h"
#include "except.h"
#include "ra.h"
#include "insn-config.h"
#include "reload.h"
/* This file is part of the graph coloring register allocator, and
contains the functions to change the insn stream. I.e. it adds
spill code, rewrites insns to use the new registers after
coloring and deletes coalesced moves. */
struct rewrite_info;
struct rtx_list;
static void spill_coalescing (sbitmap, sbitmap);
static unsigned HOST_WIDE_INT spill_prop_savings (struct web *, sbitmap);
static void spill_prop_insert (struct web *, sbitmap, sbitmap);
static int spill_propagation (sbitmap, sbitmap, sbitmap);
static void spill_coalprop (void);
static void allocate_spill_web (struct web *);
static void choose_spill_colors (void);
static void rewrite_program (bitmap);
static void remember_slot (struct rtx_list **, rtx);
static int slots_overlap_p (rtx, rtx);
static void delete_overlapping_slots (struct rtx_list **, rtx);
static int slot_member_p (struct rtx_list *, rtx);
static void insert_stores (bitmap);
static int spill_same_color_p (struct web *, struct web *);
static bool is_partly_live_1 (sbitmap, struct web *);
static void update_spill_colors (HARD_REG_SET *, struct web *, int);
static int spill_is_free (HARD_REG_SET *, struct web *);
static void emit_loads (struct rewrite_info *, int, rtx);
static void reloads_to_loads (struct rewrite_info *, struct ref **,
unsigned int, struct web **);
static void rewrite_program2 (bitmap);
static void mark_refs_for_checking (struct web *, bitmap);
static void detect_web_parts_to_rebuild (void);
static void delete_useless_defs (void);
static void detect_non_changed_webs (void);
static void reset_changed_flag (void);
/* For tracking some statistics, we count the number (and cost)
of deleted move insns. */
static unsigned int deleted_move_insns;
static unsigned HOST_WIDE_INT deleted_move_cost;
/* This is the spill coalescing phase. SPILLED contains the IDs of
all already spilled webs; COALESCE the IDs of webs still to be
checked for coalescing. This tries to coalesce two webs which were
spilled, are connected by a move, and don't conflict. That greatly
reduces memory shuffling. */
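/* E.g. for a move (set (reg 42) (reg 23)) where both webs were spilled
and don't conflict, both can share one stack slot, which makes the
move (a memory-to-memory copy after spilling) redundant, so it is
deleted below. (Register numbers here are purely illustrative.) */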
static void
spill_coalescing (sbitmap coalesce, sbitmap spilled)
{
struct move_list *ml;
struct move *m;
for (ml = wl_moves; ml; ml = ml->next)
if ((m = ml->move) != NULL)
{
struct web *s = alias (m->source_web);
struct web *t = alias (m->target_web);
if ((TEST_BIT (spilled, s->id) && TEST_BIT (coalesce, t->id))
|| (TEST_BIT (spilled, t->id) && TEST_BIT (coalesce, s->id)))
{
struct conflict_link *wl;
if (TEST_BIT (sup_igraph, s->id * num_webs + t->id)
|| TEST_BIT (sup_igraph, t->id * num_webs + s->id)
|| s->pattern || t->pattern)
continue;
deleted_move_insns++;
deleted_move_cost += BLOCK_FOR_INSN (m->insn)->frequency + 1;
PUT_CODE (m->insn, NOTE);
NOTE_LINE_NUMBER (m->insn) = NOTE_INSN_DELETED;
df_insn_modify (df, BLOCK_FOR_INSN (m->insn), m->insn);
m->target_web->target_of_spilled_move = 1;
if (s == t)
/* Maybe already coalesced due to a former move. */
continue;
/* Merge the nodes S and T in the I-graph. Beware: the merging
of conflicts relies on the fact that the conflict list of T
notes all of its conflicts. This is currently not the case if
T were the target of a coalesced web, because then (in
combine () above) only those conflicts of the web coalesced
into T were noted which at the time of combine () were not
already on the SELECT stack or themselves coalesced to
something else. */
gcc_assert (t->type == SPILLED
&& s->type == SPILLED);
remove_list (t->dlink, &WEBS(SPILLED));
put_web (t, COALESCED);
t->alias = s;
s->is_coalesced = 1;
t->is_coalesced = 1;
merge_moves (s, t);
for (wl = t->conflict_list; wl; wl = wl->next)
{
struct web *pweb = wl->t;
if (wl->sub == NULL)
record_conflict (s, pweb);
else
{
struct sub_conflict *sl;
for (sl = wl->sub; sl; sl = sl->next)
{
struct web *sweb = NULL;
if (SUBWEB_P (sl->s))
sweb = find_subweb (s, sl->s->orig_x);
if (!sweb)
sweb = s;
record_conflict (sweb, sl->t);
}
}
/* No decrement_degree here, because we already have colored
the graph, and don't want to insert pweb into any other
list. */
pweb->num_conflicts -= 1 + t->add_hardregs;
}
}
}
}
/* Returns the probable savings of coalescing WEB with webs from
SPILLED, in terms of removed move insn cost. */
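/* Roughly, each deletable move connected to WEB saves one store to
and one load from the shared slot (plus the two moves themselves);
e.g. with a memory move cost of 4 in each direction, every such
move saves 10 cost units times its block frequency. */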
static unsigned HOST_WIDE_INT
spill_prop_savings (struct web *web, sbitmap spilled)
{
unsigned HOST_WIDE_INT savings = 0;
struct move_list *ml;
struct move *m;
unsigned int cost;
if (web->pattern)
return 0;
cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1);
cost += 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0);
for (ml = wl_moves; ml; ml = ml->next)
if ((m = ml->move) != NULL)
{
struct web *s = alias (m->source_web);
struct web *t = alias (m->target_web);
if (s != web)
{
struct web *h = s;
s = t;
t = h;
}
if (s != web || !TEST_BIT (spilled, t->id) || t->pattern
|| TEST_BIT (sup_igraph, s->id * num_webs + t->id)
|| TEST_BIT (sup_igraph, t->id * num_webs + s->id))
continue;
savings += BLOCK_FOR_INSN (m->insn)->frequency * cost;
}
return savings;
}
/* This adds to LIST and PROCESSED all IDs of colored webs which are
connected to WEB by a move. */
static void
spill_prop_insert (struct web *web, sbitmap list, sbitmap processed)
{
struct move_list *ml;
struct move *m;
for (ml = wl_moves; ml; ml = ml->next)
if ((m = ml->move) != NULL)
{
struct web *s = alias (m->source_web);
struct web *t = alias (m->target_web);
if (s != web)
{
struct web *h = s;
s = t;
t = h;
}
if (s != web || t->type != COLORED || TEST_BIT (processed, t->id))
continue;
SET_BIT (list, t->id);
SET_BIT (processed, t->id);
}
}
/* The spill propagation pass. If we have two spilled webs, the first
connected through a move to a colored one, and the second also connected
to that colored one, and this colored web is only used to connect both
spilled webs, it might be worthwhile to spill that colored one too.
This is the case if the cost of the removed copy insns (all three webs
could be placed into the same stack slot) is higher than the spill cost
of the web.
TO_PROP are the webs we try to propagate from (i.e. spilled ones),
SPILLED the set of all spilled webs so far, and PROCESSED the set
of all webs processed so far, so we don't do work twice. */
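/* Example: S1 and S2 are spilled and both copy-connected to a colored
web C which serves no other purpose. If the frequency-weighted cost
of the copies between C and S1/S2 exceeds C's spill_cost, spilling C
too lets all three share a slot and deletes the copies. */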
static int
spill_propagation (sbitmap to_prop, sbitmap spilled, sbitmap processed)
{
int id;
int again = 0;
sbitmap list = sbitmap_alloc (num_webs);
sbitmap_zero (list);
/* First insert colored move neighbors into the candidate list. */
EXECUTE_IF_SET_IN_SBITMAP (to_prop, 0, id,
{
spill_prop_insert (ID2WEB (id), list, processed);
});
sbitmap_zero (to_prop);
/* For each candidate, see if the savings are higher than its
spill cost. */
while ((id = sbitmap_first_set_bit (list)) >= 0)
{
struct web *web = ID2WEB (id);
RESET_BIT (list, id);
if (spill_prop_savings (web, spilled) >= web->spill_cost)
{
/* If so, we found a new spilled web. Insert its colored
move neighbors again, and note that we need to repeat the
whole main loop of spill propagation/coalescing. */
remove_web_from_list (web);
web->color = -1;
put_web (web, SPILLED);
SET_BIT (spilled, id);
SET_BIT (to_prop, id);
spill_prop_insert (web, list, processed);
again = 1;
}
}
sbitmap_free (list);
return again;
}
/* The main phase to improve spill costs. This repeatedly runs
spill coalescing and spill propagation, until nothing changes. */
static void
spill_coalprop (void)
{
sbitmap spilled, processed, to_prop;
struct dlist *d;
int again;
spilled = sbitmap_alloc (num_webs);
processed = sbitmap_alloc (num_webs);
to_prop = sbitmap_alloc (num_webs);
sbitmap_zero (spilled);
for (d = WEBS(SPILLED); d; d = d->next)
SET_BIT (spilled, DLIST_WEB (d)->id);
sbitmap_copy (to_prop, spilled);
sbitmap_zero (processed);
do
{
spill_coalescing (to_prop, spilled);
/* XXX Currently (with optimistic coalescing) spill_propagation()
doesn't give better code, sometimes it gives worse (but not by much)
code. I believe this is because of slightly wrong cost
measurements. Anyway right now it isn't worth the time it takes,
so deactivate it for now. */
again = 0 && spill_propagation (to_prop, spilled, processed);
}
while (again);
sbitmap_free (to_prop);
sbitmap_free (processed);
sbitmap_free (spilled);
}
/* Allocate a spill slot for WEB. Currently we spill to pseudo
registers, to be able to also track webs for "stack slots", and
possibly to colorize them too. These pseudos are sometimes handled
in a special way, where we know that they can also represent
MEM references. */
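/* Such a spill pseudo either gets a color in a later pass, or keeps
an_unusable_color and is assigned a real stack slot in emit_colors()
below. */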
static void
allocate_spill_web (struct web *web)
{
int regno = web->regno;
rtx slot;
if (web->stack_slot)
return;
slot = gen_reg_rtx (PSEUDO_REGNO_MODE (regno));
web->stack_slot = slot;
}
/* This chooses a color for all SPILLED webs for interference region
spilling. The heuristic isn't good in any way. */
static void
choose_spill_colors (void)
{
struct dlist *d;
unsigned HOST_WIDE_INT *costs = xmalloc (FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
for (d = WEBS(SPILLED); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
struct conflict_link *wl;
int bestc, c;
HARD_REG_SET avail;
memset (costs, 0, FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *pweb = wl->t;
if (pweb->type == COLORED || pweb->type == PRECOLORED)
costs[pweb->color] += pweb->spill_cost;
}
COPY_HARD_REG_SET (avail, web->usable_regs);
if (web->crosses_call)
{
/* Add an arbitrary constant cost to colors not usable by
call-crossing webs without saves/loads. */
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
if (TEST_HARD_REG_BIT (call_used_reg_set, c))
costs[c] += 1000;
}
bestc = -1;
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
if ((bestc < 0 || costs[bestc] > costs[c])
&& TEST_HARD_REG_BIT (avail, c)
&& HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno)))
{
int i, size;
size = hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
for (i = 1; i < size
&& TEST_HARD_REG_BIT (avail, c + i); i++);
if (i == size)
bestc = c;
}
web->color = bestc;
ra_debug_msg (DUMP_PROCESS, "choosing color %d for spilled web %d\n",
bestc, web->id);
}
free (costs);
}
/* For statistics' sake we count the number and cost of all new loads,
stores and emitted rematerializations. */
static unsigned int emitted_spill_loads;
static unsigned int emitted_spill_stores;
static unsigned int emitted_remat;
static unsigned HOST_WIDE_INT spill_load_cost;
static unsigned HOST_WIDE_INT spill_store_cost;
static unsigned HOST_WIDE_INT spill_remat_cost;
/* In rewrite_program2() we detect if some def is useless, in the sense
that the pseudo set is not live anymore at that point. The REF_IDs
of such defs are noted here. */
static bitmap useless_defs;
/* This is the simple and fast version of rewriting the program to
include spill code. It spills at every insn containing spilled
defs or uses. Loads are added only if flag_ra_spill_every_use is
nonzero, otherwise only stores will be added. This doesn't
support rematerialization.
NEW_DEATHS is filled with the uids of insns which probably contain
deaths. */
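/* E.g. for a spilled web W with stack pseudo SLOT this roughly turns
use:  insn ... (reg W) ...   into   (set (reg W) SLOT); insn
def:  (set (reg W) ...)      into   (set (reg W) ...); (set SLOT (reg W))
modulo SUBREGs, which are rewritten via simplify_gen_subreg(). */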
static void
rewrite_program (bitmap new_deaths)
{
unsigned int i;
struct dlist *d;
bitmap b = BITMAP_XMALLOC ();
/* We walk over all webs, over all uses/defs. For all webs, we need
to look at spilled webs, and webs coalesced to spilled ones, in case
their alias isn't broken up, or they got spill coalesced. */
for (i = 0; i < 2; i++)
for (d = (i == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
struct web *aweb = alias (web);
unsigned int j;
rtx slot;
/* Is trivially true for spilled webs, but not for coalesced ones. */
if (aweb->type != SPILLED)
continue;
/* First add loads before every use, if we have to. */
if (flag_ra_spill_every_use)
{
bitmap_clear (b);
allocate_spill_web (aweb);
slot = aweb->stack_slot;
for (j = 0; j < web->num_uses; j++)
{
rtx insns, target, source;
rtx insn = DF_REF_INSN (web->uses[j]);
rtx prev = PREV_INSN (insn);
basic_block bb = BLOCK_FOR_INSN (insn);
/* Happens when spill_coalescing() deletes move insns. */
if (!INSN_P (insn))
continue;
/* Check that we didn't already add a load for this web
and insn. Happens when an insn uses the same web
multiple times. */
if (bitmap_bit_p (b, INSN_UID (insn)))
continue;
bitmap_set_bit (b, INSN_UID (insn));
target = DF_REF_REG (web->uses[j]);
source = slot;
start_sequence ();
if (GET_CODE (target) == SUBREG)
source = simplify_gen_subreg (GET_MODE (target), source,
GET_MODE (source),
SUBREG_BYTE (target));
ra_emit_move_insn (target, source);
insns = get_insns ();
end_sequence ();
emit_insn_before (insns, insn);
if (BB_HEAD (bb) == insn)
BB_HEAD (bb) = NEXT_INSN (prev);
for (insn = PREV_INSN (insn); insn != prev;
insn = PREV_INSN (insn))
{
set_block_for_insn (insn, bb);
df_insn_modify (df, bb, insn);
}
emitted_spill_loads++;
spill_load_cost += bb->frequency + 1;
}
}
/* Now emit the stores after each def.
If any uses were loaded from stack slots (as opposed to being
rematerialized or not reloaded due to IR spilling),
aweb->stack_slot will be set. If not, we don't need to emit
any stack stores. */
slot = aweb->stack_slot;
bitmap_clear (b);
if (slot)
for (j = 0; j < web->num_defs; j++)
{
rtx insns, source, dest;
rtx insn = DF_REF_INSN (web->defs[j]);
rtx following = NEXT_INSN (insn);
basic_block bb = BLOCK_FOR_INSN (insn);
/* Happens when spill_coalescing() deletes move insns. */
if (!INSN_P (insn))
continue;
if (bitmap_bit_p (b, INSN_UID (insn)))
continue;
bitmap_set_bit (b, INSN_UID (insn));
start_sequence ();
source = DF_REF_REG (web->defs[j]);
dest = slot;
if (GET_CODE (source) == SUBREG)
dest = simplify_gen_subreg (GET_MODE (source), dest,
GET_MODE (dest),
SUBREG_BYTE (source));
ra_emit_move_insn (dest, source);
insns = get_insns ();
end_sequence ();
if (insns)
{
emit_insn_after (insns, insn);
if (BB_END (bb) == insn)
BB_END (bb) = PREV_INSN (following);
for (insn = insns; insn != following; insn = NEXT_INSN (insn))
{
set_block_for_insn (insn, bb);
df_insn_modify (df, bb, insn);
}
}
else
df_insn_modify (df, bb, insn);
emitted_spill_stores++;
spill_store_cost += bb->frequency + 1;
/* XXX we should set new_deaths for all inserted stores
whose pseudo dies here.
Note that this isn't the case for _all_ stores. */
/* I.e. the next statement is wrong, and might cause some spilltemps
to be categorized as spilltemp2's (i.e. live over a death),
although they aren't. This might make them spill again,
which causes endless looping in case this insn is in fact
_no_ death. */
bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
}
}
BITMAP_XFREE (b);
}
/* A simple list of rtx's. */
struct rtx_list
{
struct rtx_list *next;
rtx x;
};
/* Adds X to *LIST. */
static void
remember_slot (struct rtx_list **list, rtx x)
{
struct rtx_list *l;
/* PRE: X is not already in LIST. */
l = ra_alloc (sizeof (*l));
l->next = *list;
l->x = x;
*list = l;
}
/* Given two rtx's S1 and S2, each being a REG or MEM (or a SUBREG
thereof), return nonzero if they overlap. A REG and a MEM never
overlap, and if both are MEMs they must have an easy address
(plus (basereg) (const_int x)); otherwise they are assumed to
overlap. */
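/* E.g. two SImode MEMs at (plus (reg fp) (const_int 4)) and
(plus (reg fp) (const_int 8)) don't overlap, since 4 + 4 <= 8,
while a DImode MEM at offset 4 would overlap both of them.
(Offsets here are purely illustrative.) */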
static int
slots_overlap_p (rtx s1, rtx s2)
{
rtx base1, base2;
HOST_WIDE_INT ofs1 = 0, ofs2 = 0;
int size1 = GET_MODE_SIZE (GET_MODE (s1));
int size2 = GET_MODE_SIZE (GET_MODE (s2));
if (GET_CODE (s1) == SUBREG)
ofs1 = SUBREG_BYTE (s1), s1 = SUBREG_REG (s1);
if (GET_CODE (s2) == SUBREG)
ofs2 = SUBREG_BYTE (s2), s2 = SUBREG_REG (s2);
if (s1 == s2)
return 1;
if (GET_CODE (s1) != GET_CODE (s2))
return 0;
if (REG_P (s1) && REG_P (s2))
{
if (REGNO (s1) != REGNO (s2))
return 0;
if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
return 0;
return 1;
}
gcc_assert (MEM_P (s1) && MEM_P (s2));
s1 = XEXP (s1, 0);
s2 = XEXP (s2, 0);
if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0))
|| GET_CODE (XEXP (s1, 1)) != CONST_INT)
return 1;
if (GET_CODE (s2) != PLUS || !REG_P (XEXP (s2, 0))
|| GET_CODE (XEXP (s2, 1)) != CONST_INT)
return 1;
base1 = XEXP (s1, 0);
base2 = XEXP (s2, 0);
if (!rtx_equal_p (base1, base2))
return 1;
ofs1 += INTVAL (XEXP (s1, 1));
ofs2 += INTVAL (XEXP (s2, 1));
if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
return 0;
return 1;
}
/* This deletes from *LIST all rtx's which overlap with X in the sense
of slots_overlap_p(). */
static void
delete_overlapping_slots (struct rtx_list **list, rtx x)
{
while (*list)
{
if (slots_overlap_p ((*list)->x, x))
*list = (*list)->next;
else
list = &((*list)->next);
}
}
/* Returns nonzero if X is a member of LIST. */
static int
slot_member_p (struct rtx_list *list, rtx x)
{
for (;list; list = list->next)
if (rtx_equal_p (list->x, x))
return 1;
return 0;
}
/* A more sophisticated (and slower) method of adding the stores than
rewrite_program(). This goes backwards over the insn stream, adding
stores as it goes, but only if it hasn't just added a store to the
same location. NEW_DEATHS is a bitmap filled with the uids of insns
containing deaths. */
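/* E.g. when walking backwards over two defs of the same spilled web,
only the later def gets a store; the earlier one is shadowed as
long as no jump, barrier, or allocator-emitted load of the slot
intervenes to invalidate the remembered slots. */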
static void
insert_stores (bitmap new_deaths)
{
rtx insn;
rtx last_slot = NULL_RTX;
struct rtx_list *slots = NULL;
/* We simply walk backwards, straight over basic block borders. */
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
{
int uid = INSN_UID (insn);
/* If we reach a basic block border, which has more than one
outgoing edge, we simply forget all already emitted stores. */
if (BARRIER_P (insn)
|| JUMP_P (insn) || can_throw_internal (insn))
{
last_slot = NULL_RTX;
slots = NULL;
}
if (!INSN_P (insn))
continue;
/* If this insn was not just added in this pass. */
if (uid < insn_df_max_uid)
{
unsigned int n;
rtx following = NEXT_INSN (insn);
basic_block bb = BLOCK_FOR_INSN (insn);
struct ra_insn_info info;
info = insn_df[uid];
for (n = 0; n < info.num_defs; n++)
{
struct web *web = def2web[DF_REF_ID (info.defs[n])];
struct web *aweb = alias (find_web_for_subweb (web));
rtx slot, source;
if (aweb->type != SPILLED || !aweb->stack_slot)
continue;
slot = aweb->stack_slot;
source = DF_REF_REG (info.defs[n]);
/* adjust_address() might generate code. */
start_sequence ();
if (GET_CODE (source) == SUBREG)
slot = simplify_gen_subreg (GET_MODE (source), slot,
GET_MODE (slot),
SUBREG_BYTE (source));
/* If we have no info about emitted stores, or it didn't
contain the location we intend to use soon, then
add the store. */
if ((!last_slot || !rtx_equal_p (slot, last_slot))
&& ! slot_member_p (slots, slot))
{
rtx insns, ni;
last_slot = slot;
remember_slot (&slots, slot);
ra_emit_move_insn (slot, source);
insns = get_insns ();
end_sequence ();
if (insns)
{
emit_insn_after (insns, insn);
if (BB_END (bb) == insn)
BB_END (bb) = PREV_INSN (following);
for (ni = insns; ni != following; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
df_insn_modify (df, bb, ni);
}
}
else
df_insn_modify (df, bb, insn);
emitted_spill_stores++;
spill_store_cost += bb->frequency + 1;
bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
}
else
{
/* Otherwise ignore insns from adjust_address() above. */
end_sequence ();
}
}
}
/* If we look at a load generated by the allocator, forget
the last emitted slot, and additionally clear all slots
overlapping its source (after all, we need it again). */
/* XXX If we emit the stack-ref directly into the using insn, the
following needs a change, because then that is no new insn. Preferably
we would add some notes to the insn saying which stack slots are
needed for it. */
if (uid >= last_max_uid)
{
rtx set = single_set (insn);
last_slot = NULL_RTX;
/* If this was not a simple set, give up, and forget everything. */
if (!set)
slots = NULL;
else
{
if (1 || MEM_P (SET_SRC (set)))
delete_overlapping_slots (&slots, SET_SRC (set));
}
}
}
}
/* Returns 1 if both colored webs have some hardregs in common, even if
they are not the same width. */
static int
spill_same_color_p (struct web *web1, struct web *web2)
{
int c1, size1, c2, size2;
if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color)
return 0;
if ((c2 = alias (web2)->color) < 0 || c2 == an_unusable_color)
return 0;
size1 = web1->type == PRECOLORED
? 1 : hard_regno_nregs[c1][PSEUDO_REGNO_MODE (web1->regno)];
size2 = web2->type == PRECOLORED
? 1 : hard_regno_nregs[c2][PSEUDO_REGNO_MODE (web2->regno)];
if (c1 >= c2 + size2 || c2 >= c1 + size1)
return 0;
return 1;
}
/* Given the set of live web IDs LIVE, returns nonzero if any of WEB's
subwebs (or WEB itself) is live. */
static bool
is_partly_live_1 (sbitmap live, struct web *web)
{
do
if (TEST_BIT (live, web->id))
return 1;
while ((web = web->subreg_next));
return 0;
}
/* Fast version in case WEB has no subwebs. */
#define is_partly_live(live, web) ((!web->subreg_next) \
? TEST_BIT (live, web->id) \
: is_partly_live_1 (live, web))
/* Change the set of currently IN_USE colors according to
WEB's color. Either add those colors to the hardreg set (if ADD
is nonzero), or remove them. */
static void
update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add)
{
int c, size;
if ((c = alias (find_web_for_subweb (web))->color) < 0
|| c == an_unusable_color)
return;
size = hard_regno_nregs[c][GET_MODE (web->orig_x)];
if (SUBWEB_P (web))
{
c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)),
SUBREG_BYTE (web->orig_x),
GET_MODE (web->orig_x));
}
else if (web->type == PRECOLORED)
size = 1;
if (add)
for (; size--;)
SET_HARD_REG_BIT (*in_use, c + size);
else
for (; size--;)
CLEAR_HARD_REG_BIT (*in_use, c + size);
}
/* Given a set of hardregs currently IN_USE and the color C of WEB,
return -1 if WEB has no color, 1 if it has the unusable color,
0 if one of its used hardregs is in use, and 1 otherwise.
Generally, if WEB can't be left colorized, return 1. */
static int
spill_is_free (HARD_REG_SET *in_use, struct web *web)
{
int c, size;
if ((c = alias (web)->color) < 0)
return -1;
if (c == an_unusable_color)
return 1;
size = web->type == PRECOLORED
? 1 : hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
for (; size--;)
if (TEST_HARD_REG_BIT (*in_use, c + size))
return 0;
return 1;
}
/* Structure for passing between rewrite_program2() and emit_loads(). */
struct rewrite_info
{
/* The web IDs which currently would need a reload. These are
currently live spilled webs whose color was still free. */
bitmap need_reload;
/* We need a scratch bitmap, but don't want to allocate one a zillion
times. */
bitmap scratch;
/* Web IDs of currently live webs. These are the precise IDs,
not just those of the superwebs. If only one part is live, only
that ID is placed here. */
sbitmap live;
/* An array of webs, which currently need a load added.
They will be emitted when seeing the first death. */
struct web **needed_loads;
/* The current number of entries in needed_loads. */
int nl_size;
/* The number of bits set in need_reload. */
int num_reloads;
/* The current set of hardregs not available. */
HARD_REG_SET colors_in_use;
/* Nonzero if we just added some spill temps to need_reload or
needed_loads. In this case we don't wait for the next death
to emit their loads. */
int any_spilltemps_spilled;
/* Nonzero if we currently need to emit the loads. E.g. when we
saw an insn containing deaths. */
int need_load;
};
/* The needed_loads list of RI contains some webs for which
we add the actual load insns here. They are added just before
their last seen use. NL_FIRST_RELOAD is the index of the first
load which is a converted reload; all other entries are normal
loads. LAST_BLOCK_INSN is the last insn of the current basic block. */
static void
emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
{
int j;
for (j = ri->nl_size; j;)
{
struct web *web = ri->needed_loads[--j];
struct web *supweb;
struct web *aweb;
rtx ni, slot, reg;
rtx before = NULL_RTX, after = NULL_RTX;
basic_block bb;
/* When spilltemps were spilled for the last insns, their
loads are already emitted, which is noted by setting their
needed_loads[] entry to 0. */
if (!web)
continue;
supweb = find_web_for_subweb (web);
gcc_assert (supweb->regno < max_normal_pseudo);
/* Check for the web being a spilltemp, if we only want to
load spilltemps. Also remember that we emitted that
load, which we don't need to do when we have a death,
because then all of needed_loads[] is emptied. */
if (!ri->need_load)
{
if (!supweb->spill_temp)
continue;
else
ri->needed_loads[j] = 0;
}
web->in_load = 0;
/* The adding of reloads doesn't depend on liveness. */
if (j < nl_first_reload && !TEST_BIT (ri->live, web->id))
continue;
aweb = alias (supweb);
aweb->changed = 1;
start_sequence ();
if (supweb->pattern)
{
/* XXX If we later allow non-constant sources for rematerialization
we must also disallow coalescing _to_ rematerialized webs
(at least then disallow spilling them, which we already ensure
with flag_ra_break_aliases), or use a stack slot instead of the
pattern. */
gcc_assert (aweb == supweb);
slot = copy_rtx (supweb->pattern);
reg = copy_rtx (supweb->orig_x);
/* Sanity check. orig_x should be a REG rtx, which should be
shared over all RTL, so copy_rtx should have no effect. */
gcc_assert (reg == supweb->orig_x);
}
else
{
allocate_spill_web (aweb);
slot = aweb->stack_slot;
/* If we don't copy the RTL, some SUBREG rtx might be
shared between iterations although belonging to
different webs, which leads to wrong code. */
reg = copy_rtx (web->orig_x);
if (GET_CODE (reg) == SUBREG)
/*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE
(reg));*/
slot = simplify_gen_subreg (GET_MODE (reg), slot, GET_MODE (slot),
SUBREG_BYTE (reg));
}
ra_emit_move_insn (reg, slot);
ni = get_insns ();
end_sequence ();
before = web->last_use_insn;
web->last_use_insn = NULL_RTX;
if (!before)
{
if (JUMP_P (last_block_insn))
before = last_block_insn;
else
after = last_block_insn;
}
if (after)
{
rtx foll = NEXT_INSN (after);
bb = BLOCK_FOR_INSN (after);
emit_insn_after (ni, after);
if (BB_END (bb) == after)
BB_END (bb) = PREV_INSN (foll);
for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
df_insn_modify (df, bb, ni);
}
}
else
{
rtx prev = PREV_INSN (before);
bb = BLOCK_FOR_INSN (before);
emit_insn_before (ni, before);
if (BB_HEAD (bb) == before)
BB_HEAD (bb) = NEXT_INSN (prev);
for (; ni != before; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
df_insn_modify (df, bb, ni);
}
}
if (supweb->pattern)
{
emitted_remat++;
spill_remat_cost += bb->frequency + 1;
}
else
{
emitted_spill_loads++;
spill_load_cost += bb->frequency + 1;
}
RESET_BIT (ri->live, web->id);
/* In the special case documented above only emit the reloads and
one load. */
if (ri->need_load == 2 && j < nl_first_reload)
break;
}
if (ri->need_load)
ri->nl_size = j;
}
/* Given a set of reloads in RI, an array of NUM_REFS references (either
uses or defs) in REFS, and REF2WEB to translate ref IDs to webs
(either use2web or def2web), convert some reloads to loads.
This looks at the webs referenced, and at how they change the set of
available colors. All still-live webs which needed reloads and
whose color isn't free anymore are put on the needed_loads list. */
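/* Example: web W is pending in need_reload. When the current insn
contains a death of a web occupying the same hardregs as W's color,
we are entering W's interference region (walking backwards), so its
reload is turned into a real load by queueing W on needed_loads. */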
static void
reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
unsigned int num_refs, struct web **ref2web)
{
unsigned int n;
int num_reloads = ri->num_reloads;
for (n = 0; n < num_refs && num_reloads; n++)
{
struct web *web = ref2web[DF_REF_ID (refs[n])];
struct web *supweb = find_web_for_subweb (web);
int is_death;
unsigned j;
/* Only emit reloads when entering their interference
region. A use of a spilled web never opens an
interference region, independent of its color. */
if (alias (supweb)->type == SPILLED)
continue;
if (supweb->type == PRECOLORED
&& TEST_HARD_REG_BIT (never_use_colors, supweb->color))
continue;
/* Note that if web (and supweb) are DEFs, we have already cleared
the corresponding bits in live, i.e. is_death becomes true, which
is what we want. */
is_death = !TEST_BIT (ri->live, supweb->id);
is_death &= !TEST_BIT (ri->live, web->id);
if (is_death)
{
int old_num_r = num_reloads;
bitmap_iterator bi;
bitmap_clear (ri->scratch);
EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
struct web *aweb2 = alias (find_web_for_subweb (web2));
gcc_assert (spill_is_free (&(ri->colors_in_use), aweb2) != 0);
if (spill_same_color_p (supweb, aweb2)
/* && interfere (web, web2) */)
{
if (!web2->in_load)
{
ri->needed_loads[ri->nl_size++] = web2;
web2->in_load = 1;
}
bitmap_set_bit (ri->scratch, j);
num_reloads--;
}
}
if (num_reloads != old_num_r)
bitmap_and_compl_into (ri->need_reload, ri->scratch);
}
}
ri->num_reloads = num_reloads;
}
/* This adds loads for spilled webs to the program. It uses a kind of
interference region spilling. If flag_ra_ir_spilling is zero it
only uses improved Chaitin spilling (adding loads only at insns
containing deaths). */
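/* Roughly, for every insn, walking backwards through each block:
1. process its defs: clear liveness, drop pending reloads for them,
and free their colors;
2. check its uses for deaths (setting need_load and new_deaths);
3. convert pending reloads whose color was taken into loads;
4. emit the queued loads (emit_loads);
5. mark the uses live again, occupy their colors, and queue loads
or reloads for uses of spilled webs. */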
static void
rewrite_program2 (bitmap new_deaths)
{
basic_block bb = NULL;
int nl_first_reload;
struct rewrite_info ri;
rtx insn;
ri.needed_loads = xmalloc (num_webs * sizeof (struct web *));
ri.need_reload = BITMAP_XMALLOC ();
ri.scratch = BITMAP_XMALLOC ();
ri.live = sbitmap_alloc (num_webs);
ri.nl_size = 0;
ri.num_reloads = 0;
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
{
basic_block last_bb = NULL;
rtx last_block_insn;
unsigned i, j;
bitmap_iterator bi;
if (!INSN_P (insn))
insn = prev_real_insn (insn);
while (insn && !(bb = BLOCK_FOR_INSN (insn)))
insn = prev_real_insn (insn);
if (!insn)
break;
i = bb->index + 2;
last_block_insn = insn;
sbitmap_zero (ri.live);
CLEAR_HARD_REG_SET (ri.colors_in_use);
EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j, bi)
{
struct web *web = use2web[j];
struct web *aweb = alias (find_web_for_subweb (web));
/* A web is only live at the end if it isn't spilled. If we didn't
check this, the last use of a spilled web per basic block
wouldn't be detected as a death, although it is one in the final
code. This would lead to accumulating many loads without need,
only increasing register pressure. */
/* XXX also add spilled webs which got a color for IR spilling.
Remember not to add to colors_in_use in that case. */
if (aweb->type != SPILLED /*|| aweb->color >= 0*/)
{
SET_BIT (ri.live, web->id);
if (aweb->type != SPILLED)
update_spill_colors (&(ri.colors_in_use), web, 1);
}
}
bitmap_clear (ri.need_reload);
ri.num_reloads = 0;
ri.any_spilltemps_spilled = 0;
if (flag_ra_ir_spilling)
{
struct dlist *d;
int pass;
/* XXX If we don't add spilled nodes into live above, the following
becomes an empty loop. */
for (pass = 0; pass < 2; pass++)
for (d = (pass) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
struct web *aweb = alias (web);
if (aweb->type != SPILLED)
continue;
if (is_partly_live (ri.live, web)
&& spill_is_free (&(ri.colors_in_use), web) > 0)
{
ri.num_reloads++;
bitmap_set_bit (ri.need_reload, web->id);
/* Last using insn is somewhere in another block. */
web->last_use_insn = NULL_RTX;
}
}
}
last_bb = bb;
for (; insn; insn = PREV_INSN (insn))
{
struct ra_insn_info info;
unsigned int n;
memset (&info, 0, sizeof info);
if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb)
{
int index = BLOCK_FOR_INSN (insn)->index + 2;
bitmap_iterator bi;
EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j, bi)
{
struct web *web = use2web[j];
struct web *aweb = alias (find_web_for_subweb (web));
if (aweb->type != SPILLED)
{
SET_BIT (ri.live, web->id);
update_spill_colors (&(ri.colors_in_use), web, 1);
}
}
bitmap_clear (ri.scratch);
EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
struct web *supweb2 = find_web_for_subweb (web2);
struct web *aweb2 = alias (supweb2);
if (spill_is_free (&(ri.colors_in_use), aweb2) <= 0)
{
if (!web2->in_load)
{
ri.needed_loads[ri.nl_size++] = web2;
web2->in_load = 1;
}
bitmap_set_bit (ri.scratch, j);
ri.num_reloads--;
}
}
bitmap_and_compl_into (ri.need_reload, ri.scratch);
last_bb = BLOCK_FOR_INSN (insn);
last_block_insn = insn;
if (!INSN_P (last_block_insn))
last_block_insn = prev_real_insn (last_block_insn);
}
ri.need_load = 0;
if (INSN_P (insn))
info = insn_df[INSN_UID (insn)];
if (INSN_P (insn))
for (n = 0; n < info.num_defs; n++)
{
struct ref *ref = info.defs[n];
struct web *web = def2web[DF_REF_ID (ref)];
struct web *supweb = find_web_for_subweb (web);
int is_non_def = 0;
unsigned int n2;
/* Webs which are defined here but also used in the same insn
are rmw webs, or this use isn't a death because of looping
constructs. In neither case does this def make its
resources available. Reloads for it are still needed; it's still
live and its colors don't become free. */
for (n2 = 0; n2 < info.num_uses; n2++)
{
struct web *web2 = use2web[DF_REF_ID (info.uses[n2])];
if (supweb == find_web_for_subweb (web2))
{
is_non_def = 1;
break;
}
}
if (is_non_def)
continue;
if (!is_partly_live (ri.live, supweb))
bitmap_set_bit (useless_defs, DF_REF_ID (ref));
RESET_BIT (ri.live, web->id);
if (bitmap_bit_p (ri.need_reload, web->id))
{
ri.num_reloads--;
bitmap_clear_bit (ri.need_reload, web->id);
}
if (web != supweb)
{
/* XXX subwebs aren't precisely tracked here. We have
everything we need (inverse webs), but the code isn't
yet written. We need to make all completely
overlapping web parts non-live here. */
/* If by luck now the whole web isn't live anymore, no
reloads for it are needed. */
if (!is_partly_live (ri.live, supweb)
&& bitmap_bit_p (ri.need_reload, supweb->id))
{
ri.num_reloads--;
bitmap_clear_bit (ri.need_reload, supweb->id);
}
}
else
{
struct web *sweb;
/* If the whole web is defined here, no parts of it are
live anymore and no reloads are needed for them. */
for (sweb = supweb->subreg_next; sweb;
sweb = sweb->subreg_next)
{
RESET_BIT (ri.live, sweb->id);
if (bitmap_bit_p (ri.need_reload, sweb->id))
{
ri.num_reloads--;
bitmap_clear_bit (ri.need_reload, sweb->id);
}
}
}
if (alias (supweb)->type != SPILLED)
update_spill_colors (&(ri.colors_in_use), web, 0);
}
nl_first_reload = ri.nl_size;
/* CALL_INSNs are not really deaths, but still more registers
are free after a call than before it.
XXX Note that sometimes reload barfs when we emit insns between
a call and the insn which copies the return register into a
pseudo. */
if (CALL_P (insn))
ri.need_load = 1;
else if (INSN_P (insn))
for (n = 0; n < info.num_uses; n++)
{
struct web *web = use2web[DF_REF_ID (info.uses[n])];
struct web *supweb = find_web_for_subweb (web);
int is_death;
if (supweb->type == PRECOLORED
&& TEST_HARD_REG_BIT (never_use_colors, supweb->color))
continue;
is_death = !TEST_BIT (ri.live, supweb->id);
is_death &= !TEST_BIT (ri.live, web->id);
if (is_death)
{
ri.need_load = 1;
bitmap_set_bit (new_deaths, INSN_UID (insn));
break;
}
}
if (INSN_P (insn) && ri.num_reloads)
{
int old_num_reloads = ri.num_reloads;
reloads_to_loads (&ri, info.uses, info.num_uses, use2web);
/* If this insn sets a pseudo which isn't used later
(i.e. wasn't live before), it is a dead store. We need
to emit all reloads which have the same color as this def.
We don't need to check for non-liveness here to detect
the deadness (it's too late anyway, as we already cleared
the liveness in the first loop over the defs), because if it
_were_ live here, no reload could have that color, as
it would already have been converted to a load. */
if (ri.num_reloads)
reloads_to_loads (&ri, info.defs, info.num_defs, def2web);
if (ri.num_reloads != old_num_reloads && !ri.need_load)
ri.need_load = 1;
}
if (ri.nl_size && (ri.need_load || ri.any_spilltemps_spilled))
emit_loads (&ri, nl_first_reload, last_block_insn);
if (INSN_P (insn) && flag_ra_ir_spilling)
for (n = 0; n < info.num_uses; n++)
{
struct web *web = use2web[DF_REF_ID (info.uses[n])];
struct web *aweb = alias (find_web_for_subweb (web));
if (aweb->type != SPILLED)
update_spill_colors (&(ri.colors_in_use), web, 1);
}
ri.any_spilltemps_spilled = 0;
if (INSN_P (insn))
for (n = 0; n < info.num_uses; n++)
{
struct web *web = use2web[DF_REF_ID (info.uses[n])];
struct web *supweb = find_web_for_subweb (web);
struct web *aweb = alias (supweb);
SET_BIT (ri.live, web->id);
if (aweb->type != SPILLED)
continue;
if (supweb->spill_temp)
ri.any_spilltemps_spilled = 1;
web->last_use_insn = insn;
if (!web->in_load)
{
if (spill_is_free (&(ri.colors_in_use), aweb) <= 0
|| !flag_ra_ir_spilling)
{
ri.needed_loads[ri.nl_size++] = web;
web->in_load = 1;
web->one_load = 1;
}
else if (!bitmap_bit_p (ri.need_reload, web->id))
{
bitmap_set_bit (ri.need_reload, web->id);
ri.num_reloads++;
web->one_load = 1;
}
else
web->one_load = 0;
}
else
web->one_load = 0;
}
if (LABEL_P (insn))
break;
}
nl_first_reload = ri.nl_size;
if (ri.num_reloads)
{
int in_ir = 0;
edge e;
int num = 0;
edge_iterator ei;
bitmap_iterator bi;
HARD_REG_SET cum_colors, colors;
CLEAR_HARD_REG_SET (cum_colors);
FOR_EACH_EDGE (e, ei, bb->preds)
{
unsigned j;
if (num >= 5)
break;
CLEAR_HARD_REG_SET (colors);
EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j, bi)
{
struct web *web = use2web[j];
struct web *aweb = alias (find_web_for_subweb (web));
if (aweb->type != SPILLED)
update_spill_colors (&colors, web, 1);
}
IOR_HARD_REG_SET (cum_colors, colors);
num++;
}
if (num == 5)
in_ir = 1;
bitmap_clear (ri.scratch);
EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
struct web *supweb2 = find_web_for_subweb (web2);
struct web *aweb2 = alias (supweb2);
/* Is the block entry an IR boundary for aweb2?
Currently these are merely some attempts at good conditions. */
if (((ra_pass > 0 || supweb2->target_of_spilled_move)
&& (1 || in_ir || spill_is_free (&cum_colors, aweb2) <= 0))
|| (ra_pass == 1
&& (in_ir
|| spill_is_free (&cum_colors, aweb2) <= 0)))
{
if (!web2->in_load)
{
ri.needed_loads[ri.nl_size++] = web2;
web2->in_load = 1;
}
bitmap_set_bit (ri.scratch, j);
ri.num_reloads--;
}
}
bitmap_and_compl_into (ri.need_reload, ri.scratch);
}
ri.need_load = 1;
emit_loads (&ri, nl_first_reload, last_block_insn);
gcc_assert (ri.nl_size == 0);
if (!insn)
break;
}
free (ri.needed_loads);
sbitmap_free (ri.live);
BITMAP_XFREE (ri.scratch);
BITMAP_XFREE (ri.need_reload);
}
/* WEB is a web conflicting with a spilled one. Prepare it so
that it can be rescanned in the next pass. Mark all its uses
for checking, and clear some members of their web parts
(of defs and uses). Notably don't clear the uplink. We don't
change the layout of this web, just its conflicts.
Also remember all IDs of its uses in USES_AS_BITMAP. */
static void
mark_refs_for_checking (struct web *web, bitmap uses_as_bitmap)
{
unsigned int i;
for (i = 0; i < web->num_uses; i++)
{
unsigned int id = DF_REF_ID (web->uses[i]);
SET_BIT (last_check_uses, id);
bitmap_set_bit (uses_as_bitmap, id);
web_parts[df->def_id + id].spanned_deaths = 0;
web_parts[df->def_id + id].crosses_call = 0;
}
for (i = 0; i < web->num_defs; i++)
{
unsigned int id = DF_REF_ID (web->defs[i]);
web_parts[id].spanned_deaths = 0;
web_parts[id].crosses_call = 0;
}
}
/* The last step of the spill phase is to set up the structures for
incrementally rebuilding the interference graph. We break up
the web part structure of all spilled webs, mark their uses for
rechecking, look at their neighbors, and clean up some global
information that we will rebuild. */
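/* I.e. the rebuild is incremental: only spilled webs, webs coalesced
to them, and their conflict neighbors get their web parts reset;
everything else keeps its structure across the next pass. */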
static void
detect_web_parts_to_rebuild (void)
{
bitmap uses_as_bitmap;
unsigned int i, pass;
struct dlist *d;
sbitmap already_webs = sbitmap_alloc (num_webs);
uses_as_bitmap = BITMAP_XMALLOC ();
if (last_check_uses)
sbitmap_free (last_check_uses);
last_check_uses = sbitmap_alloc (df->use_id);
sbitmap_zero (last_check_uses);
sbitmap_zero (already_webs);
/* We need to recheck all uses of all webs involved in spilling (and the
uses added by spill insns, but those are not analyzed yet).
Those are the spilled webs themselves, webs coalesced to spilled ones,
and webs conflicting with any of them. */
for (pass = 0; pass < 2; pass++)
for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
struct conflict_link *wl;
unsigned int j;
bitmap_iterator bi;
/* This check is only needed for coalesced nodes, but hey. */
if (alias (web)->type != SPILLED)
continue;
/* For the spilled web itself we also need to clear its
uplink, to be able to rebuild smaller webs. After all,
spilling has split the web. */
for (i = 0; i < web->num_uses; i++)
{
unsigned int id = DF_REF_ID (web->uses[i]);
SET_BIT (last_check_uses, id);
bitmap_set_bit (uses_as_bitmap, id);
web_parts[df->def_id + id].uplink = NULL;
web_parts[df->def_id + id].spanned_deaths = 0;
web_parts[df->def_id + id].crosses_call = 0;
}
for (i = 0; i < web->num_defs; i++)
{
unsigned int id = DF_REF_ID (web->defs[i]);
web_parts[id].uplink = NULL;
web_parts[id].spanned_deaths = 0;
web_parts[id].crosses_call = 0;
}
/* Now look at all neighbors of this spilled web. */
if (web->have_orig_conflicts)
wl = web->orig_conflict_list;
else
wl = web->conflict_list;
for (; wl; wl = wl->next)
{
if (TEST_BIT (already_webs, wl->t->id))
continue;
SET_BIT (already_webs, wl->t->id);
mark_refs_for_checking (wl->t, uses_as_bitmap);
}
EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
if (TEST_BIT (already_webs, web2->id))
continue;
SET_BIT (already_webs, web2->id);
mark_refs_for_checking (web2, uses_as_bitmap);
}
}
/* We also unconditionally recheck all uses of any hardregs. This means
we _can_ delete all these uses from the live_at_end[] bitmaps.
And because we sometimes delete insns referring to hardregs (when
they became useless because they set up a rematerializable pseudo, which
then was rematerialized), some of those uses will go away with the next
df_analyze(). This means we even _must_ delete those uses from
the live_at_end[] bitmaps. For simplicity we simply delete
all of them. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (!fixed_regs[i])
{
struct df_link *link;
for (link = df->regs[i].uses; link; link = link->next)
if (link->ref)
bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref));
}
/* The information in live_at_end[] will be rebuilt for all uses
we recheck, so clear it here (the uses of spilled webs might
indeed not become members of it again). */
live_at_end -= 2;
for (i = 0; i < (unsigned int) last_basic_block + 2; i++)
bitmap_and_compl_into (live_at_end[i], uses_as_bitmap);
live_at_end += 2;
if (dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
{
ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
dump_sbitmap_file (dump_file, last_check_uses);
}
sbitmap_free (already_webs);
BITMAP_XFREE (uses_as_bitmap);
}
/* Statistics about deleted insns, which are useless now. */
static unsigned int deleted_def_insns;
static unsigned HOST_WIDE_INT deleted_def_cost;
/* In rewrite_program2() we noted when a certain insn set a pseudo
which wasn't live. Try to delete all those insns. */
static void
delete_useless_defs (void)
{
unsigned int i;
bitmap_iterator bi;
/* If the insn only sets the def without any side effect (besides
clobbers or uses), we can delete it. single_set() also tests
for INSN_P(insn). */
EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i, bi)
{
rtx insn = DF_REF_INSN (df->defs[i]);
rtx set = single_set (insn);
struct web *web = find_web_for_subweb (def2web[i]);
if (set && web->type == SPILLED && web->stack_slot == NULL)
{
deleted_def_insns++;
deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1;
PUT_CODE (insn, NOTE);
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
df_insn_modify (df, BLOCK_FOR_INSN (insn), insn);
}
}
}
/* Look for spilled webs on whose behalf no insns were emitted.
We invert the changed flag of the webs, so after this function
a nonzero changed flag means that this web was not spillable (at least
in this pass). */
static void
detect_non_changed_webs (void)
{
struct dlist *d, *d_next;
for (d = WEBS(SPILLED); d; d = d_next)
{
struct web *web = DLIST_WEB (d);
d_next = d->next;
if (!web->changed)
{
ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n",
web->id);
remove_web_from_list (web);
put_web (web, COLORED);
web->changed = 1;
}
else
web->changed = 0;
/* From now on web->changed is used as the opposite flag.
I.e. colored webs which have changed set were formerly
spilled webs for which no insns were emitted. */
}
}
/* Before spilling we clear the changed flags for all spilled webs. */
static void
reset_changed_flag (void)
{
struct dlist *d;
for (d = WEBS(SPILLED); d; d = d->next)
DLIST_WEB(d)->changed = 0;
}
/* The toplevel function for this file. Given a colorized graph,
and lists of spilled, coalesced and colored webs, we add some
spill code. This also sets up the structures for incrementally
building the interference graph in the next pass. */
void
actual_spill (void)
{
unsigned i;
bitmap_iterator bi;
bitmap new_deaths = BITMAP_XMALLOC ();
reset_changed_flag ();
spill_coalprop ();
choose_spill_colors ();
useless_defs = BITMAP_XMALLOC ();
if (flag_ra_improved_spilling)
rewrite_program2 (new_deaths);
else
rewrite_program (new_deaths);
insert_stores (new_deaths);
delete_useless_defs ();
BITMAP_XFREE (useless_defs);
sbitmap_free (insns_with_deaths);
insns_with_deaths = sbitmap_alloc (get_max_uid ());
death_insns_max_uid = get_max_uid ();
sbitmap_zero (insns_with_deaths);
EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i, bi)
{
SET_BIT (insns_with_deaths, i);
}
detect_non_changed_webs ();
detect_web_parts_to_rebuild ();
BITMAP_XFREE (new_deaths);
}
/* A bitmap of pseudo reg numbers which are coalesced directly
to a hardreg. Set in emit_colors(), used and freed in
remove_suspicious_death_notes(). */
static bitmap regnos_coalesced_to_hardregs;
/* Create new pseudos for each web we colored, change insns to
use those pseudos and set up ra_reg_renumber. */
void
emit_colors (struct df *df)
{
unsigned int i;
int si;
struct web *web;
int old_max_regno = max_reg_num ();
regset old_regs;
basic_block bb;
/* This bitmap is freed in remove_suspicious_death_notes(),
which is also the user of it. */
regnos_coalesced_to_hardregs = BITMAP_XMALLOC ();
/* First create the (REG xx) rtx's for all webs, as we need to know
their number to make sure flow has enough memory for them in the
various tables. */
for (i = 0; i < num_webs - num_subwebs; i++)
{
web = ID2WEB (i);
if (web->type != COLORED && web->type != COALESCED)
continue;
if (web->type == COALESCED && alias (web)->type == COLORED)
continue;
gcc_assert (!web->reg_rtx);
gcc_assert (web->regno >= FIRST_PSEUDO_REGISTER);
if (web->regno >= max_normal_pseudo)
{
rtx place;
if (web->color == an_unusable_color)
{
unsigned int inherent_size = PSEUDO_REGNO_BYTES (web->regno);
unsigned int total_size = MAX (inherent_size, 0);
place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno),
total_size,
inherent_size == total_size ? 0 : -1);
set_mem_alias_set (place, new_alias_set ());
}
else
{
place = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
}
web->reg_rtx = place;
}
else
{
/* Special case for the i386 'fix_truncdi_nomemory' insn.
We must choose the mode from the insns, not from PSEUDO_REGNO_MODE.
This is actually only relevant for clobbered registers. */
if (web->num_uses == 0 && web->num_defs == 1)
web->reg_rtx = gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web->defs[0])));
else
web->reg_rtx = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
/* Remember the different parts directly coalesced to a hardreg. */
if (web->type == COALESCED)
bitmap_set_bit (regnos_coalesced_to_hardregs, REGNO (web->reg_rtx));
}
}
ra_max_regno = max_regno = max_reg_num ();
allocate_reg_info (max_regno, FALSE, FALSE);
ra_reg_renumber = xmalloc (max_regno * sizeof (short));
for (si = 0; si < max_regno; si++)
ra_reg_renumber[si] = -1;
/* Then go through all references, and replace them by a new
pseudoreg for each web. All uses. */
/* XXX
Beware: The order of replacements (first uses, then defs) matters only
for read-mod-write insns, where the RTL expression for the REG is
shared between def and use. For normal rmw insns we connected all such
webs, i.e. both the use and the def (which are the same memory)
get the same new pseudo-reg there, so the order would not matter.
_However_ we did not connect webs where the read cycle was an
uninitialized read. If we now first replaced the def reference
and then the use ref, we would initialize the use with a REG rtx that
never gets initialized, and, still more wrong, would overwrite
the definition of the other REG rtx. So we must replace the defs last.
*/
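/* Illustration of the uninitialized rmw case: in
(set (reg P) (plus (reg P) (const_int 1)))
where the read of P is uninitialized, use and def belong to
different webs but share the single (reg P) rtx. Replacing the
def first would therefore also clobber what the use reference
still points to. */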
for (i = 0; i < df->use_id; i++)
if (df->uses[i])
{
regset rs = DF_REF_BB (df->uses[i])->global_live_at_start;
rtx regrtx;
web = use2web[i];
web = find_web_for_subweb (web);
if (web->type != COLORED && web->type != COALESCED)
continue;
regrtx = alias (web)->reg_rtx;
if (!regrtx)
regrtx = web->reg_rtx;
*DF_REF_REAL_LOC (df->uses[i]) = regrtx;
if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
{
/*CLEAR_REGNO_REG_SET (rs, web->regno);*/
SET_REGNO_REG_SET (rs, REGNO (regrtx));
}
}
/* And all defs. */
for (i = 0; i < df->def_id; i++)
{
regset rs;
rtx regrtx;
if (!df->defs[i])
continue;
rs = DF_REF_BB (df->defs[i])->global_live_at_start;
web = def2web[i];
web = find_web_for_subweb (web);
if (web->type != COLORED && web->type != COALESCED)
continue;
regrtx = alias (web)->reg_rtx;
if (!regrtx)
regrtx = web->reg_rtx;
*DF_REF_REAL_LOC (df->defs[i]) = regrtx;
if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
{
/* Don't simply clear the current regno, as it might be
replaced by two webs. */
/*CLEAR_REGNO_REG_SET (rs, web->regno);*/
SET_REGNO_REG_SET (rs, REGNO (regrtx));
}
}
/* And now set up the ra_reg_renumber array for reload with all the new
pseudo-regs. */
for (i = 0; i < num_webs - num_subwebs; i++)
{
web = ID2WEB (i);
if (web->reg_rtx && REG_P (web->reg_rtx))
{
int r = REGNO (web->reg_rtx);
ra_reg_renumber[r] = web->color;
ra_debug_msg (DUMP_COLORIZE, "Renumber pseudo %d (== web %d) to %d\n",
r, web->id, ra_reg_renumber[r]);
}
}
old_regs = BITMAP_XMALLOC ();
for (si = FIRST_PSEUDO_REGISTER; si < old_max_regno; si++)
SET_REGNO_REG_SET (old_regs, si);
FOR_EACH_BB (bb)
{
AND_COMPL_REG_SET (bb->global_live_at_start, old_regs);
AND_COMPL_REG_SET (bb->global_live_at_end, old_regs);
}
BITMAP_XFREE (old_regs);
}
/* Delete some coalesced moves from the insn stream. */
void
delete_moves (void)
{
struct move_list *ml;
struct web *s, *t;
/* XXX Beware: Normally we would test here for each copy insn whether
source and target got the same color (either by coalescing or by pure
luck), and then delete it.
This will currently not work. One problem is that we don't color
the regs ourselves, but instead defer to reload. So the colorization
is only a kind of suggestion, which reload doesn't have to follow.
For webs which are coalesced to a normal colored web, we only have one
new pseudo, so in this case we can indeed delete copy insns involving
those (because even if reload colors them differently from our suggestion,
it still has to color them the same, as only one pseudo exists). But for
webs coalesced to precolored ones, we don't have a single pseudo, but
instead one for each coalesced web. This means that we can't delete
copy insns where source and target are webs coalesced to precolored
ones, because then the connection between both webs is destroyed. Note
that this means not only copy insns where one side is the precolored one
itself, but also those between webs which are coalesced to one color.
Also because of reload we can't delete copy insns which involve any
precolored web at all. These often also have a special meaning (e.g.
copying the return value of a call to a pseudo, or copying a pseudo to the
return register), and the deletion would confuse reload into thinking the
pseudo isn't needed. One of these days reload will go away and we can
do everything we want.
In effect, because of the later reload, we can't base our deletion on the
colors themselves, but instead need to base it on the newly created
pseudos. */
for (ml = wl_moves; ml; ml = ml->next)
/* The real condition we would ideally use is: s->color == t->color.
Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
we want to prevent deletion of "special" copies. */
if (ml->move
&& (s = alias (ml->move->source_web))->reg_rtx
== (t = alias (ml->move->target_web))->reg_rtx
&& s->type != PRECOLORED && t->type != PRECOLORED)
{
basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
df_insn_delete (df, bb, ml->move->insn);
deleted_move_insns++;
deleted_move_cost += bb->frequency + 1;
}
}
/* For reasons documented elsewhere we create different pseudos
for all webs coalesced to hardregs. For these parts life_analysis()
might have added REG_DEAD notes without considering that only this part,
but not the whole coalesced web, dies. The RTL is correct; there is no
coalescing yet. But if reload's alter_reg() later substitutes the
hardreg into the REG rtx, it looks like that particular hardreg dies here,
although (due to coalescing) it still is live. This might make different
places in reload think they can use that hardreg for reload regs,
accidentally overwriting it. So we need to remove those REG_DEAD notes.
(Or better, teach life_analysis() and reload about our coalescing, but
that comes later.) Bah. */
void
remove_suspicious_death_notes (void)
{
rtx insn;
for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
rtx *pnote = &REG_NOTES (insn);
while (*pnote)
{
rtx note = *pnote;
if ((REG_NOTE_KIND (note) == REG_DEAD
|| REG_NOTE_KIND (note) == REG_UNUSED)
&& (REG_P (XEXP (note, 0))
&& bitmap_bit_p (regnos_coalesced_to_hardregs,
REGNO (XEXP (note, 0)))))
*pnote = XEXP (note, 1);
else
pnote = &XEXP (*pnote, 1);
}
}
BITMAP_XFREE (regnos_coalesced_to_hardregs);
regnos_coalesced_to_hardregs = NULL;
}
/* Allocate space for max_reg_num() pseudo registers, and
fill reg_renumber[] from ra_reg_renumber[]. If FREE_IT
is nonzero, also free ra_reg_renumber and reset ra_max_regno. */
void
setup_renumber (int free_it)
{
int i;
max_regno = max_reg_num ();
allocate_reg_info (max_regno, FALSE, TRUE);
for (i = 0; i < max_regno; i++)
{
reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1;
}
if (free_it)
{
free (ra_reg_renumber);
ra_reg_renumber = NULL;
ra_max_regno = 0;
}
}
/* Dump the costs and savings due to spilling, i.e. of added spill insns
and removed moves or useless defs. */
void
dump_cost (unsigned int level)
{
ra_debug_msg (level, "Instructions for spilling\n added:\n");
ra_debug_msg (level, " loads =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
emitted_spill_loads, spill_load_cost);
ra_debug_msg (level, " stores=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
emitted_spill_stores, spill_store_cost);
ra_debug_msg (level, " remat =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
emitted_remat, spill_remat_cost);
ra_debug_msg (level, " removed:\n moves =%d cost="
HOST_WIDE_INT_PRINT_UNSIGNED "\n",
deleted_move_insns, deleted_move_cost);
ra_debug_msg (level, " others=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
deleted_def_insns, deleted_def_cost);
}
/* Initialization of the rewrite phase. */
void
ra_rewrite_init (void)
{
emitted_spill_loads = 0;
emitted_spill_stores = 0;
emitted_remat = 0;
spill_load_cost = 0;
spill_store_cost = 0;
spill_remat_cost = 0;
deleted_move_insns = 0;
deleted_move_cost = 0;
deleted_def_insns = 0;
deleted_def_cost = 0;
}
/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/
/* Graph coloring register allocator
Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with GCC; see the file COPYING. If not, write to the Free Software
Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "insn-config.h"
#include "recog.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "regs.h"
#include "obstack.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "df.h"
#include "expr.h"
#include "output.h"
#include "toplev.h"
#include "flags.h"
#include "ra.h"
/* This is the toplevel file of a graph coloring register allocator.
It is able to act like a George & Appel allocator, i.e. with iterative
coalescing plus spill coalescing/propagation.
And it can act as a traditional Briggs allocator, although with
optimistic coalescing. Additionally it has a custom pass, which
tries to reduce the overall cost of the colored graph.
We support two modes of spilling: spill-everywhere, which is extremely
fast, and interference region spilling, which reduces spill code to a
large extent, but is slower.
Helpful documents:
Briggs, P., Cooper, K. D., and Torczon, L. 1994. Improvements to graph
coloring register allocation. ACM Trans. Program. Lang. Syst. 16, 3 (May),
428-455.
Bergner, P., Dahl, P., Engebretsen, D., and O'Keefe, M. 1997. Spill code
minimization via interference region spilling. In Proc. ACM SIGPLAN '97
Conf. on Prog. Language Design and Implementation. ACM, 287-295.
George, L., Appel, A.W. 1996. Iterated register coalescing.
ACM Trans. Program. Lang. Syst. 18, 3 (May), 300-324.
*/
/* This file contains the main entry point (reg_alloc), some helper routines
used by more than one file of the register allocator, and the toplevel
driver procedure (one_pass). */
/* Things one might do at some point:
* Lattice based rematerialization
* create definitions of ever-live regs at the beginning of
the insn chain
* insert loads as early as possible, stores as late as possible
* insert spill insns as far outward as possible (either looptree, or LCM)
* reuse stack-slots
* delete coalesced insns. Partly done. The rest can only go when we get
rid of reload.
* don't destroy coalescing information completely when spilling
* use the constraints from asms
*/
static int first_hard_reg (HARD_REG_SET);
static struct obstack ra_obstack;
static void create_insn_info (struct df *);
static void free_insn_info (void);
static void alloc_mem (struct df *);
static void free_mem (struct df *);
static void free_all_mem (struct df *df);
static int one_pass (struct df *, int);
static void check_df (struct df *);
static void init_ra (void);
void reg_alloc (void);
/* These global variables are "internal" to the register allocator.
They are all documented at their declarations in ra.h. */
/* At some point we want to get rid of one of those sbitmaps.
(For now I need the sup_igraph to note if there is any conflict between
parts of webs at all. I can't use igraph for this, as it notes only
the real conflicts.) This is only used to prevent coalescing two
conflicting webs where only parts of them are in conflict. */
sbitmap igraph;
sbitmap sup_igraph;
/* Notes the insns not inserted by the allocator in which we detected any
deaths of pseudos. It is used to detect closeness of defs and uses.
In the first pass this is empty (we could initialize it from REG_DEAD
notes), in the other passes it is left from the pass before. */
sbitmap insns_with_deaths;
int death_insns_max_uid;
struct web_part *web_parts;
unsigned int num_webs;
unsigned int num_subwebs;
unsigned int num_allwebs;
struct web **id2web;
struct web *hardreg2web[FIRST_PSEUDO_REGISTER];
struct web **def2web;
struct web **use2web;
struct move_list *wl_moves;
int ra_max_regno;
short *ra_reg_renumber;
struct df *df;
bitmap *live_at_end;
int ra_pass;
unsigned int max_normal_pseudo;
int an_unusable_color;
/* The different lists on which a web can be (based on the type). */
struct dlist *web_lists[(int) LAST_NODE_TYPE];
unsigned int last_def_id;
unsigned int last_use_id;
unsigned int last_num_webs;
int last_max_uid;
sbitmap last_check_uses;
unsigned int remember_conflicts;
int orig_max_uid;
HARD_REG_SET never_use_colors;
HARD_REG_SET usable_regs[N_REG_CLASSES];
unsigned int num_free_regs[N_REG_CLASSES];
int single_reg_in_regclass[N_REG_CLASSES];
HARD_REG_SET hardregs_for_mode[NUM_MACHINE_MODES];
HARD_REG_SET invalid_mode_change_regs;
unsigned char byte2bitcount[256];
unsigned int debug_new_regalloc = -1;
int flag_ra_biased = 0;
int flag_ra_improved_spilling = 0;
int flag_ra_ir_spilling = 0;
int flag_ra_optimistic_coalescing = 0;
int flag_ra_break_aliases = 0;
int flag_ra_merge_spill_costs = 0;
int flag_ra_spill_every_use = 0;
int flag_ra_dump_notes = 0;
/* Fast allocation of small objects, which live until the allocator
is done. Allocate an object of SIZE bytes. */
void *
ra_alloc (size_t size)
{
return obstack_alloc (&ra_obstack, size);
}
/* Like ra_alloc(), but clear the returned memory. */
void *
ra_calloc (size_t size)
{
void *p = obstack_alloc (&ra_obstack, size);
memset (p, 0, size);
return p;
}
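/* Illustrative usage (an example, not from the original sources):
   small allocator-lifetime objects are obtained like

     struct web_link *wl = ra_alloc (sizeof (struct web_link));

   and are all reclaimed in bulk by the obstack_free() call in
   free_all_mem().  */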
/* Returns the number of hardregs in HARD_REG_SET RS. */
int
hard_regs_count (HARD_REG_SET rs)
{
int count = 0;
#ifdef HARD_REG_SET
while (rs)
{
unsigned char byte = rs & 0xFF;
rs >>= 8;
/* Avoid memory access if nothing is set. */
if (byte)
count += byte2bitcount[byte];
}
#else
unsigned int ofs;
for (ofs = 0; ofs < HARD_REG_SET_LONGS; ofs++)
{
HARD_REG_ELT_TYPE elt = rs[ofs];
while (elt)
{
unsigned char byte = elt & 0xFF;
elt >>= 8;
if (byte)
count += byte2bitcount[byte];
}
}
#endif
return count;
}
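/* Worked example: a set whose low byte is 0xB5 (binary 10110101)
   contributes byte2bitcount[0xB5] == 5 to the returned count.  */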
/* Returns the first hardreg in HARD_REG_SET RS. Assumes there is at
least one reg in the set. */
static int
first_hard_reg (HARD_REG_SET rs)
{
int c;
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
if (TEST_HARD_REG_BIT (rs, c))
break;
gcc_assert (c < FIRST_PSEUDO_REGISTER);
return c;
}
/* Basically like emit_move_insn (i.e. validifies constants and such),
but also handles MODE_CC moves (but then the operands must already
be basically valid). */
rtx
ra_emit_move_insn (rtx x, rtx y)
{
enum machine_mode mode = GET_MODE (x);
if (GET_MODE_CLASS (mode) == MODE_CC)
return emit_insn (gen_move_insn (x, y));
else
return emit_move_insn (x, y);
}
int insn_df_max_uid;
struct ra_insn_info *insn_df;
static struct ref **refs_for_insn_df;
/* Create the insn_df structure for each insn to have fast access to
all valid defs and uses in an insn. */
static void
create_insn_info (struct df *df)
{
rtx insn;
struct ref **act_refs;
insn_df_max_uid = get_max_uid ();
insn_df = xcalloc (insn_df_max_uid, sizeof (insn_df[0]));
refs_for_insn_df = xcalloc (df->def_id + df->use_id, sizeof (struct ref *));
act_refs = refs_for_insn_df;
/* We create those things backwards to mimic the order in which
the insns are visited in rewrite_program2() and live_in(). */
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
{
int uid = INSN_UID (insn);
unsigned int n;
struct df_link *link;
if (!INSN_P (insn))
continue;
for (n = 0, link = DF_INSN_DEFS (df, insn); link; link = link->next)
if (link->ref
&& (DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER
|| !TEST_HARD_REG_BIT (never_use_colors,
DF_REF_REGNO (link->ref))))
{
if (n == 0)
insn_df[uid].defs = act_refs;
insn_df[uid].defs[n++] = link->ref;
}
act_refs += n;
insn_df[uid].num_defs = n;
for (n = 0, link = DF_INSN_USES (df, insn); link; link = link->next)
if (link->ref
&& (DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER
|| !TEST_HARD_REG_BIT (never_use_colors,
DF_REF_REGNO (link->ref))))
{
if (n == 0)
insn_df[uid].uses = act_refs;
insn_df[uid].uses[n++] = link->ref;
}
act_refs += n;
insn_df[uid].num_uses = n;
}
gcc_assert (refs_for_insn_df + (df->def_id + df->use_id) >= act_refs);
}
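/* Note the layout: the defs and uses of all insns share the single
   refs_for_insn_df array; insn_df[uid].defs and insn_df[uid].uses are
   just windows into it, so one allocation and one free suffice for
   the whole function.  */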
/* Free the insn_df structures. */
static void
free_insn_info (void)
{
free (refs_for_insn_df);
refs_for_insn_df = NULL;
free (insn_df);
insn_df = NULL;
insn_df_max_uid = 0;
}
/* Search WEB for a subweb which represents REG. REG needs to
be a SUBREG, and the inner reg of it needs to be the one which is
represented by WEB. Returns the matching subweb or NULL. */
struct web *
find_subweb (struct web *web, rtx reg)
{
struct web *w;
gcc_assert (GET_CODE (reg) == SUBREG);
for (w = web->subreg_next; w; w = w->subreg_next)
if (GET_MODE (w->orig_x) == GET_MODE (reg)
&& SUBREG_BYTE (w->orig_x) == SUBREG_BYTE (reg))
return w;
return NULL;
}
/* Similar to find_subweb(), but matches according to SIZE_WORD,
an encoding of the needed size and offset (in bytes). */
struct web *
find_subweb_2 (struct web *web, unsigned int size_word)
{
struct web *w = web;
if (size_word == GET_MODE_SIZE (GET_MODE (web->orig_x)))
/* size_word == size means BYTE_BEGIN(size_word) == 0. */
return web;
for (w = web->subreg_next; w; w = w->subreg_next)
{
unsigned int bl = rtx_to_bits (w->orig_x);
if (size_word == bl)
return w;
}
return NULL;
}
/* Returns the superweb for SUBWEB. */
struct web *
find_web_for_subweb_1 (struct web *subweb)
{
while (subweb->parent_web)
subweb = subweb->parent_web;
return subweb;
}
/* Determine if two hard register sets intersect.
Return 1 if they do. */
int
hard_regs_intersect_p (HARD_REG_SET *a, HARD_REG_SET *b)
{
HARD_REG_SET c;
COPY_HARD_REG_SET (c, *a);
AND_HARD_REG_SET (c, *b);
GO_IF_HARD_REG_SUBSET (c, reg_class_contents[(int) NO_REGS], lose);
return 1;
lose:
return 0;
}
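/* The test above works because reg_class_contents[NO_REGS] is the
   empty set: *A and *B intersect exactly when their intersection C
   is not a subset of the empty set.  */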
/* Allocate and initialize the memory necessary for one pass of the
register allocator. */
static void
alloc_mem (struct df *df)
{
int i;
ra_build_realloc (df);
if (!live_at_end)
{
live_at_end = xmalloc ((last_basic_block + 2) * sizeof (bitmap));
for (i = 0; i < last_basic_block + 2; i++)
live_at_end[i] = BITMAP_XMALLOC ();
live_at_end += 2;
}
create_insn_info (df);
}
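/* Note the bias applied above: live_at_end is advanced by 2,
   presumably so that it can also be indexed with the negative block
   indices of the entry and exit blocks; free_all_mem() undoes the
   bias before freeing.  */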
/* Free the memory which isn't necessary for the next pass. */
static void
free_mem (struct df *df ATTRIBUTE_UNUSED)
{
free_insn_info ();
ra_build_free ();
}
/* Free all memory allocated for the register allocator. Used when
it's done. */
static void
free_all_mem (struct df *df)
{
unsigned int i;
live_at_end -= 2;
for (i = 0; i < (unsigned)last_basic_block + 2; i++)
BITMAP_XFREE (live_at_end[i]);
free (live_at_end);
ra_colorize_free_all ();
ra_build_free_all (df);
obstack_free (&ra_obstack, NULL);
}
static long ticks_build;
static long ticks_rebuild;
/* Perform one pass of allocation. Returns nonzero if some spill code
was added, i.e. if the allocator needs to rerun. */
static int
one_pass (struct df *df, int rebuild)
{
long ticks = clock ();
int something_spilled;
remember_conflicts = 0;
/* Build the complete interference graph, or if this is not the first
pass, rebuild it incrementally. */
build_i_graph (df);
/* From now on, if we create new conflicts, we need to remember the
initial list of conflicts per web. */
remember_conflicts = 1;
if (!rebuild)
dump_igraph_machine ();
/* Colorize the I-graph. This results either in a list of
spilled webs, in which case we need to run the spill phase and
rerun the allocator, or in an empty list, meaning we are done. */
ra_colorize_graph (df);
last_max_uid = get_max_uid ();
/* actual_spill() might change WEBS(SPILLED) and even empty it,
so we need to remember its state. */
something_spilled = !!WEBS(SPILLED);
/* Add spill code if necessary. */
if (something_spilled)
actual_spill ();
ticks = clock () - ticks;
if (rebuild)
ticks_rebuild += ticks;
else
ticks_build += ticks;
return something_spilled;
}
/* Initialize various arrays for the register allocator. */
static void
init_ra (void)
{
int i;
HARD_REG_SET rs;
#ifdef ELIMINABLE_REGS
static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
unsigned int j;
#endif
int need_fp
= (! flag_omit_frame_pointer
|| (current_function_calls_alloca && EXIT_IGNORE_STACK)
|| FRAME_POINTER_REQUIRED);
ra_colorize_init ();
/* We can't ever use any of the fixed regs. */
COPY_HARD_REG_SET (never_use_colors, fixed_reg_set);
/* Additionally don't even try to use hardregs which we already
know are not eliminable. This also includes either the
hard frame pointer or all regs which are eliminable into the
stack pointer, if need_fp is set. */
#ifdef ELIMINABLE_REGS
for (j = 0; j < ARRAY_SIZE (eliminables); j++)
{
if (! CAN_ELIMINATE (eliminables[j].from, eliminables[j].to)
|| (eliminables[j].to == STACK_POINTER_REGNUM && need_fp))
for (i = hard_regno_nregs[eliminables[j].from][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, eliminables[j].from + i);
}
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
if (need_fp)
for (i = hard_regno_nregs[HARD_FRAME_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, HARD_FRAME_POINTER_REGNUM + i);
#endif
#else
if (need_fp)
for (i = hard_regno_nregs[FRAME_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, FRAME_POINTER_REGNUM + i);
#endif
/* Stack and argument pointer are also rather useless to us. */
for (i = hard_regno_nregs[STACK_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, STACK_POINTER_REGNUM + i);
for (i = hard_regno_nregs[ARG_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, ARG_POINTER_REGNUM + i);
for (i = 0; i < 256; i++)
{
unsigned char byte = ((unsigned) i) & 0xFF;
unsigned char count = 0;
while (byte)
{
if (byte & 1)
count++;
byte >>= 1;
}
byte2bitcount[i] = count;
}
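/* E.g. byte2bitcount[0x00] == 0, byte2bitcount[0x05] == 2 and
   byte2bitcount[0xFF] == 8; hard_regs_count() sums these per-byte
   counts.  */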
for (i = 0; i < N_REG_CLASSES; i++)
{
int size;
COPY_HARD_REG_SET (rs, reg_class_contents[i]);
AND_COMPL_HARD_REG_SET (rs, never_use_colors);
size = hard_regs_count (rs);
num_free_regs[i] = size;
COPY_HARD_REG_SET (usable_regs[i], rs);
if (size == 1)
single_reg_in_regclass[i] = first_hard_reg (rs);
else
single_reg_in_regclass[i] = -1;
}
/* Set up hardregs_for_mode[].
We are interested not only in the beginning of a multi-reg group,
but in all the hardregs involved. Maybe HARD_REGNO_MODE_OK() only
accepts the beginnings. */
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
int reg, size;
CLEAR_HARD_REG_SET (rs);
for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
if (HARD_REGNO_MODE_OK (reg, i)
/* Ignore VOIDmode and similar things. */
&& (size = hard_regno_nregs[reg][i]) != 0
&& (reg + size) <= FIRST_PSEUDO_REGISTER)
{
while (size--)
SET_HARD_REG_BIT (rs, reg + size);
}
COPY_HARD_REG_SET (hardregs_for_mode[i], rs);
}
CLEAR_HARD_REG_SET (invalid_mode_change_regs);
#ifdef CANNOT_CHANGE_MODE_CLASS
if (0)
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
enum machine_mode from = (enum machine_mode) i;
enum machine_mode to;
for (to = VOIDmode; to < MAX_MACHINE_MODE; ++to)
{
int r;
for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
if (REG_CANNOT_CHANGE_MODE_P (from, to, r))
SET_HARD_REG_BIT (invalid_mode_change_regs, r);
}
}
#endif
for (an_unusable_color = 0; an_unusable_color < FIRST_PSEUDO_REGISTER;
an_unusable_color++)
if (TEST_HARD_REG_BIT (never_use_colors, an_unusable_color))
break;
gcc_assert (an_unusable_color != FIRST_PSEUDO_REGISTER);
orig_max_uid = get_max_uid ();
compute_bb_for_insn ();
ra_reg_renumber = NULL;
insns_with_deaths = sbitmap_alloc (orig_max_uid);
death_insns_max_uid = orig_max_uid;
sbitmap_ones (insns_with_deaths);
gcc_obstack_init (&ra_obstack);
}
/* Check the consistency of DF. This asserts if it violates some
invariants we expect. */
static void
check_df (struct df *df)
{
struct df_link *link;
rtx insn;
int regno;
unsigned int ui;
bitmap b = BITMAP_XMALLOC ();
bitmap empty_defs = BITMAP_XMALLOC ();
bitmap empty_uses = BITMAP_XMALLOC ();
/* Collect all the IDs of NULL references in the ID->REF arrays,
as df.c leaves them when updating the df structure. */
for (ui = 0; ui < df->def_id; ui++)
if (!df->defs[ui])
bitmap_set_bit (empty_defs, ui);
for (ui = 0; ui < df->use_id; ui++)
if (!df->uses[ui])
bitmap_set_bit (empty_uses, ui);
/* For each insn we check that the chain of references contains each
ref only once and contains no NULL refs or refs whose ID is invalid
(i.e. whose df->refs[id] element is NULL). */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
bitmap_clear (b);
for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
{
gcc_assert (link->ref);
gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)));
gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
}
bitmap_clear (b);
for (link = DF_INSN_USES (df, insn); link; link = link->next)
{
gcc_assert (link->ref);
gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)));
gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
}
}
/* Now the same for the chains per register number. */
for (regno = 0; regno < max_reg_num (); regno++)
{
bitmap_clear (b);
for (link = df->regs[regno].defs; link; link = link->next)
{
gcc_assert (link->ref);
gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)));
gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
}
bitmap_clear (b);
for (link = df->regs[regno].uses; link; link = link->next)
{
gcc_assert (link->ref);
gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)));
gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
}
}
BITMAP_XFREE (empty_uses);
BITMAP_XFREE (empty_defs);
BITMAP_XFREE (b);
}
/* Main register allocator entry point. */
void
reg_alloc (void)
{
int changed;
FILE *ra_dump_file = dump_file;
rtx last = get_last_insn ();
if (! INSN_P (last))
last = prev_real_insn (last);
/* If this is an empty function we shouldn't do all the following,
but instead just set up what's necessary, and return. */
/* We currently rely on the existence of the return value USE as
one of the last insns. Add it if it's not there anymore. */
if (last)
{
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
{
basic_block bb = e->src;
last = BB_END (bb);
if (!INSN_P (last) || GET_CODE (PATTERN (last)) != USE)
{
rtx insns;
start_sequence ();
use_return_register ();
insns = get_insns ();
end_sequence ();
emit_insn_after (insns, last);
}
}
}
/* Setup debugging levels. */
switch (0)
{
/* Some useful presets of the debug level that I often use. */
case 0: debug_new_regalloc = DUMP_EVER; break;
case 1: debug_new_regalloc = DUMP_COSTS; break;
case 2: debug_new_regalloc = DUMP_IGRAPH_M; break;
case 3: debug_new_regalloc = DUMP_COLORIZE + DUMP_COSTS; break;
case 4: debug_new_regalloc = DUMP_COLORIZE + DUMP_COSTS + DUMP_WEBS;
break;
case 5: debug_new_regalloc = DUMP_FINAL_RTL + DUMP_COSTS +
DUMP_CONSTRAINTS;
break;
case 6: debug_new_regalloc = DUMP_VALIDIFY; break;
}
if (!dump_file)
debug_new_regalloc = 0;
/* Run regclass first, so we know the preferred and alternate classes
for each pseudo. Deactivate emitting of debug info if it's not
explicitly requested. */
if ((debug_new_regalloc & DUMP_REGCLASS) == 0)
dump_file = NULL;
regclass (get_insns (), max_reg_num (), dump_file);
dump_file = ra_dump_file;
/* We don't use those NOTEs, and as we change all registers anyway,
they only cause problems later. */
count_or_remove_death_notes (NULL, 1);
/* Initialize the different global arrays and regsets. */
init_ra ();
/* And some global variables. */
ra_pass = 0;
no_new_pseudos = 0;
max_normal_pseudo = (unsigned) max_reg_num ();
ra_rewrite_init ();
last_def_id = 0;
last_use_id = 0;
last_num_webs = 0;
last_max_uid = 0;
last_check_uses = NULL;
live_at_end = NULL;
WEBS(INITIAL) = NULL;
WEBS(FREE) = NULL;
memset (hardreg2web, 0, sizeof (hardreg2web));
ticks_build = ticks_rebuild = 0;
/* The default is to use optimistic coalescing with interference
region spilling, without biased coloring. */
flag_ra_biased = 0;
flag_ra_spill_every_use = 0;
flag_ra_improved_spilling = 1;
flag_ra_ir_spilling = 1;
flag_ra_break_aliases = 0;
flag_ra_optimistic_coalescing = 1;
flag_ra_merge_spill_costs = 1;
if (flag_ra_optimistic_coalescing)
flag_ra_break_aliases = 1;
flag_ra_dump_notes = 0;
/* Allocate the global df structure. */
df = df_init ();
/* This is the main loop, calling one_pass as long as there are still
some spilled webs. */
do
{
ra_debug_msg (DUMP_NEARLY_EVER, "RegAlloc Pass %d\n\n", ra_pass);
if (ra_pass++ > 40)
internal_error ("Didn't find a coloring.\n");
/* First collect all the register refs and put them into
chains per insn, and per regno. In later passes only update
that info from the new and modified insns. */
df_analyze (df, (ra_pass == 1) ? 0 : (bitmap) -1,
DF_HARD_REGS | DF_RD_CHAIN | DF_RU_CHAIN | DF_FOR_REGALLOC);
if ((debug_new_regalloc & DUMP_DF) != 0)
{
rtx insn;
df_dump (df, DF_HARD_REGS, dump_file);
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
df_insn_debug_regno (df, insn, dump_file);
}
check_df (df);
/* Now allocate the memory needed for this pass, or (if it's not the
first pass), reallocate only additional memory. */
alloc_mem (df);
/* Build and colorize the interference graph, and possibly emit
spill insns. This also might delete certain move insns. */
changed = one_pass (df, ra_pass > 1);
/* If that produced no changes, the graph was colorizable. */
if (!changed)
{
/* Change the insns to refer to the new pseudos (one per web). */
emit_colors (df);
/* Already set up a preliminary reg_renumber[] array, but don't
free our own version. reg_renumber[] will again be destroyed
later. Right now we need it in dump_constraints() for
constrain_operands(1), whose subprocedures sometimes reference
it (because we are checking strictly, i.e. as if
after reload). */
setup_renumber (0);
/* Delete some more of the coalesced moves. */
delete_moves ();
dump_constraints ();
}
else
{
/* If there were changes, this means spill code was added,
therefore repeat some things, including some initialization
of global data structures. */
if ((debug_new_regalloc & DUMP_REGCLASS) == 0)
dump_file = NULL;
/* We have new pseudos (the stackwebs). */
allocate_reg_info (max_reg_num (), FALSE, FALSE);
/* And new insns. */
compute_bb_for_insn ();
/* Some of them might be dead. */
delete_trivially_dead_insns (get_insns (), max_reg_num ());
/* Those new pseudos need to have their REFS count set. */
reg_scan_update (get_insns (), NULL, max_regno);
max_regno = max_reg_num ();
/* And they need useful classes too. */
regclass (get_insns (), max_reg_num (), dump_file);
dump_file = ra_dump_file;
/* Remember the number of defs and uses, so we can distinguish
new from old refs in the next pass. */
last_def_id = df->def_id;
last_use_id = df->use_id;
}
/* Output the graph, and possibly the current insn sequence. */
dump_ra (df);
if (changed && (debug_new_regalloc & DUMP_RTL) != 0)
{
ra_print_rtl_with_bb (dump_file, get_insns ());
fflush (dump_file);
}
/* Reset the web lists. */
reset_lists ();
free_mem (df);
}
while (changed);
/* We are done with allocation, free all memory and output some
debug info. */
free_all_mem (df);
df_finish (df);
if ((debug_new_regalloc & DUMP_RESULTS) == 0)
dump_cost (DUMP_COSTS);
ra_debug_msg (DUMP_COSTS, "ticks for build-phase: %ld\n", ticks_build);
ra_debug_msg (DUMP_COSTS, "ticks for rebuild-phase: %ld\n", ticks_rebuild);
if ((debug_new_regalloc & (DUMP_FINAL_RTL | DUMP_RTL)) != 0)
ra_print_rtl_with_bb (dump_file, get_insns ());
/* We might have new pseudos, so allocate the info arrays for them. */
if ((debug_new_regalloc & DUMP_SM) == 0)
dump_file = NULL;
no_new_pseudos = 0;
allocate_reg_info (max_reg_num (), FALSE, FALSE);
no_new_pseudos = 1;
dump_file = ra_dump_file;
/* Some spill insns could've been inserted after trapping calls, i.e.
at the end of a basic block, which really ends at that call.
Fix up those breakages by adjusting basic block boundaries. */
fixup_abnormal_edges ();
/* Cleanup the flow graph. */
if ((debug_new_regalloc & DUMP_LAST_FLOW) == 0)
dump_file = NULL;
life_analysis (dump_file,
PROP_DEATH_NOTES | PROP_LOG_LINKS | PROP_REG_INFO);
cleanup_cfg (CLEANUP_EXPENSIVE);
recompute_reg_usage (get_insns (), TRUE);
if (dump_file)
dump_flow_info (dump_file);
dump_file = ra_dump_file;
/* update_equiv_regs() can't be called after register allocation.
It might delete some pseudos, and insert other insns setting
up those pseudos in different places. This of course screws up
the allocation because that may destroy a hardreg for another
pseudo.
XXX we probably should do something like that on our own. I.e.
creating REG_EQUIV notes. */
/*update_equiv_regs ();*/
/* Setup the reg_renumber[] array for reload. */
setup_renumber (1);
sbitmap_free (insns_with_deaths);
/* Remove REG_DEAD notes which are incorrectly set. See the
documentation of that function. */
remove_suspicious_death_notes ();
if ((debug_new_regalloc & DUMP_LAST_RTL) != 0)
ra_print_rtl_with_bb (dump_file, get_insns ());
dump_static_insn_cost (dump_file,
"after allocation/spilling, before reload", NULL);
/* Allocate the reg_equiv_memory_loc array for reload. */
VARRAY_GROW (reg_equiv_memory_loc_varray, max_regno);
reg_equiv_memory_loc = &VARRAY_RTX (reg_equiv_memory_loc_varray, 0);
/* And possibly initialize it. */
allocate_initial_values (reg_equiv_memory_loc);
/* And one last regclass pass just before reload. */
regclass (get_insns (), max_reg_num (), dump_file);
}
/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/
/* Graph coloring register allocator
Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with GCC; see the file COPYING. If not, write to the Free Software
Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#ifndef GCC_RA_H
#define GCC_RA_H
#include "bitmap.h"
#include "sbitmap.h"
#include "hard-reg-set.h"
#include "insn-modes.h"
/* Doubly linked list to implement the per-type lists of webs
and moves. */
struct dlist
{
struct dlist *prev;
struct dlist *next;
union
{
struct web *web;
struct move *move;
} value;
};
/* Simple helper macros for ease of use. */
#define DLIST_WEB(l) ((l)->value.web)
#define DLIST_MOVE(l) ((l)->value.move)
/* Classification of a given node (i.e. what state it's in). */
enum ra_node_type
{
INITIAL = 0, FREE,
PRECOLORED,
SIMPLIFY, SIMPLIFY_SPILL, SIMPLIFY_FAT, FREEZE, SPILL,
SELECT,
SPILLED, COALESCED, COLORED,
LAST_NODE_TYPE
};
/* A list of conflict bitmaps, factorized on the exact part of
the source, which conflicts with the DEFs whose IDs are noted in
the bitmap. This is used while building web-parts with conflicts. */
struct tagged_conflict
{
struct tagged_conflict *next;
bitmap conflicts;
/* If the part of the source identified by size S and byte offset O conflicts,
then size_word == S | (O << 16). */
unsigned int size_word;
};
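/* Worked example (assuming a target where SImode is 4 bytes wide):
   a conflict with the SImode part at byte offset 4 of a wider web
   would be tagged with size_word == 4 | (4 << 16) == 0x40004.  */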
/* Such a structure is allocated initially for each def and use.
In the process of building the interference graph web parts are
connected together if they have common instructions and reference the
same register. That way live ranges are built (by connecting defs and
uses) and, implicitly, complete webs (by connecting web parts in common
uses). */
struct web_part
{
/* The def or use for this web part. */
struct ref *ref;
/* The uplink implementing the disjoint set. */
struct web_part *uplink;
/* Here dynamic information associated with each def/use is saved.
This is all only valid for root web parts (uplink == NULL).
That's the information we need to merge if web parts are unioned. */
/* Number of spanned insns containing any deaths. */
unsigned int spanned_deaths;
/* The list of bitmaps of DEF ID's with which this part conflicts. */
struct tagged_conflict *sub_conflicts;
/* Nonzero if there's any call_insn while this part is live. */
unsigned int crosses_call : 1;
};
/* Web structure used to store info about connected live ranges.
This represents the nodes of the interference graph, which gets
colored. It can also hold subwebs, which are contained in webs
and represent subregs. */
struct web
{
/* Unique web ID. */
unsigned int id;
/* Register number of the live range's variable. */
unsigned int regno;
/* How many insns containing deaths do we span? */
unsigned int span_deaths;
/* Spill_temp indicates if this web was part of a web spilled in the
last iteration, or encodes reasons why this shouldn't be spilled again:
1 spill web, can't be spilled.
2 big spill web (live over some deaths). Discouraged, but not
impossible to spill again.
3 short web (spans no deaths), can't be spilled. */
unsigned int spill_temp;
/* When coalescing we might change spill_temp. If breaking aliases we
need to restore it. */
unsigned int orig_spill_temp;
/* Cost of spilling. */
unsigned HOST_WIDE_INT spill_cost;
unsigned HOST_WIDE_INT orig_spill_cost;
/* How many webs are aliased to us? */
unsigned int num_aliased;
/* The color we got. This is a hardreg number. */
int color;
/* 1 + the color this web got in the last pass. If it hadn't got a color,
or we are in the first pass, or this web is a new one, this is zero. */
int old_color;
/* Now follow some flags characterizing the web. */
/* Nonzero, if we should use usable_regs for this web, instead of
preferred_class() or alternate_class(). */
unsigned int use_my_regs:1;
/* Nonzero if we selected this web as possible spill candidate in
select_spill(). */
unsigned int was_spilled:1;
/* We also need to distinguish webs which are targets of coalescing
(all x with some y, so that x==alias(y)), but the alias field is
only set for sources of coalescing. This flag is set for all webs
involved in coalescing in some way. */
unsigned int is_coalesced:1;
/* Nonzero if this web (or subweb) doesn't correspond to any of
the current function's actual uses of a reg rtx. Happens e.g. with
conflicts to a web of which only a part was still undefined at the
point of that conflict. In this case we construct a subweb
representing these yet undefined bits to have a target for the
conflict. Suppose e.g. this sequence:
(set (reg:DI x) ...)
(set (reg:SI y) ...)
(set (subreg:SI (reg:DI x) 0) ...)
(use (reg:DI x))
Here x only partly conflicts with y. Namely only (subreg:SI (reg:DI x)
1) conflicts with it, but this rtx doesn't show up in the program. For
such things an "artificial" subweb is built, and this flag is true for
them. */
unsigned int artificial:1;
/* Nonzero if we span a call_insn. */
unsigned int crosses_call:1;
/* Whether the web is involved in a move insn. */
unsigned int move_related:1;
/* 1 when this web (or parts thereof) are live over an abnormal edge. */
unsigned int live_over_abnormal:1;
/* Nonzero if this web is used in subregs where the mode change
was illegal for hardregs in CLASS_CANNOT_CHANGE_MODE. */
unsigned int mode_changed:1;
/* Nonzero if some references of this web were in subreg context,
but the actual subreg is already stripped (i.e. we don't know the
outer mode of the actual reference). */
unsigned int subreg_stripped:1;
/* Nonzero, when this web stems from the last pass of the allocator,
and all info is still valid (i.e. it wasn't spilled). */
unsigned int old_web:1;
/* Used in rewrite_program2() to remember webs, which
are already marked for (re)loading. */
unsigned int in_load:1;
/* If in_load is nonzero, then this is nonzero if only one use was seen
since insertion into the loadlist. Zero if more uses currently need a
reload. Used to differentiate between inserting register loads or
directly substituting the stackref. */
unsigned int one_load:1;
/* When using rewrite_program2() this flag gets set if some insns
were inserted on behalf of this web. IR spilling might ignore some
deaths up to the def, so no code might be emitted and we need not
spill such a web again. */
unsigned int changed:1;
/* With interference region spilling it's sometimes the case that a
bb border is also an IR border for webs which were targets of moves
already removed due to coalescing. All webs which are
a destination of such a removed move have this flag set. */
/* For optimistic coalescing we need to be able to break aliases, which
includes restoring conflicts to those before coalescing. This flag
is set if we have a list of conflicts before coalescing. It's needed
because that list is lazily constructed only when actually needed. */
unsigned int have_orig_conflicts:1;
/* Current state of the node. */
ENUM_BITFIELD(ra_node_type) type:5;
/* A regclass, combined from preferred and alternate class, or calculated
from usable_regs. Used only for debugging, and to determine
add_hardregs. */
ENUM_BITFIELD(reg_class) regclass:10;
/* Additional consecutive hardregs needed for this web. */
int add_hardregs;
/* Number of conflicts currently. */
int num_conflicts;
/* Numbers of uses and defs, which belong to this web. */
unsigned int num_uses;
unsigned int num_defs;
/* The (reg:M a) or (subreg:M1 (reg:M2 a) x) rtx which this
web is based on. This is used to distinguish subreg webs
from its reg parents, and to get hold of the mode. */
rtx orig_x;
/* If this web is a subweb, this points to the super web. Otherwise
it's NULL. */
struct web *parent_web;
/* If this web is a subweb, but not the last one, this points to the
next subweb of the same super web. Otherwise it's NULL. */
struct web *subreg_next;
/* The set of webs (or subwebs), this web conflicts with. */
struct conflict_link *conflict_list;
/* If have_orig_conflicts is set this contains a copy of conflict_list,
as it was right after building the interference graph.
It's used for incremental i-graph building and for breaking
coalescings again. */
struct conflict_link *orig_conflict_list;
/* Bitmap of all conflicts which don't count this pass, because of
non-intersecting hardregs of the conflicting webs. See also
record_conflict(). */
bitmap useless_conflicts;
/* Different sets of hard registers, for all usable registers, ... */
HARD_REG_SET usable_regs;
/* ... the same before coalescing, ... */
HARD_REG_SET orig_usable_regs;
/* ... colors of all already colored neighbors (used when biased coloring
is active), and ... */
HARD_REG_SET bias_colors;
/* ... colors of PRECOLORED webs this web is connected to by a move. */
HARD_REG_SET prefer_colors;
/* Number of usable colors in usable_regs. */
int num_freedom;
/* After successfully coloring the graph, each web gets a new reg rtx,
with which the original uses and defs are replaced. This is it. */
rtx reg_rtx;
/* While spilling this is the rtx of the home of spilled webs.
It can be a mem ref (a stack slot), or a pseudo register. */
rtx stack_slot;
/* Used in rewrite_program2() to remember the using
insn last seen for webs needing (re)loads. */
rtx last_use_insn;
/* If this web is rematerializable, this contains the RTL pattern
usable as source for that. Otherwise it's NULL. */
rtx pattern;
/* All the defs and uses. There are num_defs, resp.
num_uses elements. */
struct ref **defs; /* [0..num_defs-1] */
struct ref **uses; /* [0..num_uses-1] */
/* The web to which this web is aliased (coalesced). If NULL, this
web is not coalesced into some other (but might still be a target
for other webs). */
struct web *alias;
/* With iterated coalescing this is a list of active moves this web
is involved in. */
struct move_list *moves;
/* The list implementation. */
struct dlist *dlink;
/* While building webs out of web-parts, this holds a (partial)
list of all refs for this web seen so far. */
struct df_link *temp_refs;
};
/* For implementing a single linked list. */
struct web_link
{
struct web_link *next;
struct web *web;
};
/* A subconflict is part of a conflict edge to track precisely which
parts of two webs conflict, in case not all of both webs do. */
struct sub_conflict
{
/* The next partial conflict. For one such list the parent-web of
all the S webs, resp. the parent of all the T webs are constant. */
struct sub_conflict *next;
struct web *s;
struct web *t;
};
/* This represents an edge in the conflict graph. */
struct conflict_link
{
struct conflict_link *next;
/* The web we conflict with (the Target of the edge). */
struct web *t;
/* If not the complete source web and T conflict, this points to
the list of parts which really conflict. */
struct sub_conflict *sub;
};
/* For iterated coalescing the moves can be in these states. */
enum move_type
{
WORKLIST, MV_COALESCED, CONSTRAINED, FROZEN, ACTIVE,
LAST_MOVE_TYPE
};
/* Structure of a move we are considering coalescing. */
struct move
{
rtx insn;
struct web *source_web;
struct web *target_web;
enum move_type type;
struct dlist *dlink;
};
/* List of moves. */
struct move_list
{
struct move_list *next;
struct move *move;
};
/* To have fast access to the defs and uses per insn, we have one such
structure per insn. The difference from the normal df.c structures
is that it doesn't contain any NULL refs (which df.c produces when
an insn was modified), and that it only contains refs to pseudo regs
or to hardregs which matter for allocation, i.e. those not in
never_use_colors. */
struct ra_insn_info
{
unsigned int num_defs, num_uses;
struct ref **defs, **uses;
};
/* The above structures are stored in this array, indexed by UID... */
extern struct ra_insn_info *insn_df;
/* ... and the size of that array, as we add insns after setting it up. */
extern int insn_df_max_uid;
/* The interference graph. */
extern sbitmap igraph;
/* And how to access it. I and J are web indices. If the bit
igraph_index(I, J) is set, then they conflict. Note that if only
parts of webs conflict, then also only those parts are noted in
the I-graph (not the parent webs). */
#define igraph_index(i, j) ((i) < (j) ? ((j)*((j)-1)/2)+(i) : ((i)*((i)-1)/2)+(j))
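/* Worked example: igraph_index (2, 5) == 5*4/2 + 2 == 12, and
   igraph_index (5, 2) yields the same index, i.e. each unordered
   pair of webs maps to exactly one bit of this triangular matrix.  */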
/* This is the bitmap of all (even partly) conflicting super webs.
If bit I*num_webs+J or J*num_webs+I is set, then I and J (both being
super web indices) conflict, maybe only partially. Note the
asymmetry. */
extern sbitmap sup_igraph;
/* After the first pass, and when interference region spilling is
activated, bit I is set when the insn with UID I contains some
refs to pseudos which die at the insn. */
extern sbitmap insns_with_deaths;
/* The size of that sbitmap. */
extern int death_insns_max_uid;
/* All the web-parts. There are exactly as many web-parts as there
are register refs in the insn stream. */
extern struct web_part *web_parts;
/* The number of all webs, including subwebs. */
extern unsigned int num_webs;
/* The number of just the subwebs. */
extern unsigned int num_subwebs;
/* The number of all webs, including subwebs. */
extern unsigned int num_allwebs;
/* For easy access when given a web ID: id2web[W->id] == W. */
extern struct web **id2web;
/* For each hardreg, the web which represents it. */
extern struct web *hardreg2web[FIRST_PSEUDO_REGISTER];
/* Given the ID of a df_ref which represents a DEF, def2web[ID] is
the web to which this def belongs. */
extern struct web **def2web;
/* The same as def2web, just for uses. */
extern struct web **use2web;
/* The list of all recognized and coalescable move insns. */
extern struct move_list *wl_moves;
/* Some parts of the compiler which we run after colorizing
clean reg_renumber[], so we need another place for the colors.
This is copied to reg_renumber[] just before returning to toplev. */
extern short *ra_reg_renumber;
/* The size of that array. Some passes after coloring might have created
new pseudos, which will get no color. */
extern int ra_max_regno;
/* The dataflow structure of the current function, while regalloc
runs. */
extern struct df *df;
/* For each basic block B we have a bitmap of DF_REF_ID's of uses,
which backward reach the end of B. */
extern bitmap *live_at_end;
/* One pass is: collecting register refs, building the I-graph, spilling.
And this is how often we already ran that for the current function. */
extern int ra_pass;
/* The maximum pseudo regno, just before register allocation starts.
While regalloc runs, all pseudos with a larger number potentially
represent stack slots or hardregs. I call them stackwebs or
stackpseudos. */
extern unsigned int max_normal_pseudo;
/* One of the fixed colors. It must be < FIRST_PSEUDO_REGISTER, because
we sometimes want to check the color against a HARD_REG_SET. It must
be >= 0, because negative values mean "no color".
This color is used for the above stackwebs, when they can't be colored.
I.e. normally they would be spilled, but they already represent
stackslots. So they are colored with an invalid color. It has
the property that even webs which conflict can have that color at the
same time. I.e. a stackweb with that color really represents a
stackslot. */
extern int an_unusable_color;
/* While building the I-graph, every time insn UID is looked at,
number_seen[UID] is incremented. For debugging. */
extern int *number_seen;
/* The different lists on which a web can be (based on the type). */
extern struct dlist *web_lists[(int) LAST_NODE_TYPE];
#define WEBS(type) (web_lists[(int)(type)])
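/* E.g. one_pass() tests WEBS (SPILLED) to decide whether spill code
   must be emitted and the allocator rerun.  */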
/* The largest DF_REF_ID of defs resp. uses, as it was in the
last pass. In the first pass this is zero. Used to distinguish new
from old references. */
extern unsigned int last_def_id;
extern unsigned int last_use_id;
/* Similar for UIDs and number of webs. */
extern int last_max_uid;
extern unsigned int last_num_webs;
/* If I is the ID of an old use, and last_check_uses[I] is set,
then we must reevaluate its flow while building the new I-graph. */
extern sbitmap last_check_uses;
/* If nonzero, record_conflict() saves the current conflict list of
a web in orig_conflict_list (when not already done) before the
conflict list is changed. It is set after initially building the
I-graph, i.e. new conflicts due to coalescing trigger that copying. */
extern unsigned int remember_conflicts;
/* The maximum UID right before calling regalloc().
Used to detect any instructions inserted by the allocator. */
extern int orig_max_uid;
/* A HARD_REG_SET of those colors which can't be used for coloring.
Includes e.g. fixed_regs. */
extern HARD_REG_SET never_use_colors;
/* For each class C this is reg_class_contents[C] \ never_use_colors. */
extern HARD_REG_SET usable_regs[N_REG_CLASSES];
/* For each class C the count of hardregs in usable_regs[C]. */
extern unsigned int num_free_regs[N_REG_CLASSES];
/* For each class C which has num_free_regs[C]==1, the color of the
single register in that class, -1 otherwise. */
extern int single_reg_in_regclass[N_REG_CLASSES];
/* For each mode M the hardregs which are MODE_OK for M and have
enough space behind them to hold an M value. Additionally,
if reg R is OK for mode M but needs two hardregs, then R+1 will
also be set here, even if R+1 itself is not OK for M. I.e. this
represents the possible resources which could be taken away by a value
in mode M. */
extern HARD_REG_SET hardregs_for_mode[NUM_MACHINE_MODES];
/* The set of hardregs, for which _any_ mode change is invalid. */
extern HARD_REG_SET invalid_mode_change_regs;
/* For 0 <= I <= 255, the number of bits set in I. Used to calculate
the number of set bits in a HARD_REG_SET. */
extern unsigned char byte2bitcount[256];
/* Expressive helper macros. */
#define ID2WEB(I) id2web[I]
#define NUM_REGS(W) (((W)->type == PRECOLORED) ? 1 : (W)->num_freedom)
#define SUBWEB_P(W) (GET_CODE ((W)->orig_x) == SUBREG)
/* Constants usable as debug levels for ra_debug_msg. */
#define DUMP_COSTS 0x0001
#define DUMP_WEBS 0x0002
#define DUMP_IGRAPH 0x0004
#define DUMP_PROCESS 0x0008
#define DUMP_COLORIZE 0x0010
#define DUMP_ASM 0x0020
#define DUMP_CONSTRAINTS 0x0040
#define DUMP_RESULTS 0x0080
#define DUMP_DF 0x0100
#define DUMP_RTL 0x0200
#define DUMP_FINAL_RTL 0x0400
#define DUMP_REGCLASS 0x0800
#define DUMP_SM 0x1000
#define DUMP_LAST_FLOW 0x2000
#define DUMP_LAST_RTL 0x4000
#define DUMP_REBUILD 0x8000
#define DUMP_IGRAPH_M 0x10000
#define DUMP_VALIDIFY 0x20000
#define DUMP_EVER ((unsigned int)-1)
#define DUMP_NEARLY_EVER (DUMP_EVER - DUMP_COSTS - DUMP_IGRAPH_M)
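/* Since DUMP_EVER has all bits set, the subtractions above simply
   clear the DUMP_COSTS and DUMP_IGRAPH_M bits.  */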
/* All the wanted debug levels as ORing of the various DUMP_xxx
constants. */
extern unsigned int debug_new_regalloc;
/* Nonzero means we want biased coloring. */
extern int flag_ra_biased;
/* Nonzero if we want to use improved (and slow) spilling. This
also includes interference region spilling (see below). */
extern int flag_ra_improved_spilling;
/* Nonzero for using interference region spilling. Zero for improved
Chaitin-style spilling (only at deaths). */
extern int flag_ra_ir_spilling;
/* Nonzero if we use optimistic coalescing, zero for iterated
coalescing. */
extern int flag_ra_optimistic_coalescing;
/* Nonzero if we want to break aliases of spilled webs. Forced to
nonzero when flag_ra_optimistic_coalescing is. */
extern int flag_ra_break_aliases;
/* Nonzero if we want to merge the spill costs of webs which
are coalesced. */
extern int flag_ra_merge_spill_costs;
/* Nonzero if we want to spill at every use, instead of at deaths,
or interference region borders. */
extern int flag_ra_spill_every_use;
/* Nonzero to output all notes in the debug dumps. */
extern int flag_ra_dump_notes;
extern void * ra_alloc (size_t);
extern void * ra_calloc (size_t);
extern int hard_regs_count (HARD_REG_SET);
extern rtx ra_emit_move_insn (rtx, rtx);
extern void ra_debug_msg (unsigned int, const char *, ...) ATTRIBUTE_PRINTF_2;
extern int hard_regs_intersect_p (HARD_REG_SET *, HARD_REG_SET *);
extern unsigned int rtx_to_bits (rtx);
extern struct web * find_subweb (struct web *, rtx);
extern struct web * find_subweb_2 (struct web *, unsigned int);
extern struct web * find_web_for_subweb_1 (struct web *);
#define find_web_for_subweb(w) (((w)->parent_web) \
? find_web_for_subweb_1 ((w)->parent_web) \
: (w))
extern void ra_build_realloc (struct df *);
extern void ra_build_free (void);
extern void ra_build_free_all (struct df *);
extern void ra_colorize_init (void);
extern void ra_colorize_free_all (void);
extern void ra_rewrite_init (void);
extern void ra_print_rtx (FILE *, rtx, int);
extern void ra_print_rtx_top (FILE *, rtx, int);
extern void ra_debug_rtx (rtx);
extern void ra_debug_insns (rtx, int);
extern void ra_debug_bbi (int);
extern void ra_print_rtl_with_bb (FILE *, rtx);
extern void dump_igraph (struct df *);
extern void dump_igraph_machine (void);
extern void dump_constraints (void);
extern void dump_cost (unsigned int);
extern void dump_graph_cost (unsigned int, const char *);
extern void dump_ra (struct df *);
extern void dump_number_seen (void);
extern void dump_static_insn_cost (FILE *, const char *, const char *);
extern void dump_web_conflicts (struct web *);
extern void dump_web_insns (struct web*);
extern int web_conflicts_p (struct web *, struct web *);
extern void debug_hard_reg_set (HARD_REG_SET);
extern void remove_list (struct dlist *, struct dlist **);
extern struct dlist * pop_list (struct dlist **);
extern void record_conflict (struct web *, struct web *);
extern int memref_is_stack_slot (rtx);
extern void build_i_graph (struct df *);
extern void put_web (struct web *, enum ra_node_type);
extern void remove_web_from_list (struct web *);
extern void reset_lists (void);
extern struct web * alias (struct web *);
extern void merge_moves (struct web *, struct web *);
extern void ra_colorize_graph (struct df *);
extern void actual_spill (void);
extern void emit_colors (struct df *);
extern void delete_moves (void);
extern void setup_renumber (int);
extern void remove_suspicious_death_notes (void);
#endif /* GCC_RA_H */
/* toplev.h - Various declarations for functions found in toplev.c
Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is part of GCC.
extern int flag_unroll_all_loops;
extern int flag_unswitch_loops;
extern int flag_cprop_registers;
extern int time_report;
extern int flag_new_regalloc;
extern int flag_tree_based_profiling;
/* Things to do with target switches. */