Commit f754734f authored by Richard Sandiford, committed by Richard Sandiford

ira-color.c (conflict_allocno_vec): Delete.

gcc/
	* ira-color.c (conflict_allocno_vec): Delete.
	(update_cost_queue_elem): New structure.
	(update_cost_queue): New variable.
	(update_cost_queue_tail): Likewise.
	(update_cost_queue_elems): Likewise.
	(allocno_update_cost_check): Delete.
	(initiate_cost_update): Allocate update_cost_queue_elems
	instead of allocno_update_cost_check.
	(finish_cost_update): Update the free()s accordingly.
	(start_update_cost): New function.
	(queue_update_cost): Likewise.
	(get_next_update_cost): Likewise.
	(update_copy_costs_1): Inline into...
	(update_copy_costs): ...here.  Use a queue instead of recursive calls.
	Use cover_class instead of ALLOCNO_COVER_CLASS (another_allocno),
	once we've established they are equal.  Don't allocate update
	costs if there is nothing to add to them.
	(update_conflict_hard_regno_costs): Remove ALLOCNO and
	DIVISOR arguments.  Use a queue instead of recursive calls;
	process all the allocnos in the initial queue, rather than
	a single allocno.
	(assign_hard_reg): Use queue_update_cost instead of
	conflict_allocno_vec.  Queue coalesced allocnos instead
	of calling update_conflict_hard_regno_costs for each one.
	Just call update_conflict_hard_regno_costs once for the
	entire queue.
	(ira_color): Remove conflict_allocno_vec handling.

From-SVN: r140335
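At its core the patch replaces two recursive walks over the copy graph (update_copy_costs_1 and the old update_conflict_hard_regno_costs) with a single shared worklist: allocnos are appended to an intrusive FIFO queue, a generation counter (update_cost_check) serves as the visited test, and the scale factor DIVISOR is carried along with each element and multiplied by COST_HOP_DIVISOR per hop. A minimal standalone sketch of that pattern follows; the names and the toy graph are hypothetical illustrations, not GCC code.

#include <stdio.h>

#define NUM_NODES 6
#define COST_HOP_DIVISOR 4

struct queue_elem
{
  int check;    /* in the queue iff check == the current generation */
  int divisor;  /* COST_HOP_DIVISOR**hops from the seed node */
  int next;     /* next node index, or -1 if this is the last element */
};

static struct queue_elem elems[NUM_NODES];
static int queue_head = -1, queue_tail = -1;
static int generation;

/* Begin a pass: bumping the generation invalidates every old
   "visited" mark without re-initializing the pool.  */
static void
start_pass (void)
{
  generation++;
  queue_head = -1;
}

/* Append node N at divisor DIVISOR unless it was already queued.  */
static void
queue_node (int n, int divisor)
{
  if (elems[n].check == generation)
    return;
  elems[n].check = generation;
  elems[n].divisor = divisor;
  elems[n].next = -1;
  if (queue_head < 0)
    queue_head = n;
  else
    elems[queue_tail].next = n;
  queue_tail = n;
}

/* Pop the head of the queue; return 0 when it is empty.  */
static int
next_node (int *n, int *divisor)
{
  if (queue_head < 0)
    return 0;
  *n = queue_head;
  *divisor = elems[queue_head].divisor;
  queue_head = elems[queue_head].next;
  return 1;
}

int
main (void)
{
  /* A toy copy graph: each node lists up to two neighbours.  */
  static const int edge[NUM_NODES][2] =
    { {1, 2}, {3, -1}, {3, 4}, {5, -1}, {-1, -1}, {-1, -1} };
  int n, divisor;

  start_pass ();
  queue_node (0, 1);
  while (next_node (&n, &divisor))
    {
      printf ("visit node %d at divisor %d\n", n, divisor);
      for (int i = 0; i < 2; i++)
        if (edge[n][i] >= 0)
          queue_node (edge[n][i], divisor * COST_HOP_DIVISOR);
    }
  return 0;
}

A side effect of draining the queue breadth-first is that each node is processed at the smallest divisor (shortest hop count) at which it is reachable, which is exactly the invariant the new update_cost_queue_elem comment states.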
gcc/ira-color.c
@@ -68,9 +68,6 @@ static ira_allocno_t *sorted_allocnos;
 /* Vec representing the stack of allocnos used during coloring.  */
 static VEC(ira_allocno_t,heap) *allocno_stack_vec;
 
-/* Vec representing conflict allocnos used during assigning.  */
-static VEC(ira_allocno_t,heap) *conflict_allocno_vec;
-
 /* Array used to choose an allocno for spilling.  */
 static ira_allocno_t *allocnos_for_spilling;
@@ -94,9 +91,31 @@ static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec;
    register was already allocated for an allocno.  */
 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
 
-/* Array used to check already processed allocnos during the current
-   update_copy_costs call.  */
-static int *allocno_update_cost_check;
+/* Describes one element in a queue of allocnos whose costs need to be
+   updated.  Each allocno in the queue is known to have a cover class.  */
+struct update_cost_queue_elem
+{
+  /* This element is in the queue iff CHECK == update_cost_check.  */
+  int check;
+
+  /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
+     connecting this allocno to the one being allocated.  */
+  int divisor;
+
+  /* The next allocno in the queue, or null if this is the last element.  */
+  ira_allocno_t next;
+};
+
+/* The first element in a queue of allocnos whose copy costs need to be
+   updated.  Null if the queue is empty.  */
+static ira_allocno_t update_cost_queue;
+
+/* The last element in the queue described by update_cost_queue.
+   Not valid if update_cost_queue is null.  */
+static struct update_cost_queue_elem *update_cost_queue_tail;
+
+/* A pool of elements in the queue described by update_cost_queue.
+   Elements are indexed by ALLOCNO_NUM.  */
+static struct update_cost_queue_elem *update_cost_queue_elems;
 
 /* The current value of update_copy_cost call count.  */
 static int update_cost_check;
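One detail of this layout is easy to miss: the queue's links (update_cost_queue and each element's next field) hold allocnos, while the bookkeeping lives in a parallel pool indexed by ALLOCNO_NUM, and update_cost_queue_tail points at a pool element rather than an allocno. A small sketch of that split representation, using hypothetical names and types (not GCC code):

#include <stdio.h>
#include <stddef.h>

struct node { int num; const char *name; };

struct queue_elem
{
  int check;           /* in the queue iff check == current pass number */
  struct node *next;   /* next node in the queue, or NULL */
};

static struct node nodes[3] = { {0, "a0"}, {1, "a1"}, {2, "a2"} };
static struct queue_elem elems[3];   /* pool, indexed by node number */
static struct node *queue_head;
static struct queue_elem *queue_tail;
static int pass = 1;

static void
enqueue (struct node *n)
{
  struct queue_elem *e = &elems[n->num];   /* pool entry for N */
  if (e->check == pass)
    return;                                /* O(1) membership test */
  e->check = pass;
  e->next = NULL;
  if (queue_head == NULL)
    queue_head = n;
  else
    queue_tail->next = n;                  /* link stores the node */
  queue_tail = e;                          /* tail stores the pool entry */
}

static struct node *
dequeue (void)
{
  struct node *n = queue_head;
  if (n != NULL)
    queue_head = elems[n->num].next;       /* index back into the pool */
  return n;
}

int
main (void)
{
  enqueue (&nodes[2]);
  enqueue (&nodes[0]);
  enqueue (&nodes[2]);                     /* duplicate: ignored */
  for (struct node *n; (n = dequeue ()) != NULL; )
    printf ("%s\n", n->name);              /* prints a2 then a0 */
  return 0;
}

Dequeuing therefore yields the allocno directly, and its pool entry is recovered by index, which is why get_next_update_cost below needs no search.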
@@ -106,9 +125,12 @@ static int update_cost_check;
 static void
 initiate_cost_update (void)
 {
-  allocno_update_cost_check
-    = (int *) ira_allocate (ira_allocnos_num * sizeof (int));
-  memset (allocno_update_cost_check, 0, ira_allocnos_num * sizeof (int));
+  size_t size;
+
+  size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
+  update_cost_queue_elems
+    = (struct update_cost_queue_elem *) ira_allocate (size);
+  memset (update_cost_queue_elems, 0, size);
   update_cost_check = 0;
 }
...@@ -116,7 +138,7 @@ initiate_cost_update (void) ...@@ -116,7 +138,7 @@ initiate_cost_update (void)
static void static void
finish_cost_update (void) finish_cost_update (void)
{ {
ira_free (allocno_update_cost_check); ira_free (update_cost_queue);
} }
/* When we traverse allocnos to update hard register costs, the cost /* When we traverse allocnos to update hard register costs, the cost
@@ -124,161 +146,196 @@ finish_cost_update (void)
    hop from given allocno to directly connected allocnos.  */
 #define COST_HOP_DIVISOR 4
 
-/* This recursive function updates costs (decrease if DECR_P) of the
-   unassigned allocnos connected by copies with ALLOCNO.  This update
-   increases chances to remove some copies.  Copy cost is proportional
-   the copy frequency divided by DIVISOR.  */
-static void
-update_copy_costs_1 (ira_allocno_t allocno, int hard_regno,
-                     bool decr_p, int divisor)
-{
-  int i, cost, update_cost;
-  enum machine_mode mode;
-  enum reg_class rclass, cover_class;
-  ira_allocno_t another_allocno;
-  ira_copy_t cp, next_cp;
-
-  cover_class = ALLOCNO_COVER_CLASS (allocno);
-  if (cover_class == NO_REGS)
-    return;
-  if (allocno_update_cost_check[ALLOCNO_NUM (allocno)] == update_cost_check)
-    return;
-  allocno_update_cost_check[ALLOCNO_NUM (allocno)] = update_cost_check;
-  ira_assert (hard_regno >= 0);
-  i = ira_class_hard_reg_index[cover_class][hard_regno];
-  ira_assert (i >= 0);
-  rclass = REGNO_REG_CLASS (hard_regno);
-  mode = ALLOCNO_MODE (allocno);
-  for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
-    {
-      if (cp->first == allocno)
-        {
-          next_cp = cp->next_first_allocno_copy;
-          another_allocno = cp->second;
-        }
-      else if (cp->second == allocno)
-        {
-          next_cp = cp->next_second_allocno_copy;
-          another_allocno = cp->first;
-        }
-      else
-        gcc_unreachable ();
-      if (cover_class
-          != ALLOCNO_COVER_CLASS (another_allocno)
-          || ALLOCNO_ASSIGNED_P (another_allocno))
-        continue;
-      cost = (cp->second == allocno
-              ? ira_register_move_cost[mode][rclass]
-                [ALLOCNO_COVER_CLASS (another_allocno)]
-              : ira_register_move_cost[mode]
-                [ALLOCNO_COVER_CLASS (another_allocno)][rclass]);
-      if (decr_p)
-        cost = -cost;
-      ira_allocate_and_set_or_copy_costs
-        (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
-         ALLOCNO_COVER_CLASS_COST (another_allocno),
-         ALLOCNO_HARD_REG_COSTS (another_allocno));
-      ira_allocate_and_set_or_copy_costs
-        (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
-         cover_class, 0,
-         ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
-      update_cost = cp->freq * cost / divisor;
-      ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
-      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
-        += update_cost;
-      if (update_cost != 0)
-        update_copy_costs_1 (another_allocno, hard_regno,
-                             decr_p, divisor * COST_HOP_DIVISOR);
-    }
-}
+/* Start a new cost-updating pass.  */
+static void
+start_update_cost (void)
+{
+  update_cost_check++;
+  update_cost_queue = NULL;
+}
+
+/* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue,
+   unless ALLOCNO is already in the queue, or has no cover class.  */
+static inline void
+queue_update_cost (ira_allocno_t allocno, int divisor)
+{
+  struct update_cost_queue_elem *elem;
+
+  elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
+  if (elem->check != update_cost_check
+      && ALLOCNO_COVER_CLASS (allocno) != NO_REGS)
+    {
+      elem->check = update_cost_check;
+      elem->divisor = divisor;
+      elem->next = NULL;
+      if (update_cost_queue == NULL)
+        update_cost_queue = allocno;
+      else
+        update_cost_queue_tail->next = allocno;
+      update_cost_queue_tail = elem;
+    }
+}
+
+/* Try to remove the first element from update_cost_queue.  Return false
+   if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
+   the removed element.  */
+static inline bool
+get_next_update_cost (ira_allocno_t *allocno, int *divisor)
+{
+  struct update_cost_queue_elem *elem;
+
+  if (update_cost_queue == NULL)
+    return false;
+
+  *allocno = update_cost_queue;
+  elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
+  *divisor = elem->divisor;
+  update_cost_queue = elem->next;
+  return true;
+}
 
 /* Update the cost of allocnos to increase chances to remove some
    copies as the result of subsequent assignment.  */
 static void
 update_copy_costs (ira_allocno_t allocno, bool decr_p)
 {
-  update_cost_check++;
-  update_copy_costs_1 (allocno, ALLOCNO_HARD_REGNO (allocno), decr_p, 1);
+  int i, cost, update_cost, hard_regno, divisor;
+  enum machine_mode mode;
+  enum reg_class rclass, cover_class;
+  ira_allocno_t another_allocno;
+  ira_copy_t cp, next_cp;
+
+  hard_regno = ALLOCNO_HARD_REGNO (allocno);
+  ira_assert (hard_regno >= 0);
+  cover_class = ALLOCNO_COVER_CLASS (allocno);
+  if (cover_class == NO_REGS)
+    return;
+  i = ira_class_hard_reg_index[cover_class][hard_regno];
+  ira_assert (i >= 0);
+  rclass = REGNO_REG_CLASS (hard_regno);
+
+  start_update_cost ();
+  divisor = 1;
+  do
+    {
+      mode = ALLOCNO_MODE (allocno);
+      for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
+        {
+          if (cp->first == allocno)
+            {
+              next_cp = cp->next_first_allocno_copy;
+              another_allocno = cp->second;
+            }
+          else if (cp->second == allocno)
+            {
+              next_cp = cp->next_second_allocno_copy;
+              another_allocno = cp->first;
+            }
+          else
+            gcc_unreachable ();
+
+          if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
+              || ALLOCNO_ASSIGNED_P (another_allocno))
+            continue;
+
+          cost = (cp->second == allocno
+                  ? ira_register_move_cost[mode][rclass][cover_class]
+                  : ira_register_move_cost[mode][cover_class][rclass]);
+          if (decr_p)
+            cost = -cost;
+
+          update_cost = cp->freq * cost / divisor;
+          if (update_cost == 0)
+            continue;
+
+          ira_allocate_and_set_or_copy_costs
+            (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
+             ALLOCNO_COVER_CLASS_COST (another_allocno),
+             ALLOCNO_HARD_REG_COSTS (another_allocno));
+          ira_allocate_and_set_or_copy_costs
+            (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
+             cover_class, 0,
+             ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
+          ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
+          ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
+            += update_cost;
+
+          queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
+        }
+    }
+  while (get_next_update_cost (&allocno, &divisor));
 }
 
-/* This recursive function updates COSTS (decrease if DECR_P) by
-   conflict costs of the unassigned allocnos connected by copies with
-   ALLOCNO.  This update increases chances to remove some copies.
-   Copy cost is proportional to the copy frequency divided by
-   DIVISOR.  */
+/* This function updates COSTS (decrease if DECR_P) by conflict costs
+   of the unassigned allocnos connected by copies with allocnos in
+   update_cost_queue.  This update increases chances to remove some
+   copies.  */
 static void
-update_conflict_hard_regno_costs (int *costs, ira_allocno_t allocno,
-                                  int divisor, bool decr_p)
+update_conflict_hard_regno_costs (int *costs, bool decr_p)
 {
-  int i, cost, class_size, freq, mult, div;
+  int i, cost, class_size, freq, mult, div, divisor;
   int *conflict_costs;
   bool cont_p;
-  enum machine_mode mode;
   enum reg_class cover_class;
-  ira_allocno_t another_allocno;
+  ira_allocno_t allocno, another_allocno;
   ira_copy_t cp, next_cp;
 
-  cover_class = ALLOCNO_COVER_CLASS (allocno);
-  /* Probably 5 hops will be enough.  */
-  if (divisor > (COST_HOP_DIVISOR * COST_HOP_DIVISOR
-                 * COST_HOP_DIVISOR * COST_HOP_DIVISOR * COST_HOP_DIVISOR))
-    return;
-  if (cover_class == NO_REGS)
-    return;
-  /* Check that it was already visited.  */
-  if (allocno_update_cost_check[ALLOCNO_NUM (allocno)] == update_cost_check)
-    return;
-  allocno_update_cost_check[ALLOCNO_NUM (allocno)] = update_cost_check;
-  mode = ALLOCNO_MODE (allocno);
-  class_size = ira_class_hard_regs_num[cover_class];
-  for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
-    {
-      if (cp->first == allocno)
-        {
-          next_cp = cp->next_first_allocno_copy;
-          another_allocno = cp->second;
-        }
-      else if (cp->second == allocno)
-        {
-          next_cp = cp->next_second_allocno_copy;
-          another_allocno = cp->first;
-        }
-      else
-        gcc_unreachable ();
-      if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
-          || ALLOCNO_ASSIGNED_P (another_allocno)
-          || ALLOCNO_MAY_BE_SPILLED_P (another_allocno))
-        continue;
-      ira_allocate_and_copy_costs
-        (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
-         cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
-      conflict_costs
-        = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
-      if (conflict_costs == NULL)
-        cont_p = true;
-      else
-        {
-          mult = cp->freq;
-          freq = ALLOCNO_FREQ (another_allocno);
-          if (freq == 0)
-            freq = 1;
-          div = freq * divisor;
-          cont_p = false;
-          for (i = class_size - 1; i >= 0; i--)
-            {
-              cost = conflict_costs [i] * mult / div;
-              if (cost == 0)
-                continue;
-              cont_p = true;
-              if (decr_p)
-                cost = -cost;
-              costs[i] += cost;
-            }
-        }
-      if (cont_p)
-        update_conflict_hard_regno_costs (costs, another_allocno,
-                                          divisor * COST_HOP_DIVISOR, decr_p);
-    }
+  while (get_next_update_cost (&allocno, &divisor))
+    for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
+      {
+        if (cp->first == allocno)
+          {
+            next_cp = cp->next_first_allocno_copy;
+            another_allocno = cp->second;
+          }
+        else if (cp->second == allocno)
+          {
+            next_cp = cp->next_second_allocno_copy;
+            another_allocno = cp->first;
+          }
+        else
+          gcc_unreachable ();
+
+        cover_class = ALLOCNO_COVER_CLASS (allocno);
+        if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
+            || ALLOCNO_ASSIGNED_P (another_allocno)
+            || ALLOCNO_MAY_BE_SPILLED_P (another_allocno))
+          continue;
+
+        class_size = ira_class_hard_regs_num[cover_class];
+        ira_allocate_and_copy_costs
+          (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
+           cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
+        conflict_costs
+          = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
+        if (conflict_costs == NULL)
+          cont_p = true;
+        else
+          {
+            mult = cp->freq;
+            freq = ALLOCNO_FREQ (another_allocno);
+            if (freq == 0)
+              freq = 1;
+            div = freq * divisor;
+            cont_p = false;
+            for (i = class_size - 1; i >= 0; i--)
+              {
+                cost = conflict_costs [i] * mult / div;
+                if (cost == 0)
+                  continue;
+                cont_p = true;
+                if (decr_p)
+                  cost = -cost;
+                costs[i] += cost;
+              }
+          }
+
+        /* Probably 5 hops will be enough.  */
+        if (cont_p
+            && divisor <= (COST_HOP_DIVISOR
+                           * COST_HOP_DIVISOR
+                           * COST_HOP_DIVISOR
+                           * COST_HOP_DIVISOR))
+          queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
+      }
 }
 
 /* Sort allocnos according to the profit of usage of a hard register
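The rewrite keeps the old damping scheme: a copy k hops away contributes roughly cost * freq / COST_HOP_DIVISOR**k, and update_conflict_hard_regno_costs stops queueing neighbours once the divisor passes COST_HOP_DIVISOR**4, preserving the old "probably 5 hops will be enough" cutoff. A standalone sketch of the arithmetic, with hypothetical starting values (not GCC code):

#include <stdio.h>

#define COST_HOP_DIVISOR 4

int
main (void)
{
  int freq = 1000, cost = 8;   /* hypothetical copy frequency and move cost */
  int divisor = 1;

  for (int hop = 0; ; hop++)
    {
      /* Integer division, as in the real code: far-away copies
         quickly round their contribution down to zero.  */
      int update = freq * cost / divisor;
      printf ("hop %d: divisor %d, update %d\n", hop, divisor, update);

      /* Same bound as the new code: queue the next hop only while
         divisor <= COST_HOP_DIVISOR**4.  */
      if (divisor > COST_HOP_DIVISOR * COST_HOP_DIVISOR
                    * COST_HOP_DIVISOR * COST_HOP_DIVISOR)
        break;
      divisor *= COST_HOP_DIVISOR;
    }
  return 0;
}

With these values the contributions fall 8000, 2000, 500, 125, 31, 7 across hops 0-5, which is why truncating the walk after about five hops loses essentially nothing.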
@@ -355,6 +412,7 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p)
 #ifdef STACK_REGS
   no_stack_reg_p = false;
 #endif
+  start_update_cost ();
   for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
        a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
     {
@@ -419,8 +477,7 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p)
               if (conflict_costs != NULL)
                 for (j = class_size - 1; j >= 0; j--)
                   full_costs[j] -= conflict_costs[j];
-              VEC_safe_push (ira_allocno_t, heap, conflict_allocno_vec,
-                             conflict_allocno);
+              queue_update_cost (conflict_allocno, COST_HOP_DIVISOR);
             }
         }
       if (a == allocno)
@@ -428,24 +485,19 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p)
     }
   /* Take into account preferences of allocnos connected by copies to
      the conflict allocnos.  */
-  update_cost_check++;
-  while (VEC_length (ira_allocno_t, conflict_allocno_vec) != 0)
-    {
-      conflict_allocno = VEC_pop (ira_allocno_t, conflict_allocno_vec);
-      update_conflict_hard_regno_costs (full_costs, conflict_allocno,
-                                        COST_HOP_DIVISOR, true);
-    }
-  update_cost_check++;
+  update_conflict_hard_regno_costs (full_costs, true);
+
   /* Take preferences of allocnos connected by copies into
      account.  */
+  start_update_cost ();
   for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
        a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
     {
-      update_conflict_hard_regno_costs (full_costs, a,
-                                        COST_HOP_DIVISOR, false);
+      queue_update_cost (a, COST_HOP_DIVISOR);
       if (a == allocno)
         break;
     }
+  update_conflict_hard_regno_costs (full_costs, false);
   min_cost = min_full_cost = INT_MAX;
   /* We don't care about giving callee saved registers to allocnos no
      living through calls because call clobbered registers are
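The net effect on assign_hard_reg is a change of calling convention: instead of invoking the recursive updater once per conflicting or coalesced allocno, each phase now seeds the shared queue and makes a single batched call. A runnable toy of that flow, with hypothetical names and a plain visited array standing in for the generation counter (not GCC code):

#include <stdio.h>

#define NUM 4

static int in_queue[NUM], queue[NUM], head, tail;

static void
start_update_cost (void)
{
  head = tail = 0;
  for (int i = 0; i < NUM; i++)
    in_queue[i] = 0;   /* the real code avoids this reset via a counter */
}

static void
queue_update_cost (int a)
{
  if (!in_queue[a])
    {
      in_queue[a] = 1;
      queue[tail++] = a;
    }
}

/* Stand-in for update_conflict_hard_regno_costs: drains the whole
   queue in one call; DECR_P picks the sign, as in the real function.  */
static void
update_costs (int *costs, int decr_p)
{
  while (head < tail)
    {
      int a = queue[head++];
      costs[a] += decr_p ? -10 : 10;   /* placeholder cost contribution */
    }
}

int
main (void)
{
  int full_costs[NUM] = {0};

  /* Phase 1: seed with conflicting allocnos, drain with decr_p true.  */
  start_update_cost ();
  queue_update_cost (1);
  queue_update_cost (3);
  update_costs (full_costs, 1);

  /* Phase 2: seed with the coalesced allocnos, drain with decr_p false.  */
  start_update_cost ();
  queue_update_cost (0);
  update_costs (full_costs, 0);

  for (int i = 0; i < NUM; i++)
    printf ("full_costs[%d] = %d\n", i, full_costs[i]);
  return 0;
}

Batching also means an allocno reachable from several conflict allocnos is processed once per pass rather than once per recursive call, which is where the compile-time saving comes from.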
@@ -2926,7 +2978,6 @@ void
 ira_color (void)
 {
   allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
-  conflict_allocno_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
   removed_splay_allocno_vec
     = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
   memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
@@ -2934,7 +2985,6 @@ ira_color (void)
   do_coloring ();
   ira_finish_assign ();
   VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec);
-  VEC_free (ira_allocno_t, heap, conflict_allocno_vec);
   VEC_free (ira_allocno_t, heap, allocno_stack_vec);
   move_spill_restore ();
 }