Commit f754734f, authored by Richard Sandiford and committed by Richard Sandiford

ira-color.c (conflict_allocno_vec): Delete.

gcc/
	* ira-color.c (conflict_allocno_vec): Delete.
	(update_cost_queue_elem): New structure.
	(update_cost_queue): New variable.
	(update_cost_queue_tail): Likewise.
	(update_cost_queue_elems): Likewise.
	(allocno_update_cost_check): Delete.
	(initiate_cost_update): Allocate update_cost_queue_elems
	instead of allocno_update_cost_check.
	(finish_cost_update): Update the free()s accordingly.
	(start_update_cost): New function.
	(queue_update_cost): Likewise.
	(get_next_update_cost): Likewise.
	(update_copy_costs_1): Inline into...
	(update_copy_costs): ...here.  Use a queue instead of recursive calls.
	Use cover_class instead of ALLOCNO_COVER_CLASS (another_allocno),
	once we've established they are equal.  Don't allocate update
	costs if there is nothing to add to them.
	(update_conflict_hard_regno_costs): Remove ALLOCNO and
	DIVISOR arguments.  Use a queue instead of recursive calls;
	process all the allocnos in the initial queue, rather than
	a single allocno.
	(assign_hard_reg): Use queue_update_cost instead of
	conflict_allocno_vec.  Queue coalesced allocnos instead
	of calling update_conflict_hard_regno_costs for each one.
	Just call update_conflict_hard_regno_costs once for the
	entire queue.
	(ira_color): Remove conflict_allocno_vec handling.

From-SVN: r140335
parent 29d7f409
2008-09-13 Richard Sandiford <rdsandiford@googlemail.com>
* ira-color.c (conflict_allocno_vec): Delete.
(update_cost_queue_elem): New structure.
(update_cost_queue): New variable.
(update_cost_queue_tail): Likewise.
(update_cost_queue_elems): Likewise.
(allocno_update_cost_check): Delete.
(initiate_cost_update): Allocate update_cost_queue_elems
instead of allocno_update_cost_check.
(finish_cost_update): Update the free()s accordingly.
(start_update_cost): New function.
(queue_update_cost): Likewise.
(get_next_update_cost): Likewise.
(update_copy_costs_1): Inline into...
(update_copy_costs): ...here. Use a queue instead of recursive calls.
Use cover_class instead of ALLOCNO_COVER_CLASS (another_allocno),
once we've established they are equal. Don't allocate update
costs if there is nothing to add to them.
(update_conflict_hard_regno_costs): Remove ALLOCNO and
DIVISOR arguments. Use a queue instead of recursive calls;
process all the allocnos in the initial queue, rather than
a single allocno.
(assign_hard_reg): Use queue_update_cost instead of
conflict_allocno_vec. Queue coalesced allocnos instead
of calling update_conflict_hard_regno_costs for each one.
Just call update_conflict_hard_regno_costs once for the
entire queue.
(ira_color): Remove conflict_allocno_vec handling.
2008-09-12 Sebastian Pop <sebastian.pop@amd.com>
PR tree-optimization/37484
......
...@@ -68,9 +68,6 @@ static ira_allocno_t *sorted_allocnos; ...@@ -68,9 +68,6 @@ static ira_allocno_t *sorted_allocnos;
/* Vec representing the stack of allocnos used during coloring. */ /* Vec representing the stack of allocnos used during coloring. */
static VEC(ira_allocno_t,heap) *allocno_stack_vec; static VEC(ira_allocno_t,heap) *allocno_stack_vec;
/* Vec representing conflict allocnos used during assigning. */
static VEC(ira_allocno_t,heap) *conflict_allocno_vec;
/* Array used to choose an allocno for spilling. */ /* Array used to choose an allocno for spilling. */
static ira_allocno_t *allocnos_for_spilling; static ira_allocno_t *allocnos_for_spilling;
...@@ -94,9 +91,31 @@ static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec; ...@@ -94,9 +91,31 @@ static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec;
register was already allocated for an allocno. */ register was already allocated for an allocno. */
static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER]; static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
/* Array used to check already processed allocnos during the current /* Describes one element in a queue of allocnos whose costs need to be
update_copy_costs call. */ updated. Each allocno in the queue is known to have a cover class. */
static int *allocno_update_cost_check; struct update_cost_queue_elem {
/* This element is in the queue iff CHECK == update_cost_check. */
int check;
/* COST_HOP_DIVISOR**N, where N is the length of the shortest path
connecting this allocno to the one being allocated. */
int divisor;
/* The next allocno in the queue, or null if this is the last element. */
ira_allocno_t next;
};
/* The first element in a queue of allocnos whose copy costs need to be
updated. Null if the queue is empty. */
static ira_allocno_t update_cost_queue;
/* The last element in the queue described by update_cost_queue.
Not valid if update_cost_queue is null. */
static struct update_cost_queue_elem *update_cost_queue_tail;
/* A pool of elements in the queue described by update_cost_queue.
Elements are indexed by ALLOCNO_NUM. */
static struct update_cost_queue_elem *update_cost_queue_elems;
/* The current value of update_copy_cost call count. */ /* The current value of update_copy_cost call count. */
static int update_cost_check; static int update_cost_check;
...@@ -106,9 +125,12 @@ static int update_cost_check; ...@@ -106,9 +125,12 @@ static int update_cost_check;
static void static void
initiate_cost_update (void) initiate_cost_update (void)
{ {
allocno_update_cost_check size_t size;
= (int *) ira_allocate (ira_allocnos_num * sizeof (int));
memset (allocno_update_cost_check, 0, ira_allocnos_num * sizeof (int)); size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
update_cost_queue_elems
= (struct update_cost_queue_elem *) ira_allocate (size);
memset (update_cost_queue_elems, 0, size);
update_cost_check = 0; update_cost_check = 0;
} }
...@@ -116,7 +138,7 @@ initiate_cost_update (void) ...@@ -116,7 +138,7 @@ initiate_cost_update (void)
static void static void
finish_cost_update (void) finish_cost_update (void)
{ {
ira_free (allocno_update_cost_check); ira_free (update_cost_queue);
} }
/* When we traverse allocnos to update hard register costs, the cost /* When we traverse allocnos to update hard register costs, the cost
...@@ -124,30 +146,79 @@ finish_cost_update (void) ...@@ -124,30 +146,79 @@ finish_cost_update (void)
hop from given allocno to directly connected allocnos. */ hop from given allocno to directly connected allocnos. */
#define COST_HOP_DIVISOR 4 #define COST_HOP_DIVISOR 4
/* This recursive function updates costs (decrease if DECR_P) of the /* Start a new cost-updating pass. */
unassigned allocnos connected by copies with ALLOCNO. This update
increases chances to remove some copies. Copy cost is proportional
the copy frequency divided by DIVISOR. */
static void static void
update_copy_costs_1 (ira_allocno_t allocno, int hard_regno, start_update_cost (void)
bool decr_p, int divisor)
{ {
int i, cost, update_cost; update_cost_check++;
update_cost_queue = NULL;
}
/* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue,
unless ALLOCNO is already in the queue, or has no cover class. */
static inline void
queue_update_cost (ira_allocno_t allocno, int divisor)
{
struct update_cost_queue_elem *elem;
elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
if (elem->check != update_cost_check
&& ALLOCNO_COVER_CLASS (allocno) != NO_REGS)
{
elem->check = update_cost_check;
elem->divisor = divisor;
elem->next = NULL;
if (update_cost_queue == NULL)
update_cost_queue = allocno;
else
update_cost_queue_tail->next = allocno;
update_cost_queue_tail = elem;
}
}
/* Try to remove the first element from update_cost_queue. Return false
if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
the removed element. */
static inline bool
get_next_update_cost (ira_allocno_t *allocno, int *divisor)
{
struct update_cost_queue_elem *elem;
if (update_cost_queue == NULL)
return false;
*allocno = update_cost_queue;
elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
*divisor = elem->divisor;
update_cost_queue = elem->next;
return true;
}
/* Update the cost of allocnos to increase chances to remove some
copies as the result of subsequent assignment. */
static void
update_copy_costs (ira_allocno_t allocno, bool decr_p)
{
int i, cost, update_cost, hard_regno, divisor;
enum machine_mode mode; enum machine_mode mode;
enum reg_class rclass, cover_class; enum reg_class rclass, cover_class;
ira_allocno_t another_allocno; ira_allocno_t another_allocno;
ira_copy_t cp, next_cp; ira_copy_t cp, next_cp;
hard_regno = ALLOCNO_HARD_REGNO (allocno);
ira_assert (hard_regno >= 0);
cover_class = ALLOCNO_COVER_CLASS (allocno); cover_class = ALLOCNO_COVER_CLASS (allocno);
if (cover_class == NO_REGS) if (cover_class == NO_REGS)
return; return;
if (allocno_update_cost_check[ALLOCNO_NUM (allocno)] == update_cost_check)
return;
allocno_update_cost_check[ALLOCNO_NUM (allocno)] = update_cost_check;
ira_assert (hard_regno >= 0);
i = ira_class_hard_reg_index[cover_class][hard_regno]; i = ira_class_hard_reg_index[cover_class][hard_regno];
ira_assert (i >= 0); ira_assert (i >= 0);
rclass = REGNO_REG_CLASS (hard_regno); rclass = REGNO_REG_CLASS (hard_regno);
start_update_cost ();
divisor = 1;
do
{
mode = ALLOCNO_MODE (allocno); mode = ALLOCNO_MODE (allocno);
for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp) for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
{ {
...@@ -163,17 +234,21 @@ update_copy_costs_1 (ira_allocno_t allocno, int hard_regno, ...@@ -163,17 +234,21 @@ update_copy_costs_1 (ira_allocno_t allocno, int hard_regno,
} }
else else
gcc_unreachable (); gcc_unreachable ();
if (cover_class
!= ALLOCNO_COVER_CLASS (another_allocno) if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
|| ALLOCNO_ASSIGNED_P (another_allocno)) || ALLOCNO_ASSIGNED_P (another_allocno))
continue; continue;
cost = (cp->second == allocno cost = (cp->second == allocno
? ira_register_move_cost[mode][rclass] ? ira_register_move_cost[mode][rclass][cover_class]
[ALLOCNO_COVER_CLASS (another_allocno)] : ira_register_move_cost[mode][cover_class][rclass]);
: ira_register_move_cost[mode]
[ALLOCNO_COVER_CLASS (another_allocno)][rclass]);
if (decr_p) if (decr_p)
cost = -cost; cost = -cost;
update_cost = cp->freq * cost / divisor;
if (update_cost == 0)
continue;
ira_allocate_and_set_or_copy_costs ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class, (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
ALLOCNO_COVER_CLASS_COST (another_allocno), ALLOCNO_COVER_CLASS_COST (another_allocno),
...@@ -182,55 +257,31 @@ update_copy_costs_1 (ira_allocno_t allocno, int hard_regno, ...@@ -182,55 +257,31 @@ update_copy_costs_1 (ira_allocno_t allocno, int hard_regno,
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno), (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
cover_class, 0, cover_class, 0,
ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno)); ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
update_cost = cp->freq * cost / divisor;
ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost; ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i] ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
+= update_cost; += update_cost;
if (update_cost != 0)
update_copy_costs_1 (another_allocno, hard_regno,
decr_p, divisor * COST_HOP_DIVISOR);
}
}
/* Update the cost of allocnos to increase chances to remove some queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
copies as the result of subsequent assignment. */ }
static void }
update_copy_costs (ira_allocno_t allocno, bool decr_p) while (get_next_update_cost (&allocno, &divisor));
{
update_cost_check++;
update_copy_costs_1 (allocno, ALLOCNO_HARD_REGNO (allocno), decr_p, 1);
} }
/* This recursive function updates COSTS (decrease if DECR_P) by /* This function updates COSTS (decrease if DECR_P) by conflict costs
conflict costs of the unassigned allocnos connected by copies with of the unassigned allocnos connected by copies with allocnos in
ALLOCNO. This update increases chances to remove some copies. update_cost_queue. This update increases chances to remove some
Copy cost is proportional to the copy frequency divided by copies. */
DIVISOR. */
static void static void
update_conflict_hard_regno_costs (int *costs, ira_allocno_t allocno, update_conflict_hard_regno_costs (int *costs, bool decr_p)
int divisor, bool decr_p)
{ {
int i, cost, class_size, freq, mult, div; int i, cost, class_size, freq, mult, div, divisor;
int *conflict_costs; int *conflict_costs;
bool cont_p; bool cont_p;
enum machine_mode mode;
enum reg_class cover_class; enum reg_class cover_class;
ira_allocno_t another_allocno; ira_allocno_t allocno, another_allocno;
ira_copy_t cp, next_cp; ira_copy_t cp, next_cp;
cover_class = ALLOCNO_COVER_CLASS (allocno); while (get_next_update_cost (&allocno, &divisor))
/* Probably 5 hops will be enough. */
if (divisor > (COST_HOP_DIVISOR * COST_HOP_DIVISOR
* COST_HOP_DIVISOR * COST_HOP_DIVISOR * COST_HOP_DIVISOR))
return;
if (cover_class == NO_REGS)
return;
/* Check that it was already visited. */
if (allocno_update_cost_check[ALLOCNO_NUM (allocno)] == update_cost_check)
return;
allocno_update_cost_check[ALLOCNO_NUM (allocno)] = update_cost_check;
mode = ALLOCNO_MODE (allocno);
class_size = ira_class_hard_regs_num[cover_class];
for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp) for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
{ {
if (cp->first == allocno) if (cp->first == allocno)
...@@ -245,10 +296,12 @@ update_conflict_hard_regno_costs (int *costs, ira_allocno_t allocno, ...@@ -245,10 +296,12 @@ update_conflict_hard_regno_costs (int *costs, ira_allocno_t allocno,
} }
else else
gcc_unreachable (); gcc_unreachable ();
cover_class = ALLOCNO_COVER_CLASS (allocno);
if (cover_class != ALLOCNO_COVER_CLASS (another_allocno) if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
|| ALLOCNO_ASSIGNED_P (another_allocno) || ALLOCNO_ASSIGNED_P (another_allocno)
|| ALLOCNO_MAY_BE_SPILLED_P (another_allocno)) || ALLOCNO_MAY_BE_SPILLED_P (another_allocno))
continue; continue;
class_size = ira_class_hard_regs_num[cover_class];
ira_allocate_and_copy_costs ira_allocate_and_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno), (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno)); cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
...@@ -275,9 +328,13 @@ update_conflict_hard_regno_costs (int *costs, ira_allocno_t allocno, ...@@ -275,9 +328,13 @@ update_conflict_hard_regno_costs (int *costs, ira_allocno_t allocno,
costs[i] += cost; costs[i] += cost;
} }
} }
if (cont_p) /* Probably 5 hops will be enough. */
update_conflict_hard_regno_costs (costs, another_allocno, if (cont_p
divisor * COST_HOP_DIVISOR, decr_p); && divisor <= (COST_HOP_DIVISOR
* COST_HOP_DIVISOR
* COST_HOP_DIVISOR
* COST_HOP_DIVISOR))
queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
} }
} }
...@@ -355,6 +412,7 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p) ...@@ -355,6 +412,7 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p)
#ifdef STACK_REGS #ifdef STACK_REGS
no_stack_reg_p = false; no_stack_reg_p = false;
#endif #endif
start_update_cost ();
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);; for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a)) a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{ {
...@@ -419,8 +477,7 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p) ...@@ -419,8 +477,7 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p)
if (conflict_costs != NULL) if (conflict_costs != NULL)
for (j = class_size - 1; j >= 0; j--) for (j = class_size - 1; j >= 0; j--)
full_costs[j] -= conflict_costs[j]; full_costs[j] -= conflict_costs[j];
VEC_safe_push (ira_allocno_t, heap, conflict_allocno_vec, queue_update_cost (conflict_allocno, COST_HOP_DIVISOR);
conflict_allocno);
} }
} }
if (a == allocno) if (a == allocno)
...@@ -428,24 +485,19 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p) ...@@ -428,24 +485,19 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p)
} }
/* Take into account preferences of allocnos connected by copies to /* Take into account preferences of allocnos connected by copies to
the conflict allocnos. */ the conflict allocnos. */
update_cost_check++; update_conflict_hard_regno_costs (full_costs, true);
while (VEC_length (ira_allocno_t, conflict_allocno_vec) != 0)
{
conflict_allocno = VEC_pop (ira_allocno_t, conflict_allocno_vec);
update_conflict_hard_regno_costs (full_costs, conflict_allocno,
COST_HOP_DIVISOR, true);
}
update_cost_check++;
/* Take preferences of allocnos connected by copies into /* Take preferences of allocnos connected by copies into
account. */ account. */
start_update_cost ();
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);; for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a)) a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{ {
update_conflict_hard_regno_costs (full_costs, a, queue_update_cost (a, COST_HOP_DIVISOR);
COST_HOP_DIVISOR, false);
if (a == allocno) if (a == allocno)
break; break;
} }
update_conflict_hard_regno_costs (full_costs, false);
min_cost = min_full_cost = INT_MAX; min_cost = min_full_cost = INT_MAX;
/* We don't care about giving callee saved registers to allocnos no /* We don't care about giving callee saved registers to allocnos no
living through calls because call clobbered registers are living through calls because call clobbered registers are
...@@ -2926,7 +2978,6 @@ void ...@@ -2926,7 +2978,6 @@ void
ira_color (void) ira_color (void)
{ {
allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num); allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
conflict_allocno_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
removed_splay_allocno_vec removed_splay_allocno_vec
= VEC_alloc (ira_allocno_t, heap, ira_allocnos_num); = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p)); memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
...@@ -2934,7 +2985,6 @@ ira_color (void) ...@@ -2934,7 +2985,6 @@ ira_color (void)
do_coloring (); do_coloring ();
ira_finish_assign (); ira_finish_assign ();
VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec); VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec);
VEC_free (ira_allocno_t, heap, conflict_allocno_vec);
VEC_free (ira_allocno_t, heap, allocno_stack_vec); VEC_free (ira_allocno_t, heap, allocno_stack_vec);
move_spill_restore (); move_spill_restore ();
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment