Commit 9b69cf83 by Bernd Schmidt Committed by H.J. Lu

re PR bootstrap/48403 (bootstrap comparison failure)

Fix PR bootstrap/48403.

2011-04-04  Bernd Schmidt  <bernds@codesourcery.com>

	PR bootstrap/48403
	* haifa-sched.c (nonscheduled_insns_begin): New static variable.
	(rank_for_schedule): Use scheduled_insns vector instead of
	last_scheduled_insn.
	(ok_for_early_queue_removal): Likewise.
	(queue_to_ready): Search forward in nonscheduled_insns_begin if
	we have a dbg_cnt.
	(choose_ready): Likewise.
	(commit_schedule): Use VEC_iterate.
	(schedule_block): Initialize nonscheduled_insns_begin.  If we have
	a dbg_cnt, use it and ensure the first insn is in the ready list.
	(haifa_sched_init): Allocate scheduled_insns.
	(sched_extend_ready_list): Don't allocate it; reserve space.
	(haifa_sched_finish): Free it.

From-SVN: r171942
parent d355ede7
2011-04-04 Bernd Schmidt <bernds@codesourcery.com>
PR bootstrap/48403
* haifa-sched.c (nonscheduled_insns_begin): New static variable.
(rank_for_schedule): Use scheduled_insns vector instead of
last_scheduled_insn.
(ok_for_early_queue_removal): Likewise.
(queue_to_ready): Search forward in nonscheduled_insns_begin if
we have a dbg_cnt.
(choose_ready): Likewise.
(commit_schedule): Use VEC_iterate.
(schedule_block): Initialize nonscheduled_insns_begin. If we have
a dbg_cnt, use it and ensure the first insn is in the ready list.
(haifa_sched_init): Allocate scheduled_insns.
(sched_extend_ready_list): Don't allocate it; reserve space.
(haifa_sched_finish): Free it.
2011-04-04 Joseph Myers <joseph@codesourcery.com>
* optc-gen.awk: Always remove type from Variable entry before
......
...@@ -780,12 +780,14 @@ print_curr_reg_pressure (void) ...@@ -780,12 +780,14 @@ print_curr_reg_pressure (void)
fprintf (sched_dump, "\n"); fprintf (sched_dump, "\n");
} }
/* Pointer to the last instruction scheduled. Used by rank_for_schedule, /* Pointer to the last instruction scheduled. */
so that insns independent of the last scheduled insn will be preferred
over dependent instructions. */
static rtx last_scheduled_insn; static rtx last_scheduled_insn;
/* Pointer that iterates through the list of unscheduled insns if we
have a dbg_cnt enabled. It always points at an insn prior to the
first unscheduled one. */
static rtx nonscheduled_insns_begin;
/* Cached cost of the instruction. Use below function to get cost of the /* Cached cost of the instruction. Use below function to get cost of the
insn. -1 here means that the field is not initialized. */ insn. -1 here means that the field is not initialized. */
#define INSN_COST(INSN) (HID (INSN)->cost) #define INSN_COST(INSN) (HID (INSN)->cost)
...@@ -1239,18 +1241,19 @@ rank_for_schedule (const void *x, const void *y) ...@@ -1239,18 +1241,19 @@ rank_for_schedule (const void *x, const void *y)
if (flag_sched_last_insn_heuristic) if (flag_sched_last_insn_heuristic)
{ {
last = last_scheduled_insn; int i = VEC_length (rtx, scheduled_insns);
last = NULL_RTX;
if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head) while (i-- > 0)
do {
last = PREV_INSN (last); last = VEC_index (rtx, scheduled_insns, i);
while (!NONDEBUG_INSN_P (last) if (NONDEBUG_INSN_P (last))
&& last != current_sched_info->prev_head); break;
}
} }
/* Compare insns based on their relation to the last scheduled /* Compare insns based on their relation to the last scheduled
non-debug insn. */ non-debug insn. */
if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last)) if (flag_sched_last_insn_heuristic && last && NONDEBUG_INSN_P (last))
{ {
dep_t dep1; dep_t dep1;
dep_t dep2; dep_t dep2;
...@@ -2044,9 +2047,16 @@ queue_to_ready (struct ready_list *ready) ...@@ -2044,9 +2047,16 @@ queue_to_ready (struct ready_list *ready)
q_ptr = NEXT_Q (q_ptr); q_ptr = NEXT_Q (q_ptr);
if (dbg_cnt (sched_insn) == false) if (dbg_cnt (sched_insn) == false)
/* If debug counter is activated do not requeue insn next after {
last_scheduled_insn. */ /* If debug counter is activated do not requeue the first
skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn); nonscheduled insn. */
skip_insn = nonscheduled_insns_begin;
do
{
skip_insn = next_nonnote_nondebug_insn (skip_insn);
}
while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
}
else else
skip_insn = NULL_RTX; skip_insn = NULL_RTX;
...@@ -2129,22 +2139,18 @@ queue_to_ready (struct ready_list *ready) ...@@ -2129,22 +2139,18 @@ queue_to_ready (struct ready_list *ready)
static bool static bool
ok_for_early_queue_removal (rtx insn) ok_for_early_queue_removal (rtx insn)
{ {
int n_cycles;
rtx prev_insn = last_scheduled_insn;
if (targetm.sched.is_costly_dependence) if (targetm.sched.is_costly_dependence)
{ {
rtx prev_insn;
int n_cycles;
int i = VEC_length (rtx, scheduled_insns);
for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--) for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
{ {
for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn)) while (i-- > 0)
{ {
int cost; int cost;
if (prev_insn == current_sched_info->prev_head) prev_insn = VEC_index (rtx, scheduled_insns, i);
{
prev_insn = NULL;
break;
}
if (!NOTE_P (prev_insn)) if (!NOTE_P (prev_insn))
{ {
...@@ -2166,9 +2172,8 @@ ok_for_early_queue_removal (rtx insn) ...@@ -2166,9 +2172,8 @@ ok_for_early_queue_removal (rtx insn)
break; break;
} }
if (!prev_insn) if (i == 0)
break; break;
prev_insn = PREV_INSN (prev_insn);
} }
} }
...@@ -2673,13 +2678,17 @@ choose_ready (struct ready_list *ready, bool first_cycle_insn_p, ...@@ -2673,13 +2678,17 @@ choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
if (dbg_cnt (sched_insn) == false) if (dbg_cnt (sched_insn) == false)
{ {
rtx insn; rtx insn = nonscheduled_insns_begin;
do
insn = next_nonnote_insn (last_scheduled_insn); {
insn = next_nonnote_insn (insn);
}
while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
if (QUEUE_INDEX (insn) == QUEUE_READY) if (QUEUE_INDEX (insn) == QUEUE_READY)
/* INSN is in the ready_list. */ /* INSN is in the ready_list. */
{ {
nonscheduled_insns_begin = insn;
ready_remove_insn (insn); ready_remove_insn (insn);
*insn_ptr = insn; *insn_ptr = insn;
return 0; return 0;
...@@ -2826,13 +2835,14 @@ choose_ready (struct ready_list *ready, bool first_cycle_insn_p, ...@@ -2826,13 +2835,14 @@ choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
static void static void
commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb) commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
{ {
int i; unsigned int i;
rtx insn;
last_scheduled_insn = prev_head; last_scheduled_insn = prev_head;
for (i = 0; i < (int)VEC_length (rtx, scheduled_insns); i++) for (i = 0;
VEC_iterate (rtx, scheduled_insns, i, insn);
i++)
{ {
rtx insn = VEC_index (rtx, scheduled_insns, i);
if (control_flow_insn_p (last_scheduled_insn) if (control_flow_insn_p (last_scheduled_insn)
|| current_sched_info->advance_target_bb (*target_bb, insn)) || current_sched_info->advance_target_bb (*target_bb, insn))
{ {
...@@ -2956,7 +2966,7 @@ schedule_block (basic_block *target_bb) ...@@ -2956,7 +2966,7 @@ schedule_block (basic_block *target_bb)
targetm.sched.init (sched_dump, sched_verbose, ready.veclen); targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
/* We start inserting insns after PREV_HEAD. */ /* We start inserting insns after PREV_HEAD. */
last_scheduled_insn = prev_head; last_scheduled_insn = nonscheduled_insns_begin = prev_head;
gcc_assert ((NOTE_P (last_scheduled_insn) gcc_assert ((NOTE_P (last_scheduled_insn)
|| DEBUG_INSN_P (last_scheduled_insn)) || DEBUG_INSN_P (last_scheduled_insn))
...@@ -3001,12 +3011,12 @@ schedule_block (basic_block *target_bb) ...@@ -3001,12 +3011,12 @@ schedule_block (basic_block *target_bb)
/* Delay all insns past it for 1 cycle. If debug counter is /* Delay all insns past it for 1 cycle. If debug counter is
activated make an exception for the insn right after activated make an exception for the insn right after
last_scheduled_insn. */ nonscheduled_insns_begin. */
{ {
rtx skip_insn; rtx skip_insn;
if (dbg_cnt (sched_insn) == false) if (dbg_cnt (sched_insn) == false)
skip_insn = next_nonnote_insn (last_scheduled_insn); skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
else else
skip_insn = NULL_RTX; skip_insn = NULL_RTX;
...@@ -3019,6 +3029,8 @@ schedule_block (basic_block *target_bb) ...@@ -3019,6 +3029,8 @@ schedule_block (basic_block *target_bb)
if (insn != skip_insn) if (insn != skip_insn)
queue_insn (insn, 1, "list truncated"); queue_insn (insn, 1, "list truncated");
} }
if (skip_insn)
ready_add (&ready, skip_insn, true);
} }
} }
...@@ -3540,6 +3552,8 @@ haifa_sched_init (void) ...@@ -3540,6 +3552,8 @@ haifa_sched_init (void)
setup_sched_dump (); setup_sched_dump ();
sched_init (); sched_init ();
scheduled_insns = VEC_alloc (rtx, heap, 0);
if (spec_info != NULL) if (spec_info != NULL)
{ {
sched_deps_info->use_deps_list = 1; sched_deps_info->use_deps_list = 1;
...@@ -3610,6 +3624,8 @@ haifa_sched_finish (void) ...@@ -3610,6 +3624,8 @@ haifa_sched_finish (void)
c, nr_be_in_control); c, nr_be_in_control);
} }
VEC_free (rtx, heap, scheduled_insns);
/* Finalize h_i_d, dependency caches, and luids for the whole /* Finalize h_i_d, dependency caches, and luids for the whole
function. Target will be finalized in md_global_finish (). */ function. Target will be finalized in md_global_finish (). */
sched_deps_finish (); sched_deps_finish ();
...@@ -4008,7 +4024,7 @@ sched_extend_ready_list (int new_sched_ready_n_insns) ...@@ -4008,7 +4024,7 @@ sched_extend_ready_list (int new_sched_ready_n_insns)
{ {
i = 0; i = 0;
sched_ready_n_insns = 0; sched_ready_n_insns = 0;
scheduled_insns = VEC_alloc (rtx, heap, new_sched_ready_n_insns); VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
} }
else else
i = sched_ready_n_insns + 1; i = sched_ready_n_insns + 1;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment