Commit cda1f765 by Vladimir Makarov Committed by Vladimir Makarov

haifa-sched.c (move_insn): Restore moving all schedule group.

2003-01-16  Vladimir Makarov  <vmakarov@redhat.com>

	* haifa-sched.c (move_insn): Restore moving all schedule group.
	(set_priorities): Restore taking SCHED_GROUP_P into account.

	* sched-deps.c (add_dependence): Restore processing the last group
	insn.
	(remove_dependence, group_leader): Restore the functions.
	(set_sched_group_p): Restore adding dependencies from previous insn
	in the group.
	(compute_forward_dependences): Restore usage of group_leader.

	* sched-ebb.c (init_ready_list): Restore taking SCHED_GROUP_P into
	account.

	* sched-rgn.c (init_ready_list): Restore taking SCHED_GROUP_P into
	account.
	(can_schedule_ready_p): Ditto.
	(add_branch_dependences): Restore skipping over the group insns.

From-SVN: r61412
parent 200097cc
2003-01-16 Vladimir Makarov <vmakarov@redhat.com>
* haifa-sched.c (move_insn): Restore moving all schedule group.
(set_priorities): Restore taking SCHED_GROUP_P into account.
* sched-deps.c (add_dependence): Restore processing the last group
insn.
(remove_dependence, group_leader): Restore the functions.
(set_sched_group_p): Restore adding dependencies from previous insn
in the group.
(compute_forward_dependences): Restore usage of group_leader.
* sched-ebb.c (init_ready_list): Restore taking SCHED_GROUP_P into
account.
* sched-rgn.c (init_ready_list): Restore taking SCHED_GROUP_P into
account.
(can_schedule_ready_p): Ditto.
(add_branch_dependences): Restore skipping over the group insns.
2003-01-16 Stephane Carrez <stcarrez@nerim.fr>
* config/m68hc11/m68hc11.c (m68hc11_check_z_replacement): Fix handling
......
...@@ -1769,6 +1769,25 @@ move_insn (insn, last) ...@@ -1769,6 +1769,25 @@ move_insn (insn, last)
{ {
rtx retval = NULL; rtx retval = NULL;
/* If INSN has SCHED_GROUP_P set, then issue it and any other
insns with SCHED_GROUP_P set first. */
while (SCHED_GROUP_P (insn))
{
rtx prev = PREV_INSN (insn);
/* Move a SCHED_GROUP_P insn. */
move_insn1 (insn, last);
/* If this is the first call to reemit_notes, then record
its return value. */
if (retval == NULL_RTX)
retval = reemit_notes (insn, insn);
else
reemit_notes (insn, insn);
/* Consume SCHED_GROUP_P flag. */
SCHED_GROUP_P (insn) = 0;
insn = prev;
}
/* Now move the first non SCHED_GROUP_P insn. */ /* Now move the first non SCHED_GROUP_P insn. */
move_insn1 (insn, last); move_insn1 (insn, last);
...@@ -1779,8 +1798,6 @@ move_insn (insn, last) ...@@ -1779,8 +1798,6 @@ move_insn (insn, last)
else else
reemit_notes (insn, insn); reemit_notes (insn, insn);
SCHED_GROUP_P (insn) = 0;
return retval; return retval;
} }
...@@ -2376,7 +2393,8 @@ set_priorities (head, tail) ...@@ -2376,7 +2393,8 @@ set_priorities (head, tail)
if (GET_CODE (insn) == NOTE) if (GET_CODE (insn) == NOTE)
continue; continue;
n_insn++; if (! SCHED_GROUP_P (insn))
n_insn++;
(void) priority (insn); (void) priority (insn);
} }
......
...@@ -83,12 +83,14 @@ static sbitmap *forward_dependency_cache; ...@@ -83,12 +83,14 @@ static sbitmap *forward_dependency_cache;
static int deps_may_trap_p PARAMS ((rtx)); static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note)); static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note)); static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx)); static void set_sched_group_p PARAMS ((rtx));
static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int)); static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx)); static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx)); static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx)); static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));
static rtx get_condition PARAMS ((rtx)); static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx)); static int conditions_mutex_p PARAMS ((rtx, rtx));
...@@ -235,13 +237,16 @@ add_dependence (insn, elem, dep_type) ...@@ -235,13 +237,16 @@ add_dependence (insn, elem, dep_type)
rtx nnext; rtx nnext;
while ((nnext = next_nonnote_insn (next)) != NULL while ((nnext = next_nonnote_insn (next)) != NULL
&& INSN_P (nnext) && INSN_P (nnext)
&& next != insn
&& SCHED_GROUP_P (nnext)) && SCHED_GROUP_P (nnext))
next = nnext; next = nnext;
if (insn != next) /* Again, don't depend an insn on itself. */
add_dependence (insn, next, REG_DEP_ANTI); if (insn == next)
return;
/* Make the dependence to NEXT, the last insn of the group,
instead of the original ELEM. */
elem = next;
} }
...@@ -380,6 +385,76 @@ add_dependence_list_and_free (insn, listp, dep_type) ...@@ -380,6 +385,76 @@ add_dependence_list_and_free (insn, listp, dep_type)
} }
} }
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
of INSN. Abort if not found. */
static void
remove_dependence (insn, elem)
rtx insn;
rtx elem;
{
rtx prev, link, next;
int found = 0;
for (prev = 0, link = LOG_LINKS (insn); link; link = next)
{
next = XEXP (link, 1);
if (XEXP (link, 0) == elem)
{
if (prev)
XEXP (prev, 1) = next;
else
LOG_LINKS (insn) = next;
#ifdef INSN_SCHEDULING
/* If we are removing a dependency from the LOG_LINKS list,
make sure to remove it from the cache too. */
if (true_dependency_cache != NULL)
{
if (REG_NOTE_KIND (link) == 0)
RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
}
#endif
free_INSN_LIST_node (link);
found = 1;
}
else
prev = link;
}
if (!found)
abort ();
return;
}
/* Return an insn which represents a SCHED_GROUP, which is
the last insn in the group. */
static rtx
group_leader (insn)
rtx insn;
{
rtx prev;
do
{
prev = insn;
insn = next_nonnote_insn (insn);
}
while (insn && INSN_P (insn) && SCHED_GROUP_P (insn));
return prev;
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that /* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
goes along with that. */ goes along with that. */
...@@ -391,21 +466,22 @@ set_sched_group_p (insn) ...@@ -391,21 +466,22 @@ set_sched_group_p (insn)
SCHED_GROUP_P (insn) = 1; SCHED_GROUP_P (insn) = 1;
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1)) /* There may be a note before this insn now, but all notes will
{ be removed before we actually try to schedule the insns, so
prev = insn; it won't cause a problem later. We must avoid it here
do though. */
{
prev = prev_nonnote_insn (prev);
if (XEXP (link, 0) == prev)
break;
}
while (SCHED_GROUP_P (prev));
if (XEXP (link, 0) != prev)
add_dependence (prev, XEXP (link, 0), REG_DEP_ANTI);
}
prev = prev_nonnote_insn (insn); prev = prev_nonnote_insn (insn);
add_dependence (insn, prev, REG_DEP_ANTI);
/* Make a copy of all dependencies on the immediately previous
insn, and add to this insn. This is so that all the
dependencies will apply to the group. Remove an explicit
dependence on this insn as SCHED_GROUP_P now represents it. */
if (find_insn_list (prev, LOG_LINKS (insn)))
remove_dependence (insn, prev);
for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
} }
/* Process an insn's memory dependencies. There are four kinds of /* Process an insn's memory dependencies. There are four kinds of
...@@ -1370,9 +1446,11 @@ compute_forward_dependences (head, tail) ...@@ -1370,9 +1446,11 @@ compute_forward_dependences (head, tail)
if (! INSN_P (insn)) if (! INSN_P (insn))
continue; continue;
insn = group_leader (insn);
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1)) for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
{ {
rtx x = XEXP (link, 0); rtx x = group_leader (XEXP (link, 0));
rtx new_link; rtx new_link;
if (x != XEXP (link, 0)) if (x != XEXP (link, 0))
......
...@@ -90,9 +90,16 @@ init_ready_list (ready) ...@@ -90,9 +90,16 @@ init_ready_list (ready)
Count number of insns in the target block being scheduled. */ Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{ {
if (INSN_DEP_COUNT (insn) == 0) rtx next;
if (! INSN_P (insn))
continue;
next = NEXT_INSN (insn);
if (INSN_DEP_COUNT (insn) == 0
&& (! INSN_P (next) || SCHED_GROUP_P (next) == 0))
ready_add (ready, insn); ready_add (ready, insn);
if (!(SCHED_GROUP_P (insn))) if (! SCHED_GROUP_P (insn))
target_n_insns++; target_n_insns++;
} }
} }
......
...@@ -2023,9 +2023,17 @@ init_ready_list (ready) ...@@ -2023,9 +2023,17 @@ init_ready_list (ready)
Count number of insns in the target block being scheduled. */ Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{ {
if (INSN_DEP_COUNT (insn) == 0) rtx next;
if (! INSN_P (insn))
continue;
next = NEXT_INSN (insn);
if (INSN_DEP_COUNT (insn) == 0
&& (! INSN_P (next) || SCHED_GROUP_P (next) == 0))
ready_add (ready, insn); ready_add (ready, insn);
target_n_insns++; if (! SCHED_GROUP_P (insn))
target_n_insns++;
} }
/* Add to ready list all 'ready' insns in valid source blocks. /* Add to ready list all 'ready' insns in valid source blocks.
...@@ -2059,8 +2067,19 @@ init_ready_list (ready) ...@@ -2059,8 +2067,19 @@ init_ready_list (ready)
insn, insn) <= 3))) insn, insn) <= 3)))
&& check_live (insn, bb_src) && check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb)))) && is_exception_free (insn, bb_src, target_bb))))
if (INSN_DEP_COUNT (insn) == 0) {
ready_add (ready, insn); rtx next;
/* Note that we haven't squirreled away the notes for
blocks other than the current. So if this is a
speculative insn, NEXT might otherwise be a note. */
next = next_nonnote_insn (insn);
if (INSN_DEP_COUNT (insn) == 0
&& (! next
|| ! INSN_P (next)
|| SCHED_GROUP_P (next) == 0))
ready_add (ready, insn);
}
} }
} }
} }
...@@ -2078,6 +2097,7 @@ can_schedule_ready_p (insn) ...@@ -2078,6 +2097,7 @@ can_schedule_ready_p (insn)
/* An interblock motion? */ /* An interblock motion? */
if (INSN_BB (insn) != target_bb) if (INSN_BB (insn) != target_bb)
{ {
rtx temp;
basic_block b1; basic_block b1;
if (IS_SPECULATIVE_INSN (insn)) if (IS_SPECULATIVE_INSN (insn))
...@@ -2094,9 +2114,18 @@ can_schedule_ready_p (insn) ...@@ -2094,9 +2114,18 @@ can_schedule_ready_p (insn)
} }
nr_inter++; nr_inter++;
/* Find the beginning of the scheduling group. */
/* ??? Ought to update basic block here, but later bits of
schedule_block assumes the original insn block is
still intact. */
temp = insn;
while (SCHED_GROUP_P (temp))
temp = PREV_INSN (temp);
/* Update source block boundaries. */ /* Update source block boundaries. */
b1 = BLOCK_FOR_INSN (insn); b1 = BLOCK_FOR_INSN (temp);
if (insn == b1->head && insn == b1->end) if (temp == b1->head && temp == b1->end)
{ {
/* We moved all the insns in the basic block. /* We moved all the insns in the basic block.
Emit a note after the last insn and update the Emit a note after the last insn and update the
...@@ -2110,9 +2139,9 @@ can_schedule_ready_p (insn) ...@@ -2110,9 +2139,9 @@ can_schedule_ready_p (insn)
/* We took insns from the end of the basic block, /* We took insns from the end of the basic block,
so update the end of block boundary so that it so update the end of block boundary so that it
points to the first insn we did not move. */ points to the first insn we did not move. */
b1->end = PREV_INSN (insn); b1->end = PREV_INSN (temp);
} }
else if (insn == b1->head) else if (temp == b1->head)
{ {
/* We took insns from the start of the basic block, /* We took insns from the start of the basic block,
so update the start of block boundary so that so update the start of block boundary so that
...@@ -2332,6 +2361,17 @@ add_branch_dependences (head, tail) ...@@ -2332,6 +2361,17 @@ add_branch_dependences (head, tail)
CANT_MOVE (insn) = 1; CANT_MOVE (insn) = 1;
last = insn; last = insn;
/* Skip over insns that are part of a group.
Make each insn explicitly depend on the previous insn.
This ensures that only the group header will ever enter
the ready queue (and, when scheduled, will automatically
schedule the SCHED_GROUP_P block). */
while (SCHED_GROUP_P (insn))
{
rtx temp = prev_nonnote_insn (insn);
add_dependence (insn, temp, REG_DEP_ANTI);
insn = temp;
}
} }
/* Don't overrun the bounds of the basic block. */ /* Don't overrun the bounds of the basic block. */
...@@ -2353,6 +2393,10 @@ add_branch_dependences (head, tail) ...@@ -2353,6 +2393,10 @@ add_branch_dependences (head, tail)
add_dependence (last, insn, REG_DEP_ANTI); add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn) = 1; INSN_REF_COUNT (insn) = 1;
/* Skip over insns that are part of a group. */
while (SCHED_GROUP_P (insn))
insn = prev_nonnote_insn (insn);
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment