Commit 4ba478b8 authored by Richard Henderson; committed by Richard Henderson

sched-int.h (struct deps): Add max_reg, reg_last_in_use...

        * sched-int.h (struct deps): Add max_reg, reg_last_in_use; merge
        reg_last_uses, reg_last_sets, reg_last_clobbers into struct deps_reg.
        * sched-deps.c (sched_analyze_1): Update uses of struct deps.
        (sched_analyze_2, sched_analyze_insn): Likewise.
        (sched_analyze, init_deps): Likewise.
        (free_deps): Likewise.  Iterate with EXECUTE_IF_SET_IN_REG_SET.
        * sched-rgn.c (propagate_deps): Likewise.  Remove max_reg argument.
        (compute_block_backward_dependences): Update propagate_deps call.

From-SVN: r38835
parent 9c1fcbfb
2001-01-09 Richard Henderson <rth@redhat.com>
* sched-int.h (struct deps): Add max_reg, reg_last_in_use; merge
reg_last_uses, reg_last_sets, reg_last_clobbers into struct deps_reg.
* sched-deps.c (sched_analyze_1): Update uses of struct deps.
(sched_analyze_2, sched_analyze_insn): Likewise.
(sched_analyze, init_deps): Likewise.
(free_deps): Likewise. Iterate with EXECUTE_IF_SET_IN_REG_SET.
* sched-rgn.c (propagate_deps): Likewise. Remove max_reg argument.
(compute_block_backward_dependences): Update propagate_deps call.
2001-01-09 Mark Elbrecht <snowball3@bigfoot.com>
* gcc.c (process_command): Set switches[n_switches].ordering to 0.
......
...@@ -590,10 +590,10 @@ sched_analyze_1 (deps, x, insn) ...@@ -590,10 +590,10 @@ sched_analyze_1 (deps, x, insn)
int r = regno + i; int r = regno + i;
rtx u; rtx u;
for (u = deps->reg_last_uses[r]; u; u = XEXP (u, 1)) for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1)) for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT); add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
/* Clobbers need not be ordered with respect to one /* Clobbers need not be ordered with respect to one
...@@ -602,8 +602,8 @@ sched_analyze_1 (deps, x, insn) ...@@ -602,8 +602,8 @@ sched_analyze_1 (deps, x, insn)
if (code == SET) if (code == SET)
{ {
if (GET_CODE (PATTERN (insn)) != COND_EXEC) if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&deps->reg_last_uses[r]); free_INSN_LIST_list (&deps->reg_last[r].uses);
for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1)) for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT); add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, r); SET_REGNO_REG_SET (reg_pending_sets, r);
} }
...@@ -616,21 +616,30 @@ sched_analyze_1 (deps, x, insn) ...@@ -616,21 +616,30 @@ sched_analyze_1 (deps, x, insn)
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
} }
} }
/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
it does not reload. Ignore these as they have served their
purpose already. */
else if (regno >= deps->max_reg)
{
if (GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
abort ();
}
else else
{ {
rtx u; rtx u;
for (u = deps->reg_last_uses[regno]; u; u = XEXP (u, 1)) for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1)) for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT); add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
if (code == SET) if (code == SET)
{ {
if (GET_CODE (PATTERN (insn)) != COND_EXEC) if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&deps->reg_last_uses[regno]); free_INSN_LIST_list (&deps->reg_last[regno].uses);
for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1)) for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT); add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, regno); SET_REGNO_REG_SET (reg_pending_sets, regno);
} }
...@@ -757,14 +766,15 @@ sched_analyze_2 (deps, x, insn) ...@@ -757,14 +766,15 @@ sched_analyze_2 (deps, x, insn)
while (--i >= 0) while (--i >= 0)
{ {
int r = regno + i; int r = regno + i;
deps->reg_last_uses[r] deps->reg_last[r].uses
= alloc_INSN_LIST (insn, deps->reg_last_uses[r]); = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1)) for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */ /* ??? This should never happen. */
for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1)) for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
if (call_used_regs[r] || global_regs[r]) if (call_used_regs[r] || global_regs[r])
...@@ -773,16 +783,26 @@ sched_analyze_2 (deps, x, insn) ...@@ -773,16 +783,26 @@ sched_analyze_2 (deps, x, insn)
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
} }
} }
/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
it does not reload. Ignore these as they have served their
purpose already. */
else if (regno >= deps->max_reg)
{
if (GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
abort ();
}
else else
{ {
deps->reg_last_uses[regno] deps->reg_last[regno].uses
= alloc_INSN_LIST (insn, deps->reg_last_uses[regno]); = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1)) for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */ /* ??? This should never happen. */
for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1)) for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
/* Pseudos that are REG_EQUIV to something may be replaced /* Pseudos that are REG_EQUIV to something may be replaced
...@@ -867,19 +887,19 @@ sched_analyze_2 (deps, x, insn) ...@@ -867,19 +887,19 @@ sched_analyze_2 (deps, x, insn)
pseudo-regs because it might give an incorrectly rounded result. */ pseudo-regs because it might give an incorrectly rounded result. */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x)) if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
{ {
int max_reg = max_reg_num (); for (i = 0; i < deps->max_reg; i++)
for (i = 0; i < max_reg; i++)
{ {
for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1)) struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&deps->reg_last_uses[i]);
for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1)) for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
for (u = reg_last->clobbers; u; u = XEXP (u, 1))
for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&reg_last->uses);
} }
reg_pending_sets_all = 1; reg_pending_sets_all = 1;
...@@ -948,7 +968,6 @@ sched_analyze_insn (deps, x, insn, loop_notes) ...@@ -948,7 +968,6 @@ sched_analyze_insn (deps, x, insn, loop_notes)
{ {
register RTX_CODE code = GET_CODE (x); register RTX_CODE code = GET_CODE (x);
rtx link; rtx link;
int maxreg = max_reg_num ();
int i; int i;
if (code == COND_EXEC) if (code == COND_EXEC)
...@@ -1001,13 +1020,15 @@ sched_analyze_insn (deps, x, insn, loop_notes) ...@@ -1001,13 +1020,15 @@ sched_analyze_insn (deps, x, insn, loop_notes)
next = next_nonnote_insn (insn); next = next_nonnote_insn (insn);
if (next && GET_CODE (next) == BARRIER) if (next && GET_CODE (next) == BARRIER)
{ {
for (i = 0; i < maxreg; i++) for (i = 0; i < deps->max_reg; i++)
{ {
for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1)) struct deps_reg *reg_last = &deps->reg_last[i];
for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1)) for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1)) for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
} }
} }
...@@ -1017,13 +1038,13 @@ sched_analyze_insn (deps, x, insn, loop_notes) ...@@ -1017,13 +1038,13 @@ sched_analyze_insn (deps, x, insn, loop_notes)
INIT_REG_SET (&tmp); INIT_REG_SET (&tmp);
(*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp); (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
EXECUTE_IF_SET_IN_REG_SET EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
(&tmp, 0, i,
{ {
for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1)) struct deps_reg *reg_last = &deps->reg_last[i];
for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
deps->reg_last_uses[i] reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
= alloc_INSN_LIST (insn, deps->reg_last_uses[i]); SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
}); });
CLEAR_REG_SET (&tmp); CLEAR_REG_SET (&tmp);
...@@ -1049,7 +1070,6 @@ sched_analyze_insn (deps, x, insn, loop_notes) ...@@ -1049,7 +1070,6 @@ sched_analyze_insn (deps, x, insn, loop_notes)
if (loop_notes) if (loop_notes)
{ {
int max_reg = max_reg_num ();
int schedule_barrier_found = 0; int schedule_barrier_found = 0;
rtx link; rtx link;
...@@ -1074,19 +1094,20 @@ sched_analyze_insn (deps, x, insn, loop_notes) ...@@ -1074,19 +1094,20 @@ sched_analyze_insn (deps, x, insn, loop_notes)
/* Add dependencies if a scheduling barrier was found. */ /* Add dependencies if a scheduling barrier was found. */
if (schedule_barrier_found) if (schedule_barrier_found)
{ {
for (i = 0; i < max_reg; i++) for (i = 0; i < deps->max_reg; i++)
{ {
struct deps_reg *reg_last = &deps->reg_last[i];
rtx u; rtx u;
for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&deps->reg_last_uses[i]);
for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1)) for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
for (u = reg_last->clobbers; u; u = XEXP (u, 1))
for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&reg_last->uses);
} }
reg_pending_sets_all = 1; reg_pending_sets_all = 1;
...@@ -1095,46 +1116,46 @@ sched_analyze_insn (deps, x, insn, loop_notes) ...@@ -1095,46 +1116,46 @@ sched_analyze_insn (deps, x, insn, loop_notes)
} }
/* Accumulate clobbers until the next set so that it will be output dependent /* Accumulate clobbers until the next set so that it will be output
on all of them. At the next set we can clear the clobber list, since dependent on all of them. At the next set we can clear the clobber
subsequent sets will be output dependent on it. */ list, since subsequent sets will be output dependent on it. */
EXECUTE_IF_SET_IN_REG_SET if (reg_pending_sets_all)
(reg_pending_sets, 0, i,
{ {
if (GET_CODE (PATTERN (insn)) != COND_EXEC) reg_pending_sets_all = 0;
for (i = 0; i < deps->max_reg; i++)
{ {
free_INSN_LIST_list (&deps->reg_last_sets[i]); struct deps_reg *reg_last = &deps->reg_last[i];
free_INSN_LIST_list (&deps->reg_last_clobbers[i]); if (GET_CODE (PATTERN (insn)) != COND_EXEC)
deps->reg_last_sets[i] = 0; {
free_INSN_LIST_list (&reg_last->sets);
free_INSN_LIST_list (&reg_last->clobbers);
}
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
} }
deps->reg_last_sets[i] }
= alloc_INSN_LIST (insn, deps->reg_last_sets[i]); else
});
EXECUTE_IF_SET_IN_REG_SET
(reg_pending_clobbers, 0, i,
{
deps->reg_last_clobbers[i]
= alloc_INSN_LIST (insn, deps->reg_last_clobbers[i]);
});
CLEAR_REG_SET (reg_pending_sets);
CLEAR_REG_SET (reg_pending_clobbers);
if (reg_pending_sets_all)
{ {
for (i = 0; i < maxreg; i++) EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
{ {
struct deps_reg *reg_last = &deps->reg_last[i];
if (GET_CODE (PATTERN (insn)) != COND_EXEC) if (GET_CODE (PATTERN (insn)) != COND_EXEC)
{ {
free_INSN_LIST_list (&deps->reg_last_sets[i]); free_INSN_LIST_list (&reg_last->sets);
free_INSN_LIST_list (&deps->reg_last_clobbers[i]); free_INSN_LIST_list (&reg_last->clobbers);
deps->reg_last_sets[i] = 0;
} }
deps->reg_last_sets[i] reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
= alloc_INSN_LIST (insn, deps->reg_last_sets[i]); SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
} });
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
reg_pending_sets_all = 0; {
struct deps_reg *reg_last = &deps->reg_last[i];
reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
});
} }
CLEAR_REG_SET (reg_pending_sets);
CLEAR_REG_SET (reg_pending_clobbers);
/* If a post-call group is still open, see if it should remain so. /* If a post-call group is still open, see if it should remain so.
This insn must be a simple move of a hard reg to a pseudo or This insn must be a simple move of a hard reg to a pseudo or
...@@ -1242,18 +1263,18 @@ sched_analyze (deps, head, tail) ...@@ -1242,18 +1263,18 @@ sched_analyze (deps, head, tail)
if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
&& NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP) && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
{ {
int max_reg = max_reg_num (); for (i = 0; i < deps->max_reg; i++)
for (i = 0; i < max_reg; i++)
{ {
for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1)) struct deps_reg *reg_last = &deps->reg_last[i];
for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
free_INSN_LIST_list (&deps->reg_last_uses[i]); for (u = reg_last->sets; u; u = XEXP (u, 1))
for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
for (u = reg_last->clobbers; u; u = XEXP (u, 1))
for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0); add_dependence (insn, XEXP (u, 0), 0);
free_INSN_LIST_list (&reg_last->uses);
} }
reg_pending_sets_all = 1; reg_pending_sets_all = 1;
...@@ -1272,10 +1293,9 @@ sched_analyze (deps, head, tail) ...@@ -1272,10 +1293,9 @@ sched_analyze (deps, head, tail)
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (call_used_regs[i] || global_regs[i]) if (call_used_regs[i] || global_regs[i])
{ {
for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1)) for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
SET_REGNO_REG_SET (reg_pending_clobbers, i); SET_REGNO_REG_SET (reg_pending_clobbers, i);
...@@ -1424,10 +1444,12 @@ void ...@@ -1424,10 +1444,12 @@ void
init_deps (deps) init_deps (deps)
struct deps *deps; struct deps *deps;
{ {
int maxreg = max_reg_num (); int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
deps->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx)); deps->max_reg = max_reg;
deps->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx)); deps->reg_last = (struct deps_reg *)
xcalloc (max_reg, sizeof (struct deps_reg));
INIT_REG_SET (&deps->reg_last_in_use);
deps->pending_read_insns = 0; deps->pending_read_insns = 0;
deps->pending_read_mems = 0; deps->pending_read_mems = 0;
...@@ -1450,26 +1472,22 @@ void ...@@ -1450,26 +1472,22 @@ void
free_deps (deps) free_deps (deps)
struct deps *deps; struct deps *deps;
{ {
int max_reg = max_reg_num ();
int i; int i;
/* Note this loop is executed max_reg * nr_regions times. It's first /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
implementation accounted for over 90% of the calls to free_INSN_LIST_list. times. For a test case with 42000 regs and 8000 small basic blocks,
The list was empty for the vast majority of those calls. On the PA, not this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
calling free_INSN_LIST_list in those cases improves -O2 compile times by EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
3-5% on average. */
for (i = 0; i < max_reg; ++i)
{ {
if (deps->reg_last_clobbers[i]) struct deps_reg *reg_last = &deps->reg_last[i];
free_INSN_LIST_list (&deps->reg_last_clobbers[i]); free_INSN_LIST_list (&reg_last->uses);
if (deps->reg_last_sets[i]) free_INSN_LIST_list (&reg_last->sets);
free_INSN_LIST_list (&deps->reg_last_sets[i]); free_INSN_LIST_list (&reg_last->clobbers);
if (deps->reg_last_uses[i]) });
free_INSN_LIST_list (&deps->reg_last_uses[i]); CLEAR_REG_SET (&deps->reg_last_in_use);
}
free (deps->reg_last_clobbers); free (deps->reg_last);
free (deps->reg_last_sets); deps->reg_last = NULL;
free (deps->reg_last_uses);
} }
/* If it is profitable to use them, initialize caches for tracking /* If it is profitable to use them, initialize caches for tracking
......
...@@ -78,13 +78,24 @@ struct deps ...@@ -78,13 +78,24 @@ struct deps
to ensure that they won't cross a call after scheduling is done. */ to ensure that they won't cross a call after scheduling is done. */
rtx sched_before_next_call; rtx sched_before_next_call;
/* The maximum register number for the following arrays. Before reload
this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */
int max_reg;
/* Element N is the next insn that sets (hard or pseudo) register /* Element N is the next insn that sets (hard or pseudo) register
N within the current basic block; or zero, if there is no N within the current basic block; or zero, if there is no
such insn. Needed for new registers which may be introduced such insn. Needed for new registers which may be introduced
by splitting insns. */ by splitting insns. */
rtx *reg_last_uses; struct deps_reg
rtx *reg_last_sets; {
rtx *reg_last_clobbers; rtx uses;
rtx sets;
rtx clobbers;
} *reg_last;
/* Element N is set for each register that has any non-zero element
in reg_last[N].{uses,sets,clobbers}. */
regset_head reg_last_in_use;
}; };
/* This structure holds some state of the current scheduling pass, and /* This structure holds some state of the current scheduling pass, and
......
...@@ -304,7 +304,7 @@ void debug_dependencies PARAMS ((void)); ...@@ -304,7 +304,7 @@ void debug_dependencies PARAMS ((void));
static void init_regions PARAMS ((void)); static void init_regions PARAMS ((void));
static void schedule_region PARAMS ((int)); static void schedule_region PARAMS ((int));
static void propagate_deps PARAMS ((int, struct deps *, int)); static void propagate_deps PARAMS ((int, struct deps *));
static void free_pending_lists PARAMS ((void)); static void free_pending_lists PARAMS ((void));
/* Functions for construction of the control flow graph. */ /* Functions for construction of the control flow graph. */
...@@ -2440,13 +2440,11 @@ add_branch_dependences (head, tail) ...@@ -2440,13 +2440,11 @@ add_branch_dependences (head, tail)
static struct deps *bb_deps; static struct deps *bb_deps;
/* After computing the dependencies for block BB, propagate the dependencies /* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. MAX_REG is the number found in TMP_DEPS to the successors of the block. */
of registers. */
static void static void
propagate_deps (bb, tmp_deps, max_reg) propagate_deps (bb, tmp_deps)
int bb; int bb;
struct deps *tmp_deps; struct deps *tmp_deps;
int max_reg;
{ {
int b = BB_TO_BLOCK (bb); int b = BB_TO_BLOCK (bb);
int e, first_edge; int e, first_edge;
...@@ -2481,43 +2479,28 @@ propagate_deps (bb, tmp_deps, max_reg) ...@@ -2481,43 +2479,28 @@ propagate_deps (bb, tmp_deps, max_reg)
continue; continue;
} }
for (reg = 0; reg < max_reg; reg++) /* The reg_last lists are inherited by bb_succ. */
EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
{ {
/* reg-last-uses lists are inherited by bb_succ. */ struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
for (u = tmp_deps->reg_last_uses[reg]; u; u = XEXP (u, 1)) struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
{
if (find_insn_list (XEXP (u, 0), for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
succ_deps->reg_last_uses[reg])) if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
continue; succ_deps_reg->uses
= alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
succ_deps->reg_last_uses[reg]
= alloc_INSN_LIST (XEXP (u, 0), for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
succ_deps->reg_last_uses[reg]); if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
} succ_deps_reg->sets
= alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
/* reg-last-defs lists are inherited by bb_succ. */
for (u = tmp_deps->reg_last_sets[reg]; u; u = XEXP (u, 1)) for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
{ if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
if (find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers
succ_deps->reg_last_sets[reg])) = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
continue; });
IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
succ_deps->reg_last_sets[reg]
= alloc_INSN_LIST (XEXP (u, 0),
succ_deps->reg_last_sets[reg]);
}
for (u = tmp_deps->reg_last_clobbers[reg]; u; u = XEXP (u, 1))
{
if (find_insn_list (XEXP (u, 0),
succ_deps->reg_last_clobbers[reg]))
continue;
succ_deps->reg_last_clobbers[reg]
= alloc_INSN_LIST (XEXP (u, 0),
succ_deps->reg_last_clobbers[reg]);
}
}
/* Mem read/write lists are inherited by bb_succ. */ /* Mem read/write lists are inherited by bb_succ. */
link_insn = tmp_deps->pending_read_insns; link_insn = tmp_deps->pending_read_insns;
...@@ -2554,27 +2537,17 @@ propagate_deps (bb, tmp_deps, max_reg) ...@@ -2554,27 +2537,17 @@ propagate_deps (bb, tmp_deps, max_reg)
/* last_function_call is inherited by bb_succ. */ /* last_function_call is inherited by bb_succ. */
for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1)) for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
{ if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
if (find_insn_list (XEXP (u, 0),
succ_deps->last_function_call))
continue;
succ_deps->last_function_call succ_deps->last_function_call
= alloc_INSN_LIST (XEXP (u, 0), = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
succ_deps->last_function_call);
}
/* last_pending_memory_flush is inherited by bb_succ. */ /* last_pending_memory_flush is inherited by bb_succ. */
for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1)) for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
{ if (! find_insn_list (XEXP (u, 0),
if (find_insn_list (XEXP (u, 0),
succ_deps->last_pending_memory_flush)) succ_deps->last_pending_memory_flush))
continue;
succ_deps->last_pending_memory_flush succ_deps->last_pending_memory_flush
= alloc_INSN_LIST (XEXP (u, 0), = alloc_INSN_LIST (XEXP (u, 0),
succ_deps->last_pending_memory_flush); succ_deps->last_pending_memory_flush);
}
/* sched_before_next_call is inherited by bb_succ. */ /* sched_before_next_call is inherited by bb_succ. */
x = LOG_LINKS (tmp_deps->sched_before_next_call); x = LOG_LINKS (tmp_deps->sched_before_next_call);
...@@ -2594,8 +2567,8 @@ propagate_deps (bb, tmp_deps, max_reg) ...@@ -2594,8 +2567,8 @@ propagate_deps (bb, tmp_deps, max_reg)
Specifically for reg-reg data dependences, the block insns are Specifically for reg-reg data dependences, the block insns are
scanned by sched_analyze () top-to-bottom. Two lists are scanned by sched_analyze () top-to-bottom. Two lists are
maintained by sched_analyze (): reg_last_sets[] for register DEFs, maintained by sched_analyze (): reg_last[].sets for register DEFs,
and reg_last_uses[] for register USEs. and reg_last[].uses for register USEs.
When analysis is completed for bb, we update for its successors: When analysis is completed for bb, we update for its successors:
; - DEFS[succ] = Union (DEFS [succ], DEFS [bb]) ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
...@@ -2609,7 +2582,6 @@ compute_block_backward_dependences (bb) ...@@ -2609,7 +2582,6 @@ compute_block_backward_dependences (bb)
int bb; int bb;
{ {
rtx head, tail; rtx head, tail;
int max_reg = max_reg_num ();
struct deps tmp_deps; struct deps tmp_deps;
tmp_deps = bb_deps[bb]; tmp_deps = bb_deps[bb];
...@@ -2620,18 +2592,12 @@ compute_block_backward_dependences (bb) ...@@ -2620,18 +2592,12 @@ compute_block_backward_dependences (bb)
add_branch_dependences (head, tail); add_branch_dependences (head, tail);
if (current_nr_blocks > 1) if (current_nr_blocks > 1)
propagate_deps (bb, &tmp_deps, max_reg); propagate_deps (bb, &tmp_deps);
/* Free up the INSN_LISTs. */ /* Free up the INSN_LISTs. */
free_deps (&tmp_deps); free_deps (&tmp_deps);
/* Assert that we won't need bb_reg_last_* for this block anymore.
The vectors we're zeroing out have just been freed by the call to
free_deps. */
bb_deps[bb].reg_last_uses = 0;
bb_deps[bb].reg_last_sets = 0;
bb_deps[bb].reg_last_clobbers = 0;
} }
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
them to the unused_*_list variables, so that they can be reused. */ them to the unused_*_list variables, so that they can be reused. */
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment