Commit 37a0f8a5 by Richard Henderson, committed by Richard Henderson

sched-deps.c (reg_pending_uses_head): New.

        * sched-deps.c (reg_pending_uses_head): New.
        (reg_pending_barrier): Rename from reg_pending_sets_all.
        (find_insn_list): Don't mark inline.
        (find_insn_mem_list): Remove.
        (add_dependence_list, add_dependence_list_and_free): New.
        (flush_pending_lists): Replace only_write param with separate
        for_read and for_write parameters.  Update all callers.  Use
        add_dependence_list_and_free.
        (sched_analyze_1): Do not add reg dependencies here; just set
        the pending bits.  Use add_dependence_list.
        (sched_analyze_2): Likewise.
        (sched_analyze_insn): Replace schedule_barrier_found with
        reg_pending_barrier.  Add all dependencies for pending reg
        uses, sets, and clobbers.
        (sched_analyze): Don't add reg dependencies for calls, just
        set pending bits.  Use regs_invalidated_by_call.  Treat
        sched_before_next_call as a normal list, not a fake insn.
        (init_deps): No funny init for sched_before_next_call.
        (free_deps): Free pending mems lists.  Don't zero reg_last.
        (init_deps_global): Init reg_pending_uses.
        (finish_deps_global): Free it.
        * sched-int.h (deps): Make in_post_call_group_p boolean.  Update docs.
        (find_insn_mem_list): Remove.
        * sched-rgn.c (concat_INSN_LIST, concat_insn_mem_list): New.
        (propagate_deps): Use them.  Zero temp mem lists.

From-SVN: r49262
parent cea3bd3e
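The two new helpers named in the entry, add_dependence_list and add_dependence_list_and_free, live in the sched-deps.c part of the patch, which is not reproduced below.  As a reading aid, here is a minimal sketch of the shape such helpers take, inferred from the ChangeLog wording and from the INSN_LIST idioms used elsewhere in this commit; the exact bodies in sched-deps.c may differ.

    /* Sketch only -- inferred from the ChangeLog, not quoted from the patch.
       Add a dependence of INSN on every insn in LIST.  */
    static void
    add_dependence_list (insn, list, dep_type)
         rtx insn, list;
         enum reg_note dep_type;
    {
      for (; list; list = XEXP (list, 1))
        add_dependence (insn, XEXP (list, 0), dep_type);
    }

    /* As above, but also release the INSN_LIST nodes and clear *LISTP;
       this is the "and_free" variant used by flush_pending_lists.  */
    static void
    add_dependence_list_and_free (insn, listp, dep_type)
         rtx insn;
         rtx *listp;
         enum reg_note dep_type;
    {
      rtx list, next;
      for (list = *listp, *listp = 0; list; list = next)
        {
          next = XEXP (list, 1);
          add_dependence (insn, XEXP (list, 0), dep_type);
          free_INSN_LIST_node (list);
        }
    }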
@@ -68,19 +68,20 @@ struct deps
      too large.  */
   rtx last_pending_memory_flush;
-  /* The last function call we have seen.  All hard regs, and, of course,
-     the last function call, must depend on this.  */
+  /* A list of the last function calls we have seen.  We use a list to
+     represent last function calls from multiple predecessor blocks.
+     Used to prevent register lifetimes from expanding unnecessarily.  */
   rtx last_function_call;
+  /* A list of insns which use a pseudo register that does not already
+     cross a call.  We create dependencies between each of those insn
+     and the next call insn, to ensure that they won't cross a call after
+     scheduling is done.  */
+  rtx sched_before_next_call;
   /* Used to keep post-call psuedo/hard reg movements together with
      the call.  */
-  int in_post_call_group_p;
-  /* The LOG_LINKS field of this is a list of insns which use a pseudo
-     register that does not already cross a call.  We create
-     dependencies between each of those insn and the next call insn,
-     to ensure that they won't cross a call after scheduling is done.  */
-  rtx sched_before_next_call;
+  bool in_post_call_group_p;
   /* The maximum register number for the following arrays.  Before reload
      this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER.  */
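Note on the idiom: last_function_call, sched_before_next_call, and the reg_last/pending lists above are all INSN_LIST chains, where XEXP (l, 0) is the insn stored in a node and XEXP (l, 1) is the rest of the list.  A small illustration of how such a field is walked and extended (illustration only; deps is a struct deps *, and note_call_insn and call_insn are hypothetical stand-ins, not names from the patch):

    /* Walk the calls recorded so far, then record one more.  */
    rtx l;
    for (l = deps->last_function_call; l; l = XEXP (l, 1))
      note_call_insn (XEXP (l, 0));          /* hypothetical consumer */
    deps->last_function_call
      = alloc_INSN_LIST (call_insn, deps->last_function_call);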
@@ -274,7 +275,6 @@ extern void free_deps PARAMS ((struct deps *));
 extern void init_deps_global PARAMS ((void));
 extern void finish_deps_global PARAMS ((void));
 extern void compute_forward_dependences PARAMS ((rtx, rtx));
-extern int find_insn_mem_list PARAMS ((rtx, rtx, rtx, rtx));
 extern rtx find_insn_list PARAMS ((rtx, rtx));
 extern void init_dependency_caches PARAMS ((int));
 extern void free_dependency_caches PARAMS ((void));
......
@@ -300,6 +300,8 @@ void debug_dependencies PARAMS ((void));
 static void init_regions PARAMS ((void));
 static void schedule_region PARAMS ((int));
+static rtx concat_INSN_LIST PARAMS ((rtx, rtx));
+static void concat_insn_mem_list PARAMS ((rtx, rtx, rtx *, rtx *));
 static void propagate_deps PARAMS ((int, struct deps *));
 static void free_pending_lists PARAMS ((void));
@@ -2299,8 +2301,7 @@ add_branch_dependences (head, tail)
     {
       if (GET_CODE (insn) != NOTE)
         {
-          if (last != 0
-              && !find_insn_list (insn, LOG_LINKS (last)))
+          if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
             {
               add_dependence (last, insn, REG_DEP_ANTI);
               INSN_REF_COUNT (insn)++;
@@ -2356,125 +2357,122 @@ add_branch_dependences (head, tail)
 static struct deps *bb_deps;
+/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD.  */
+static rtx
+concat_INSN_LIST (copy, old)
+     rtx copy, old;
+{
+  rtx new = old;
+  for (; copy ; copy = XEXP (copy, 1))
+    new = alloc_INSN_LIST (XEXP (copy, 0), new);
+  return new;
+}
+static void
+concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
+     rtx copy_insns, copy_mems;
+     rtx *old_insns_p, *old_mems_p;
+{
+  rtx new_insns = *old_insns_p;
+  rtx new_mems = *old_mems_p;
+  while (copy_insns)
+    {
+      new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
+      new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
+      copy_insns = XEXP (copy_insns, 1);
+      copy_mems = XEXP (copy_mems, 1);
+    }
+  *old_insns_p = new_insns;
+  *old_mems_p = new_mems;
+}
 /* After computing the dependencies for block BB, propagate the dependencies
    found in TMP_DEPS to the successors of the block.  */
 static void
-propagate_deps (bb, tmp_deps)
+propagate_deps (bb, pred_deps)
      int bb;
-     struct deps *tmp_deps;
+     struct deps *pred_deps;
 {
   int b = BB_TO_BLOCK (bb);
   int e, first_edge;
-  int reg;
-  rtx link_insn, link_mem;
-  rtx u;
-  /* These lists should point to the right place, for correct
-     freeing later.  */
-  bb_deps[bb].pending_read_insns = tmp_deps->pending_read_insns;
-  bb_deps[bb].pending_read_mems = tmp_deps->pending_read_mems;
-  bb_deps[bb].pending_write_insns = tmp_deps->pending_write_insns;
-  bb_deps[bb].pending_write_mems = tmp_deps->pending_write_mems;
   /* bb's structures are inherited by its successors.  */
   first_edge = e = OUT_EDGES (b);
-  if (e <= 0)
-    return;
-  do
-    {
-      rtx x;
-      int b_succ = TO_BLOCK (e);
-      int bb_succ = BLOCK_TO_BB (b_succ);
-      struct deps *succ_deps = bb_deps + bb_succ;
-      /* Only bbs "below" bb, in the same region, are interesting.  */
-      if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
-          || bb_succ <= bb)
-        {
-          e = NEXT_OUT (e);
-          continue;
-        }
-      /* The reg_last lists are inherited by bb_succ.  */
-      EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
-        {
-          struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
-          struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
-          for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
-            if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
-              succ_deps_reg->uses
-                = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
-          for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
-            if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
-              succ_deps_reg->sets
-                = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
-          for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
-            if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
-              succ_deps_reg->clobbers
-                = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
-        });
-      IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
-      /* Mem read/write lists are inherited by bb_succ.  */
-      link_insn = tmp_deps->pending_read_insns;
-      link_mem = tmp_deps->pending_read_mems;
-      while (link_insn)
-        {
-          if (!(find_insn_mem_list (XEXP (link_insn, 0),
-                                    XEXP (link_mem, 0),
-                                    succ_deps->pending_read_insns,
-                                    succ_deps->pending_read_mems)))
-            add_insn_mem_dependence (succ_deps, &succ_deps->pending_read_insns,
-                                     &succ_deps->pending_read_mems,
-                                     XEXP (link_insn, 0), XEXP (link_mem, 0));
-          link_insn = XEXP (link_insn, 1);
-          link_mem = XEXP (link_mem, 1);
-        }
+  if (e > 0)
+    do
+      {
+        int b_succ = TO_BLOCK (e);
+        int bb_succ = BLOCK_TO_BB (b_succ);
+        struct deps *succ_deps = bb_deps + bb_succ;
+        int reg;
+        /* Only bbs "below" bb, in the same region, are interesting.  */
+        if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
+            || bb_succ <= bb)
+          {
+            e = NEXT_OUT (e);
+            continue;
+          }
-      link_insn = tmp_deps->pending_write_insns;
-      link_mem = tmp_deps->pending_write_mems;
-      while (link_insn)
-        {
-          if (!(find_insn_mem_list (XEXP (link_insn, 0),
-                                    XEXP (link_mem, 0),
-                                    succ_deps->pending_write_insns,
-                                    succ_deps->pending_write_mems)))
-            add_insn_mem_dependence (succ_deps,
-                                     &succ_deps->pending_write_insns,
-                                     &succ_deps->pending_write_mems,
-                                     XEXP (link_insn, 0), XEXP (link_mem, 0));
-          link_insn = XEXP (link_insn, 1);
-          link_mem = XEXP (link_mem, 1);
-        }
+        /* The reg_last lists are inherited by bb_succ.  */
+        EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
+          {
+            struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
+            struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
+            succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
+            succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
+            succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
+                                                  succ_rl->clobbers);
+          });
+        IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
+        /* Mem read/write lists are inherited by bb_succ.  */
+        concat_insn_mem_list (pred_deps->pending_read_insns,
+                              pred_deps->pending_read_mems,
+                              &succ_deps->pending_read_insns,
+                              &succ_deps->pending_read_mems);
+        concat_insn_mem_list (pred_deps->pending_write_insns,
+                              pred_deps->pending_write_mems,
+                              &succ_deps->pending_write_insns,
+                              &succ_deps->pending_write_mems);
+        succ_deps->last_pending_memory_flush
+          = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
+                              succ_deps->last_pending_memory_flush);
+        succ_deps->pending_lists_length += pred_deps->pending_lists_length;
+        succ_deps->pending_flush_length += pred_deps->pending_flush_length;
+        /* last_function_call is inherited by bb_succ.  */
+        succ_deps->last_function_call
+          = concat_INSN_LIST (pred_deps->last_function_call,
+                              succ_deps->last_function_call);
+        /* sched_before_next_call is inherited by bb_succ.  */
+        succ_deps->sched_before_next_call
+          = concat_INSN_LIST (pred_deps->sched_before_next_call,
+                              succ_deps->sched_before_next_call);
+        e = NEXT_OUT (e);
+      }
+    while (e != first_edge);
-      /* last_function_call is inherited by bb_succ.  */
-      for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
-        if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
-          succ_deps->last_function_call
-            = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
-      /* last_pending_memory_flush is inherited by bb_succ.  */
-      for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
-        if (! find_insn_list (XEXP (u, 0),
-                              succ_deps->last_pending_memory_flush))
-          succ_deps->last_pending_memory_flush
-            = alloc_INSN_LIST (XEXP (u, 0),
-                               succ_deps->last_pending_memory_flush);
-      /* sched_before_next_call is inherited by bb_succ.  */
-      x = LOG_LINKS (tmp_deps->sched_before_next_call);
-      for (; x; x = XEXP (x, 1))
-        add_dependence (succ_deps->sched_before_next_call,
-                        XEXP (x, 0), REG_DEP_ANTI);
-      e = NEXT_OUT (e);
-    }
-  while (e != first_edge);
+  /* These lists should point to the right place, for correct
+     freeing later.  */
+  bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
+  bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
+  bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
+  bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
+  /* Can't allow these to be freed twice.  */
+  pred_deps->pending_read_insns = 0;
+  pred_deps->pending_read_mems = 0;
+  pred_deps->pending_write_insns = 0;
+  pred_deps->pending_write_mems = 0;
 }
 /* Compute backward dependences inside bb.  In a multiple blocks region:
......
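Taken together, the sched-rgn.c changes drop the old element-by-element inheritance loops, which consulted find_insn_list and find_insn_mem_list to skip duplicates, in favor of unconditional prepending via concat_INSN_LIST and concat_insn_mem_list; the predecessor's pending mem lists are then handed to bb_deps[bb] and zeroed so they cannot be freed twice.  A standalone model of the prepend (illustration only; GCC's real lists are rtx INSN_LISTs, not this struct) shows the resulting order and that duplicates are no longer filtered:

    /* Standalone model of concat_INSN_LIST's behaviour; compiles with any
       C compiler.  Not GCC-internal code.  */
    #include <stdio.h>
    #include <stdlib.h>

    struct node { int insn; struct node *next; };

    static struct node *
    cons (int insn, struct node *next)
    {
      struct node *n = malloc (sizeof *n);
      n->insn = insn;
      n->next = next;
      return n;
    }

    /* Prepend every element of COPY onto OLD, front to back, with no
       duplicate check -- the same shape as concat_INSN_LIST above.  */
    static struct node *
    concat_list (struct node *copy, struct node *old)
    {
      struct node *new = old;
      for (; copy; copy = copy->next)
        new = cons (copy->insn, new);
      return new;
    }

    int
    main (void)
    {
      /* copy = (1 2 3), old = (4).  */
      struct node *copy = cons (1, cons (2, cons (3, NULL)));
      struct node *old = cons (4, NULL);
      struct node *merged = concat_list (copy, old);

      /* Prints "3 2 1 4": COPY's elements end up reversed in front of OLD,
         and nothing is deduplicated.  */
      for (; merged; merged = merged->next)
        printf ("%d ", merged->insn);
      printf ("\n");
      return 0;
    }

The trade is a possible duplicate entry in a successor's list in exchange for not rescanning that list once per copied element.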