Commit 88302d54 by Jan Hubicka

sched-ebb.c: Rename struct deps to struct deps_desc.


	* sched-ebb.c: Rename struct deps to struct deps_desc.
	* ddg.c: Likewise.
	* sel-sched-ir.c: Likewise.
	* sched-deps.c: Likewise.
	* sched-int.h: Likewise.
	* sched-rgn.c: Likewise.

From-SVN: r159923
2010-05-27 Jon `:wqBeniston <jon@beniston.com
2010-05-27 Jan Hubicka <jh@suse.cz>
* sched-ebb.c: Rename struct deps to struct deps_desc.
* ddg.c: Likewise.
* sel-sched-ir.c: Likewise.
* sched-deps.c: Likewise.
* sched-int.h: Likewise.
* sched-rgn.c: Likewise.
2010-05-27 Jon Beniston <jon@beniston.com>
PR 43726
* config/lm32/lm32.h: Remove definition of
......
@@ -390,7 +390,7 @@ build_intra_loop_deps (ddg_ptr g)
{
int i;
/* Hold the dependency analysis state during dependency calculations. */
struct deps tmp_deps;
struct deps_desc tmp_deps;
rtx head, tail;
/* Build the dependence information, using the sched_analyze function. */
......
@@ -442,15 +442,15 @@ static int cache_size;
static int deps_may_trap_p (const_rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (struct deps *, rtx,
static void add_dependence_list_and_free (struct deps_desc *, rtx,
rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);
static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);
static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
@@ -1402,7 +1402,7 @@ add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
is not readonly. */
static void
add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
int uncond, enum reg_note dep_type)
{
rtx list, next;
@@ -1541,7 +1541,7 @@ fixup_sched_groups (rtx insn)
so that we can do memory aliasing on it. */
static void
add_insn_mem_dependence (struct deps *deps, bool read_p,
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
rtx insn, rtx mem)
{
rtx *insn_list;
@@ -1580,7 +1580,7 @@ add_insn_mem_dependence (struct deps *deps, bool read_p,
dependencies for a read operation, similarly with FOR_WRITE. */
static void
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
int for_write)
{
if (for_write)
@@ -1764,7 +1764,7 @@ create_insn_reg_set (int regno, rtx insn)
/* Set up insn register uses for INSN and dependency context DEPS. */
static void
setup_insn_reg_uses (struct deps *deps, rtx insn)
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
unsigned i;
reg_set_iterator rsi;
@@ -2058,7 +2058,7 @@ static bool can_start_lhs_rhs_p;
/* Extend reg info for the deps context DEPS given that
we have just generated a register numbered REGNO. */
static void
extend_deps_reg_info (struct deps *deps, int regno)
extend_deps_reg_info (struct deps_desc *deps, int regno)
{
int max_regno = regno + 1;
@@ -2107,7 +2107,7 @@ maybe_extend_reg_info_p (void)
CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
static void
sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
enum rtx_code ref, rtx insn)
{
/* We could emit new pseudos in renaming. Extend the reg structures. */
@@ -2186,7 +2186,7 @@ sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
destination of X, and reads of everything mentioned. */
static void
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
@@ -2358,7 +2358,7 @@ sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
{
int i;
int j;
@@ -2609,7 +2609,7 @@ sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
/* Analyze an INSN with pattern X to find all dependencies. */
static void
sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
@@ -3224,7 +3224,7 @@ call_may_noreturn_p (rtx insn)
/* Analyze INSN with DEPS as a context. */
void
deps_analyze_insn (struct deps *deps, rtx insn)
deps_analyze_insn (struct deps_desc *deps, rtx insn)
{
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
@@ -3357,7 +3357,7 @@ deps_analyze_insn (struct deps *deps, rtx insn)
/* Initialize DEPS for the new block beginning with HEAD. */
void
deps_start_bb (struct deps *deps, rtx head)
deps_start_bb (struct deps_desc *deps, rtx head)
{
gcc_assert (!deps->readonly);
@@ -3378,7 +3378,7 @@ deps_start_bb (struct deps *deps, rtx head)
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
dependencies for each insn. */
void
sched_analyze (struct deps *deps, rtx head, rtx tail)
sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
{
rtx insn;
@@ -3463,10 +3463,10 @@ sched_free_deps (rtx head, rtx tail, bool resolved_p)
/* Initialize variables for region data dependence analysis.
When LAZY_REG_LAST is true, do not allocate reg_last array
of struct deps immediately. */
of struct deps_desc immediately. */
void
init_deps (struct deps *deps, bool lazy_reg_last)
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
@@ -3498,7 +3498,7 @@ init_deps (struct deps *deps, bool lazy_reg_last)
/* Init only reg_last field of DEPS, which was not allocated before as
we inited DEPS lazily. */
void
init_deps_reg_last (struct deps *deps)
init_deps_reg_last (struct deps_desc *deps)
{
gcc_assert (deps && deps->max_reg > 0);
gcc_assert (deps->reg_last == NULL);
@@ -3510,7 +3510,7 @@ init_deps_reg_last (struct deps *deps)
/* Free insn lists found in DEPS. */
void
free_deps (struct deps *deps)
free_deps (struct deps_desc *deps)
{
unsigned i;
reg_set_iterator rsi;
@@ -3559,7 +3559,7 @@ free_deps (struct deps *deps)
/* Remove INSN from dependence contexts DEPS. Caution: reg_conditional_sets
is not handled. */
void
remove_from_deps (struct deps *deps, rtx insn)
remove_from_deps (struct deps_desc *deps, rtx insn)
{
int removed;
unsigned i;
......
@@ -463,7 +463,7 @@ static basic_block
schedule_ebb (rtx head, rtx tail)
{
basic_block first_bb, target_bb;
struct deps tmp_deps;
struct deps_desc tmp_deps;
first_bb = BLOCK_FOR_INSN (head);
last_bb = BLOCK_FOR_INSN (tail);
......
@@ -448,7 +448,7 @@ struct deps_reg
};
/* Describe state of dependencies used during sched_analyze phase. */
struct deps
struct deps_desc
{
/* The *_insns and *_mems are paired lists. Each pending memory operation
will have a pointer to the MEM rtx on one list and a pointer to the
@@ -546,7 +546,7 @@ struct deps
BOOL_BITFIELD readonly : 1;
};
typedef struct deps *deps_t;
typedef struct deps_desc *deps_t;
/* This structure holds some state of the current scheduling pass, and
contains some function pointers that abstract out some of the non-generic
@@ -1198,14 +1198,14 @@ extern struct sched_deps_info_def *sched_deps_info;
extern bool sched_insns_conditions_mutex_p (const_rtx, const_rtx);
extern bool sched_insn_is_legitimate_for_speculation_p (const_rtx, ds_t);
extern void add_dependence (rtx, rtx, enum reg_note);
extern void sched_analyze (struct deps *, rtx, rtx);
extern void init_deps (struct deps *, bool);
extern void init_deps_reg_last (struct deps *);
extern void free_deps (struct deps *);
extern void sched_analyze (struct deps_desc *, rtx, rtx);
extern void init_deps (struct deps_desc *, bool);
extern void init_deps_reg_last (struct deps_desc *);
extern void free_deps (struct deps_desc *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
extern void deps_analyze_insn (struct deps *, rtx);
extern void remove_from_deps (struct deps *, rtx);
extern void deps_analyze_insn (struct deps_desc *, rtx);
extern void remove_from_deps (struct deps_desc *, rtx);
extern dw_t get_dep_weak_1 (ds_t, ds_t);
extern dw_t get_dep_weak (ds_t, ds_t);
@@ -1227,7 +1227,7 @@ extern void haifa_note_reg_use (int);
extern void maybe_extend_reg_info_p (void);
extern void deps_start_bb (struct deps *, rtx);
extern void deps_start_bb (struct deps_desc *, rtx);
extern enum reg_note ds_to_dt (ds_t);
extern bool deps_pools_are_empty_p (void);
@@ -1337,7 +1337,7 @@ extern void debug_dependencies (rtx, rtx);
extern void free_rgn_deps (void);
extern int contributes_to_priority (rtx, rtx);
extern void extend_rgns (int *, int *, sbitmap, int *);
extern void deps_join (struct deps *, struct deps *);
extern void deps_join (struct deps_desc *, struct deps_desc *);
extern void rgn_setup_common_sched_info (void);
extern void rgn_setup_sched_infos (void);
......
@@ -237,7 +237,7 @@ static void compute_block_dependences (int);
static void schedule_region (int);
static rtx concat_INSN_LIST (rtx, rtx);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
static void propagate_deps (int, struct deps *);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
@@ -2567,7 +2567,7 @@ add_branch_dependences (rtx head, rtx tail)
the variables of its predecessors. When the analysis for a bb completes,
we save the contents to the corresponding bb_deps[bb] variable. */
static struct deps *bb_deps;
static struct deps_desc *bb_deps;
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
@@ -2601,7 +2601,7 @@ concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
/* Join PRED_DEPS to the SUCC_DEPS. */
void
deps_join (struct deps *succ_deps, struct deps *pred_deps)
deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
{
unsigned reg;
reg_set_iterator rsi;
@@ -2660,7 +2660,7 @@ deps_join (struct deps *succ_deps, struct deps *pred_deps)
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
propagate_deps (int bb, struct deps *pred_deps)
propagate_deps (int bb, struct deps_desc *pred_deps)
{
basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
edge_iterator ei;
@@ -2715,7 +2715,7 @@ static void
compute_block_dependences (int bb)
{
rtx head, tail;
struct deps tmp_deps;
struct deps_desc tmp_deps;
tmp_deps = bb_deps[bb];
@@ -3150,7 +3150,7 @@ sched_rgn_compute_dependencies (int rgn)
init_deps_global ();
/* Initializations for region data dependence analysis. */
bb_deps = XNEWVEC (struct deps, current_nr_blocks);
bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb, false);
......
@@ -426,7 +426,7 @@ reset_target_context (tc_t tc, bool clean_p)
}
/* Functions to work with dependence contexts.
Dc (aka deps context, aka deps_t, aka struct deps *) is short for dependence
Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
context. It accumulates information about processed insns to decide if
current insn is dependent on the processed ones. */
@@ -442,7 +442,7 @@ copy_deps_context (deps_t to, deps_t from)
static deps_t
alloc_deps_context (void)
{
return XNEW (struct deps);
return XNEW (struct deps_desc);
}
/* Allocate and initialize dep context. */
@@ -2674,7 +2674,7 @@ init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
static void
deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
{
struct deps _dc, *dc = &_dc;
struct deps_desc _dc, *dc = &_dc;
deps_init_id_data.where = DEPS_IN_NOWHERE;
deps_init_id_data.id = id;
@@ -3229,7 +3229,7 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
{
int i;
ds_t ds;
struct deps *dc;
struct deps_desc *dc;
if (INSN_SIMPLEJUMP_P (pred))
/* Unconditional jump is just a transfer of control flow.
......
@@ -727,7 +727,7 @@ struct _sel_insn_data
htab_t transformed_insns;
/* A context incapsulating this insn. */
struct deps deps_context;
struct deps_desc deps_context;
/* This field is initialized at the beginning of scheduling and is used
to handle sched group instructions. If it is non-null, then it points
......
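
For orientation, the change is purely mechanical: the struct tag is renamed from deps to deps_desc, while the deps_t pointer typedef in sched-int.h keeps its old name, so only code that spells out the tag (declarations such as struct deps tmp_deps, function prototypes, and XNEW/XNEWVEC allocations) has to be touched. Below is a minimal, self-contained C sketch of that pattern; it is illustrative only, not GCC code, and the max_reg and readonly fields are stand-ins borrowed from the fields visible in the hunks above.

/* Illustrative sketch (not GCC code): rename a struct tag from "deps"
   to "deps_desc" while keeping the pointer typedef name stable, so
   callers written against the alias compile unchanged.  */
#include <stdio.h>
#include <stdlib.h>

struct deps_desc
{
  int max_reg;    /* stand-ins for the real scheduler fields */
  int readonly;
};

/* The short alias keeps its historical name, as in sched-int.h.  */
typedef struct deps_desc *deps_t;

static void
init_deps_demo (struct deps_desc *deps)
{
  deps->max_reg = 0;
  deps->readonly = 0;
}

int
main (void)
{
  /* Code that names the tag explicitly must use the new spelling...  */
  struct deps_desc *dc = (struct deps_desc *) malloc (sizeof *dc);
  if (!dc)
    return 1;
  init_deps_demo (dc);

  /* ...while code written against the deps_t alias is untouched.  */
  deps_t alias = dc;
  printf ("max_reg=%d readonly=%d\n", alias->max_reg, alias->readonly);

  free (dc);
  return 0;
}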