Commit 60564289 by Kaveh R. Ghazi, committed by Kaveh Ghazi (2008-07-15)

df-problems.c (df_set_note): Avoid C++ keywords.

	* df-problems.c (df_set_note): Avoid C++ keywords.
	* df-scan.c (df_ref_change_reg_with_loc_1): Likewise.
	* dse.c (record_store, remove_useless_values): Likewise.
	* emit-rtl.c (gen_reg_rtx, update_reg_offset, gen_rtx_REG_offset,
	gen_reg_rtx_offset, operand_subword, change_address_1,
	change_address, adjust_address_1, offset_address,
	widen_memory_access, emit_copy_of_insn_after): Likewise.
	* explow.c (round_push, allocate_dynamic_stack_space): Likewise.
	* fwprop.c (should_replace_address, propagate_rtx_1,
	propagate_rtx, try_fwprop_subst, forward_propagate_and_simplify):
	Likewise.
	* gcse.c (cprop_jump, find_implicit_sets, bypass_block,
	gcse_emit_move_after, update_ld_motion_stores): Likewise.
	* lcm.c (compute_insert_delete, pre_edge_lcm,
	compute_rev_insert_delete, pre_edge_rev_lcm): Likewise.
	* lower-subreg.c (resolve_reg_notes): Likewise.
	* mode-switching.c (optimize_mode_switching): Likewise.

From-SVN: r137848
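The reason these identifiers need renaming is that names like 'this', 'new', and 'delete' are ordinary identifiers in C but reserved keywords in C++, so any file using them cannot be compiled by a C++ compiler. Purely as an illustration of the pattern (a hypothetical snippet in the style of the explow.c change below, not code taken from this commit):

/* Illustration only: round SIZE up to a multiple of ALIGN.
   Hypothetical example, not part of the commit.  */
#include <stdio.h>

static int
round_up (int size, int align)
{
  /* Before this style of rename, the line read "int new = ...", which a C
     compiler accepts but a C++ compiler rejects: "new" is a C++ keyword.  */
  int new_size = (size + align - 1) / align * align;
  return new_size;
}

int
main (void)
{
  printf ("%d\n", round_up (13, 8));   /* prints 16 */
  return 0;
}

Renaming the variable keeps the C semantics unchanged while letting the same source also build as C++.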
gcc/df-problems.c
@@ -3170,24 +3170,24 @@ df_kill_notes (rtx insn, rtx *old_dead_notes, rtx *old_unused_notes)
 static inline rtx
 df_set_note (enum reg_note note_type, rtx insn, rtx old, rtx reg)
 {
-  rtx this = old;
+  rtx curr = old;
   rtx prev = NULL;
 
-  while (this)
-    if (XEXP (this, 0) == reg)
+  while (curr)
+    if (XEXP (curr, 0) == reg)
       {
        if (prev)
-         XEXP (prev, 1) = XEXP (this, 1);
+         XEXP (prev, 1) = XEXP (curr, 1);
        else
-         old = XEXP (this, 1);
-       XEXP (this, 1) = REG_NOTES (insn);
-       REG_NOTES (insn) = this;
+         old = XEXP (curr, 1);
+       XEXP (curr, 1) = REG_NOTES (insn);
+       REG_NOTES (insn) = curr;
        return old;
       }
     else
       {
-       prev = this;
-       this = XEXP (this, 1);
+       prev = curr;
+       curr = XEXP (curr, 1);
       }
 
   /* Did not find the note.  */
gcc/df-scan.c
@@ -1887,10 +1887,10 @@ df_insn_change_bb (rtx insn, basic_block new_bb)
 /* Helper function for df_ref_change_reg_with_loc.  */
 static void
-df_ref_change_reg_with_loc_1 (struct df_reg_info *old, struct df_reg_info *new,
+df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df, struct df_reg_info *new_df,
                               int new_regno, rtx loc)
 {
-  struct df_ref *the_ref = old->reg_chain;
+  struct df_ref *the_ref = old_df->reg_chain;
 
   while (the_ref)
     {
@@ -1908,18 +1908,18 @@ df_ref_change_reg_with_loc_1 (struct df_reg_info *old, struct df_reg_info *new,
          if (prev_ref)
            prev_ref->next_reg = next_ref;
          else
-           old->reg_chain = next_ref;
+           old_df->reg_chain = next_ref;
 
          if (next_ref)
            next_ref->prev_reg = prev_ref;
 
-         old->n_refs--;
+         old_df->n_refs--;
 
          /* Put the ref into the new regno chain.  */
          the_ref->prev_reg = NULL;
-         the_ref->next_reg = new->reg_chain;
-         if (new->reg_chain)
-           new->reg_chain->prev_reg = the_ref;
-         new->reg_chain = the_ref;
-         new->n_refs++;
+         the_ref->next_reg = new_df->reg_chain;
+         if (new_df->reg_chain)
+           new_df->reg_chain->prev_reg = the_ref;
+         new_df->reg_chain = the_ref;
+         new_df->n_refs++;
 
          df_set_bb_dirty (DF_REF_BB (the_ref));
 
          /* Need to resort the record that the ref was in because the
gcc/dse.c
@@ -1295,7 +1295,7 @@ record_store (rtx body, bb_info_t bb_info)
     {
       insn_info_t next = ptr->next_local_store;
       store_info_t s_info = ptr->store_rec;
-      bool delete = true;
+      bool del = true;
 
       /* Skip the clobbers.  We delete the active insn if this insn
         shadows the set.  To have been put on the active list, it
@@ -1304,7 +1304,7 @@ record_store (rtx body, bb_info_t bb_info)
        s_info = s_info->next;
 
       if (s_info->alias_set != spill_alias_set)
-       delete = false;
+       del = false;
       else if (s_info->alias_set)
        {
          struct clear_alias_mode_holder *entry
@@ -1317,7 +1317,7 @@ record_store (rtx body, bb_info_t bb_info)
          if ((GET_MODE (mem) == GET_MODE (s_info->mem))
              && (GET_MODE (mem) == entry->mode))
            {
-             delete = true;
+             del = true;
              s_info->positions_needed = (unsigned HOST_WIDE_INT) 0;
            }
          if (dump_file)
@@ -1352,9 +1352,9 @@ record_store (rtx body, bb_info_t bb_info)
       /* An insn can be deleted if every position of every one of
         its s_infos is zero.  */
       if (s_info->positions_needed != (unsigned HOST_WIDE_INT) 0)
-       delete = false;
+       del = false;
 
-      if (delete)
+      if (del)
        {
          insn_info_t insn_to_delete = ptr;
@@ -2080,7 +2080,7 @@ remove_useless_values (cselib_val *base)
   while (insn_info)
     {
       store_info_t store_info = insn_info->store_rec;
-      bool delete = false;
+      bool del = false;
 
       /* If ANY of the store_infos match the cselib group that is
         being deleted, then the insn can not be deleted.  */
@@ -2089,13 +2089,13 @@ remove_useless_values (cselib_val *base)
          if ((store_info->group_id == -1)
              && (store_info->cse_base == base))
            {
-             delete = true;
+             del = true;
              break;
            }
          store_info = store_info->next;
        }
 
-      if (delete)
+      if (del)
        {
          if (last)
            last->next_local_store = insn_info->next_local_store;
gcc/explow.c
@@ -874,10 +874,10 @@ round_push (rtx size)
   if (GET_CODE (size) == CONST_INT)
     {
-      HOST_WIDE_INT new = (INTVAL (size) + align - 1) / align * align;
+      HOST_WIDE_INT new_size = (INTVAL (size) + align - 1) / align * align;
 
-      if (INTVAL (size) != new)
-       size = GEN_INT (new);
+      if (INTVAL (size) != new_size)
+       size = GEN_INT (new_size);
     }
   else
     {
@@ -1136,10 +1136,10 @@ allocate_dynamic_stack_space (rtx size, rtx target, int known_align)
   if (GET_CODE (size) == CONST_INT)
     {
-      HOST_WIDE_INT new = INTVAL (size) / align * align;
+      HOST_WIDE_INT new_size = INTVAL (size) / align * align;
 
-      if (INTVAL (size) != new)
-       size = GEN_INT (new);
+      if (INTVAL (size) != new_size)
+       size = GEN_INT (new_size);
     }
   else
     {
gcc/gcse.c
@@ -2791,7 +2791,7 @@ find_avail_set (int regno, rtx insn)
 static int
 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
 {
-  rtx new, set_src, note_src;
+  rtx new_rtx, set_src, note_src;
   rtx set = pc_set (jump);
   rtx note = find_reg_equal_equiv_note (jump);
@@ -2823,22 +2823,22 @@ cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
   else
     setcc = NULL_RTX;
 
-  new = simplify_replace_rtx (set_src, from, src);
+  new_rtx = simplify_replace_rtx (set_src, from, src);
 
   /* If no simplification can be made, then try the next register.  */
-  if (rtx_equal_p (new, SET_SRC (set)))
+  if (rtx_equal_p (new_rtx, SET_SRC (set)))
     return 0;
 
   /* If this is now a no-op delete it, otherwise this must be a valid insn.  */
-  if (new == pc_rtx)
+  if (new_rtx == pc_rtx)
     delete_insn (jump);
   else
     {
       /* Ensure the value computed inside the jump insn to be equivalent
         to one computed by setcc.  */
-      if (setcc && modified_in_p (new, setcc))
+      if (setcc && modified_in_p (new_rtx, setcc))
        return 0;
 
-      if (! validate_unshare_change (jump, &SET_SRC (set), new, 0))
+      if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
        {
          /* When (some) constants are not valid in a comparison, and there
            are two registers to be replaced by constants before the entire
@@ -2849,8 +2849,8 @@ cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
            we need to attach a note to the branch itself to make this
            optimization work.  */
-         if (!rtx_equal_p (new, note_src))
-           set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
+         if (!rtx_equal_p (new_rtx, note_src))
+           set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
          return 0;
        }
@@ -2881,7 +2881,7 @@ cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
   /* If a conditional jump has been changed into unconditional jump, remove
      the jump and make the edge fallthru - this is always called in
      cfglayout mode.  */
-  if (new != pc_rtx && simplejump_p (jump))
+  if (new_rtx != pc_rtx && simplejump_p (jump))
     {
       edge e;
       edge_iterator ei;
@@ -3306,7 +3306,7 @@ find_implicit_sets (void)
 {
   basic_block bb, dest;
   unsigned int count;
-  rtx cond, new;
+  rtx cond, new_rtx;
 
   count = 0;
   FOR_EACH_BB (bb)
@@ -3327,9 +3327,9 @@ find_implicit_sets (void)
       if (dest && single_pred_p (dest)
          && dest != EXIT_BLOCK_PTR)
        {
-         new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
+         new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
                             XEXP (cond, 1));
-         implicit_sets[dest->index] = new;
+         implicit_sets[dest->index] = new_rtx;
          if (dump_file)
            {
              fprintf(dump_file, "Implicit set of reg %d in ",
@@ -3539,7 +3539,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
       unsigned int regno = REGNO (reg_used->reg_rtx);
       basic_block dest, old_dest;
       struct expr *set;
-      rtx src, new;
+      rtx src, new_rtx;
 
       if (regno >= max_gcse_regno)
        continue;
@@ -3560,7 +3560,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
                                    SET_DEST (PATTERN (setcc)),
                                    SET_SRC (PATTERN (setcc)));
 
-      new = simplify_replace_rtx (src, reg_used->reg_rtx,
+      new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
                                  SET_SRC (set->expr));
 
       /* Jump bypassing may have already placed instructions on
@@ -3568,14 +3568,14 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
         has instructions associated with it, as these insns won't
         get executed if the incoming edge is redirected.  */
-      if (new == pc_rtx)
+      if (new_rtx == pc_rtx)
        {
          edest = FALLTHRU_EDGE (bb);
          dest = edest->insns.r ? NULL : edest->dest;
        }
-      else if (GET_CODE (new) == LABEL_REF)
+      else if (GET_CODE (new_rtx) == LABEL_REF)
        {
-         dest = BLOCK_FOR_INSN (XEXP (new, 0));
+         dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
          /* Don't bypass edges containing instructions.  */
          edest = find_edge (bb, dest);
          if (edest && edest->insns.r)
@@ -4336,7 +4336,7 @@ pre_insert_copies (void)
 static rtx
 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
 {
-  rtx new;
+  rtx new_rtx;
   rtx set = single_set (insn), set2;
   rtx note;
   rtx eqv;
@@ -4344,20 +4344,20 @@ gcse_emit_move_after (rtx src, rtx dest, rtx insn)
   /* This should never fail since we're creating a reg->reg copy
      we've verified to be valid.  */
-  new = emit_insn_after (gen_move_insn (dest, src), insn);
+  new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
 
   /* Note the equivalence for local CSE pass.  */
-  set2 = single_set (new);
+  set2 = single_set (new_rtx);
   if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
-    return new;
+    return new_rtx;
 
   if ((note = find_reg_equal_equiv_note (insn)))
     eqv = XEXP (note, 0);
   else
     eqv = SET_SRC (set);
 
-  set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
+  set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
 
-  return new;
+  return new_rtx;
 }
 
 /* Delete redundant computations.
@@ -5384,7 +5384,7 @@ update_ld_motion_stores (struct expr * expr)
          rtx pat = PATTERN (insn);
          rtx src = SET_SRC (pat);
          rtx reg = expr->reaching_reg;
-         rtx copy, new;
+         rtx copy, new_rtx;
 
          /* If we've already copied it, continue.  */
          if (expr->reaching_reg == src)
@@ -5400,8 +5400,8 @@ update_ld_motion_stores (struct expr * expr)
            }
 
          copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
-         new = emit_insn_before (copy, insn);
-         record_one_set (REGNO (reg), new);
+         new_rtx = emit_insn_before (copy, insn);
+         record_one_set (REGNO (reg), new_rtx);
          SET_SRC (pat) = reg;
          df_insn_rescan (insn);
gcc/lcm.c
@@ -350,13 +350,13 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
 static void
 compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
                       sbitmap *later, sbitmap *laterin, sbitmap *insert,
-                      sbitmap *delete)
+                      sbitmap *del)
 {
   int x;
   basic_block bb;
 
   FOR_EACH_BB (bb)
-    sbitmap_difference (delete[bb->index], antloc[bb->index],
+    sbitmap_difference (del[bb->index], antloc[bb->index],
                        laterin[bb->index]);
 
   for (x = 0; x < NUM_EDGES (edge_list); x++)
@@ -377,7 +377,7 @@ compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
 struct edge_list *
 pre_edge_lcm (int n_exprs, sbitmap *transp,
              sbitmap *avloc, sbitmap *antloc, sbitmap *kill,
-             sbitmap **insert, sbitmap **delete)
+             sbitmap **insert, sbitmap **del)
 {
   sbitmap *antin, *antout, *earliest;
   sbitmap *avin, *avout;
@@ -450,8 +450,8 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
   sbitmap_vector_free (earliest);
 
   *insert = sbitmap_vector_alloc (num_edges, n_exprs);
-  *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
-  compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);
+  *del = sbitmap_vector_alloc (last_basic_block, n_exprs);
+  compute_insert_delete (edge_list, antloc, later, laterin, *insert, *del);
 
   sbitmap_vector_free (laterin);
   sbitmap_vector_free (later);
@@ -460,7 +460,7 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
   if (dump_file)
     {
      dump_sbitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
-     dump_sbitmap_vector (dump_file, "pre_delete_map", "", *delete,
+     dump_sbitmap_vector (dump_file, "pre_delete_map", "", *del,
                          last_basic_block);
    }
 #endif
@@ -684,13 +684,13 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
 static void
 compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
                           sbitmap *nearer, sbitmap *nearerout,
-                          sbitmap *insert, sbitmap *delete)
+                          sbitmap *insert, sbitmap *del)
 {
   int x;
   basic_block bb;
 
   FOR_EACH_BB (bb)
-    sbitmap_difference (delete[bb->index], st_avloc[bb->index],
+    sbitmap_difference (del[bb->index], st_avloc[bb->index],
                        nearerout[bb->index]);
 
   for (x = 0; x < NUM_EDGES (edge_list); x++)
@@ -711,7 +711,7 @@ compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
 struct edge_list *
 pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
                  sbitmap *st_avloc, sbitmap *st_antloc, sbitmap *kill,
-                 sbitmap **insert, sbitmap **delete)
+                 sbitmap **insert, sbitmap **del)
 {
   sbitmap *st_antin, *st_antout;
   sbitmap *st_avout, *st_avin, *farthest;
@@ -790,9 +790,9 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
   sbitmap_vector_free (farthest);
 
   *insert = sbitmap_vector_alloc (num_edges, n_exprs);
-  *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
+  *del = sbitmap_vector_alloc (last_basic_block, n_exprs);
   compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
-                            *insert, *delete);
+                            *insert, *del);
 
   sbitmap_vector_free (nearerout);
   sbitmap_vector_free (nearer);
@@ -801,7 +801,7 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
   if (dump_file)
     {
      dump_sbitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
-     dump_sbitmap_vector (dump_file, "pre_delete_map", "", *delete,
+     dump_sbitmap_vector (dump_file, "pre_delete_map", "", *del,
                          last_basic_block);
    }
 #endif
gcc/lower-subreg.c
@@ -577,7 +577,7 @@ resolve_reg_notes (rtx insn)
   pnote = &REG_NOTES (insn);
   while (*pnote != NULL_RTX)
     {
-      bool delete = false;
+      bool del = false;
 
       note = *pnote;
       switch (REG_NOTE_KIND (note))
@@ -585,14 +585,14 @@ resolve_reg_notes (rtx insn)
        case REG_DEAD:
        case REG_UNUSED:
          if (resolve_reg_p (XEXP (note, 0)))
-           delete = true;
+           del = true;
          break;
 
        default:
          break;
        }
 
-      if (delete)
+      if (del)
        *pnote = XEXP (note, 1);
       else
        pnote = &XEXP (note, 1);
gcc/mode-switching.c
@@ -585,7 +585,7 @@ optimize_mode_switching (void)
   for (i = 0; i < max_num_modes; i++)
     {
       int current_mode[N_ENTITIES];
-      sbitmap *delete;
+      sbitmap *del;
       sbitmap *insert;
 
       /* Set the anticipatable and computing arrays.  */
@@ -612,7 +612,7 @@ optimize_mode_switching (void)
       FOR_EACH_BB (bb)
        sbitmap_not (kill[bb->index], transp[bb->index]);
       edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
-                               kill, &insert, &delete);
+                               kill, &insert, &del);
 
       for (j = n_entities - 1; j >= 0; j--)
        {
@@ -663,7 +663,7 @@ optimize_mode_switching (void)
            }
 
       FOR_EACH_BB_REVERSE (bb)
-       if (TEST_BIT (delete[bb->index], j))
+       if (TEST_BIT (del[bb->index], j))
          {
            make_preds_opaque (bb, j);
            /* Cancel the 'deleted' mode set.  */
@@ -671,7 +671,7 @@ optimize_mode_switching (void)
        }
    }
 
-  sbitmap_vector_free (delete);
+  sbitmap_vector_free (del);
   sbitmap_vector_free (insert);
   clear_aux_for_edges ();
   free_edge_list (edge_list);