Commit f4eafc30 by H.J. Lu Committed by H.J. Lu

Remove trailing white spaces

	* lra-assigns.c: Remove trailing white spaces.
	* lra-coalesce.c: Likewise.
	* lra-constraints.c: Likewise.
	* lra-eliminations.c: Likewise.
	* lra-int.h: Likewise.
	* lra-spills.c: Likewise.
	* lra.c: Likewise.

From-SVN: r192966
parent 1ea58d34
2012-10-29 H.J. Lu <hongjiu.lu@intel.com>
* lra-assigns.c: Remove trailing white spaces.
* lra-coalesce.c: Likewise.
* lra-constraints.c: Likewise.
* lra-eliminations.c: Likewise.
* lra-int.h: Likewise.
* lra-spills.c: Likewise.
* lra.c: Likewise.
2012-10-29 Manuel López-Ibáñez <manu@gcc.gnu.org>
PR c/53066
......@@ -149,7 +149,7 @@ init_regno_assign_info (void)
{
int i, regno1, regno2, max_regno = max_reg_num ();
lra_copy_t cp;
regno_assign_info = XNEWVEC (struct regno_assign_info, max_regno);
for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
{
......@@ -185,10 +185,10 @@ reload_pseudo_compare_func (const void *v1p, const void *v2p)
enum reg_class cl1 = regno_allocno_class_array[r1];
enum reg_class cl2 = regno_allocno_class_array[r2];
int diff;
lra_assert (r1 >= lra_constraint_new_regno_start
&& r2 >= lra_constraint_new_regno_start);
/* Prefer to assign reload registers with smaller classes first to
guarantee assignment to all reload registers. */
if ((diff = (ira_class_hard_regs_num[cl1]
......@@ -217,7 +217,7 @@ pseudo_compare_func (const void *v1p, const void *v2p)
/* Prefer to assign more frequently used registers first. */
if ((diff = lra_reg_info[r2].freq - lra_reg_info[r1].freq) != 0)
return diff;
/* If regs are equally good, sort by their numbers, so that the
results of qsort leave nothing to chance. */
return r1 - r2;
......@@ -378,7 +378,7 @@ init_live_reload_and_inheritance_pseudos (void)
{
int i, p, max_regno = max_reg_num ();
lra_live_range_t r;
conflict_reload_and_inheritance_pseudos = sparseset_alloc (max_regno);
live_reload_and_inheritance_pseudos = XNEWVEC (bitmap_head, lra_live_max_point);
bitmap_obstack_initialize (&live_reload_and_inheritance_pseudos_bitmap_obstack);
......@@ -470,7 +470,7 @@ find_hard_regno_for (int regno, int *cost, int try_only_hard_regno)
for (p = r->start + 1; p <= r->finish; p++)
{
lra_live_range_t r2;
for (r2 = start_point_ranges[p];
r2 != NULL;
r2 = r2->start_next)
......@@ -511,7 +511,7 @@ find_hard_regno_for (int regno, int *cost, int try_only_hard_regno)
[lra_reg_info[conflict_regno].biggest_mode]);
/* Remember about multi-register pseudos. For example, 2 hard
register pseudos can start on the same hard register but can
not start on HR and HR+1/HR-1. */
not start on HR and HR+1/HR-1. */
for (hr = conflict_hr + 1;
hr < FIRST_PSEUDO_REGISTER && hr < conflict_hr + nregs;
hr++)
......@@ -810,7 +810,7 @@ spill_for (int regno, bitmap spilled_pseudo_bitmap)
EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
{
struct lra_insn_reg *ir;
for (ir = lra_get_insn_regs (uid); ir != NULL; ir = ir->next)
if (ir->regno >= FIRST_PSEUDO_REGISTER)
bitmap_set_bit (&insn_conflict_pseudos, ir->regno);
......@@ -867,7 +867,7 @@ spill_for (int regno, bitmap spilled_pseudo_bitmap)
for (p = r->start; p <= r->finish; p++)
{
lra_live_range_t r2;
for (r2 = start_point_ranges[p];
r2 != NULL;
r2 = r2->start_next)
......@@ -913,7 +913,7 @@ spill_for (int regno, bitmap spilled_pseudo_bitmap)
EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi)
{
rtx x;
cost += lra_reg_info[spill_regno].freq;
if (ira_reg_equiv[spill_regno].memory != NULL
|| ira_reg_equiv[spill_regno].constant != NULL)
......@@ -1038,7 +1038,7 @@ setup_live_pseudos_and_spill_after_risky_transforms (bitmap
for (p = r->start + 1; p <= r->finish; p++)
{
lra_live_range_t r2;
for (r2 = start_point_ranges[p];
r2 != NULL;
r2 = r2->start_next)
......@@ -1239,7 +1239,7 @@ assign_by_spills (void)
{
lra_insn_recog_data_t data;
struct lra_insn_reg *r;
data = lra_get_insn_recog_data (insn);
for (r = data->regs; r != NULL; r = r->next)
{
......
......@@ -79,7 +79,7 @@ move_freq_compare_func (const void *v1p, const void *v2p)
rtx mv1 = *(const rtx *) v1p;
rtx mv2 = *(const rtx *) v2p;
int pri1, pri2;
pri1 = BLOCK_FOR_INSN (mv1)->frequency;
pri2 = BLOCK_FOR_INSN (mv2)->frequency;
if (pri2 - pri1)
......
......@@ -81,25 +81,25 @@ along with GCC; see the file COPYING3. If not see
struct elim_table
{
/* Hard register number to be eliminated. */
int from;
int from;
/* Hard register number used as replacement. */
int to;
int to;
/* Difference between values of the two hard registers above on
previous iteration. */
HOST_WIDE_INT previous_offset;
/* Difference between the values on the current iteration. */
HOST_WIDE_INT offset;
HOST_WIDE_INT offset;
/* Nonzero if this elimination can be done. */
bool can_eliminate;
bool can_eliminate;
/* CAN_ELIMINATE since the last check. */
bool prev_can_eliminate;
/* REG rtx for the register to be eliminated. We cannot simply
compare the number since we might then spuriously replace a hard
register corresponding to a pseudo assigned to the reg to be
eliminated. */
rtx from_rtx;
rtx from_rtx;
/* REG rtx for the replacement. */
rtx to_rtx;
rtx to_rtx;
};
/* The elimination table. Each array entry describes one possible way
......@@ -335,7 +335,7 @@ lra_eliminate_regs_1 (rtx x, enum machine_mode mem_mode,
if ((ep = get_elimination (x)) != NULL)
{
rtx to = subst_p ? ep->to_rtx : ep->from_rtx;
if (update_p)
return plus_constant (Pmode, to, ep->offset - ep->previous_offset);
else if (full_p)
......@@ -354,10 +354,10 @@ lra_eliminate_regs_1 (rtx x, enum machine_mode mem_mode,
{
HOST_WIDE_INT offset;
rtx to = subst_p ? ep->to_rtx : ep->from_rtx;
if (! update_p && ! full_p)
return gen_rtx_PLUS (Pmode, to, XEXP (x, 1));
offset = (update_p
? ep->offset - ep->previous_offset : ep->offset);
if (CONST_INT_P (XEXP (x, 1))
......@@ -405,7 +405,7 @@ lra_eliminate_regs_1 (rtx x, enum machine_mode mem_mode,
&& (ep = get_elimination (XEXP (x, 0))) != NULL)
{
rtx to = subst_p ? ep->to_rtx : ep->from_rtx;
if (update_p)
return
plus_constant (Pmode,
......@@ -420,7 +420,7 @@ lra_eliminate_regs_1 (rtx x, enum machine_mode mem_mode,
else
return gen_rtx_MULT (Pmode, to, XEXP (x, 1));
}
/* ... fall through ... */
case CALL:
......@@ -777,7 +777,7 @@ eliminate_regs_in_insn (rtx insn, bool replace_p)
&& (ep = get_elimination (SET_DEST (old_set))) != NULL)
{
bool delete_p = replace_p;
#ifdef HARD_FRAME_POINTER_REGNUM
/* If this is setting the frame pointer register to the hardware
frame pointer register and this is an elimination that will
......@@ -798,11 +798,11 @@ eliminate_regs_in_insn (rtx insn, bool replace_p)
rtx base = SET_SRC (old_set);
HOST_WIDE_INT offset = 0;
rtx base_insn = insn;
while (base != ep->to_rtx)
{
rtx prev_insn, prev_set;
if (GET_CODE (base) == PLUS && CONST_INT_P (XEXP (base, 1)))
{
offset += INTVAL (XEXP (base, 1));
......@@ -818,14 +818,14 @@ eliminate_regs_in_insn (rtx insn, bool replace_p)
else
break;
}
if (base == ep->to_rtx)
{
rtx src;
offset -= (ep->offset - ep->previous_offset);
src = plus_constant (Pmode, ep->to_rtx, offset);
/* First see if this insn remains valid when we make
the change. If not, keep the INSN_CODE the same
and let the constraint pass fit it up. */
......@@ -841,14 +841,14 @@ eliminate_regs_in_insn (rtx insn, bool replace_p)
return;
}
}
/* We can't delete this insn, but needn't process it
since it won't be used unless something changes. */
delete_p = false;
}
#endif
/* This insn isn't serving a useful purpose. We delete it
when REPLACE is set. */
if (delete_p)
......@@ -892,13 +892,13 @@ eliminate_regs_in_insn (rtx insn, bool replace_p)
if (REG_P (reg) && (ep = get_elimination (reg)) != NULL)
{
rtx to_rtx = replace_p ? ep->to_rtx : ep->from_rtx;
if (! replace_p)
{
offset += (ep->offset - ep->previous_offset);
offset = trunc_int_for_mode (offset, GET_MODE (plus_cst_src));
}
if (GET_CODE (XEXP (plus_cst_src, 0)) == SUBREG)
to_rtx = gen_lowpart (GET_MODE (XEXP (plus_cst_src, 0)), to_rtx);
/* If we have a nonzero offset, and the source is already a
......@@ -909,7 +909,7 @@ eliminate_regs_in_insn (rtx insn, bool replace_p)
if (offset == 0 || plus_src)
{
rtx new_src = plus_constant (GET_MODE (to_rtx), to_rtx, offset);
old_set = single_set (insn);
/* First see if this insn remains valid when we make the
......@@ -923,7 +923,7 @@ eliminate_regs_in_insn (rtx insn, bool replace_p)
{
rtx new_pat = gen_rtx_SET (VOIDmode,
SET_DEST (old_set), new_src);
if (! validate_change (insn, &PATTERN (insn), new_pat, 0))
SET_SRC (old_set) = new_src;
}
......@@ -1153,7 +1153,7 @@ init_elim_table (void)
ep->to = ep1->to;
value_p = (targetm.can_eliminate (ep->from, ep->to)
&& ! (ep->to == STACK_POINTER_REGNUM
&& frame_pointer_needed
&& frame_pointer_needed
&& (! SUPPORTS_STACK_ALIGNMENT
|| ! stack_realign_fp)));
setup_can_eliminate (ep, value_p);
......
......@@ -390,7 +390,7 @@ lra_update_operator_dups (lra_insn_recog_data_t id)
for (i = 0; i < static_id->n_dups; i++)
{
int ndup = static_id->dup_num[i];
if (static_id->operand[ndup].is_operator)
*id->dup_loc[i] = *id->operand_loc[ndup];
}
......
......@@ -34,7 +34,7 @@ along with GCC; see the file COPYING3. If not see
end
create new stack slot S and assign P to S
end
The actual algorithm is bit more complicated because of different
pseudo sizes.
......@@ -143,9 +143,9 @@ assign_mem_slot (int i)
lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
&& lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0);
x = slots[pseudo_slots[i].slot_num].mem;
/* We can use a slot already allocated because it is guaranteed the
slot provides both enough inherent space and enough total
space. */
......@@ -181,14 +181,14 @@ assign_mem_slot (int i)
}
slots[pseudo_slots[i].slot_num].mem = stack_slot;
}
/* On a big endian machine, the "address" of the slot is the address
of the low part that fits its inherent mode. */
if (BYTES_BIG_ENDIAN && inherent_size < total_size)
adjust += (total_size - inherent_size);
x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust);
/* Set all of the memory attributes as appropriate for a spill. */
set_mem_attrs_for_spill (x);
pseudo_slots[i].mem = x;
......@@ -265,7 +265,7 @@ assign_spill_hard_regs (int *pseudo_regnos, int n)
bitmap setjump_crosses = regstat_get_setjmp_crosses ();
/* Hard registers which can not be used for any purpose at given
program point because they are unallocatable or already allocated
for other pseudos. */
for other pseudos. */
HARD_REG_SET *reserved_hard_regs;
if (! lra_reg_spill_p)
......@@ -604,7 +604,7 @@ alter_subregs (rtx *loc, bool final_p)
else if (fmt[i] == 'E')
{
int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
if (alter_subregs (&XVECEXP (x, i, j), final_p))
res = true;
......
......@@ -44,12 +44,12 @@ along with GCC; see the file COPYING3. If not see
Here is block diagram of LRA passes:
---------------------
| Undo inheritance | --------------- ---------------
---------------------
| Undo inheritance | --------------- ---------------
| for spilled pseudos)| | Memory-memory | | New (and old) |
| and splits (for |<----| move coalesce |<-----| pseudos |
| pseudos got the | --------------- | assignment |
Start | same hard regs) | ---------------
Start | same hard regs) | ---------------
| --------------------- ^
V | ---------------- |
----------- V | Update virtual | |
......@@ -63,7 +63,7 @@ along with GCC; see the file COPYING3. If not see
| to memory |<-------| RTL |--------->| transformations |
| substitution | | transfor- | | in EBB scope |
---------------- | mations | -------------------
| ------------
| ------------
V
-------------------------
| Hard regs substitution, |
......@@ -958,7 +958,7 @@ collect_non_operand_hard_regs (rtx *x, lra_insn_recog_data_t data,
break;
case CLOBBER:
/* We treat clobber of non-operand hard registers as early
clobber (the behavior is expected from asm). */
clobber (the behavior is expected from asm). */
list = collect_non_operand_hard_regs (&XEXP (op, 0), data,
list, OP_OUT, true);
break;
......@@ -1055,7 +1055,7 @@ lra_set_insn_recog_data (rtx insn)
if (nop > 0)
{
const char *p = recog_data.constraints[0];
for (p = constraints[0]; *p; p++)
n += *p == ',';
}
......@@ -1241,7 +1241,7 @@ lra_update_insn_recog_data (rtx insn)
int n;
unsigned int uid = INSN_UID (insn);
struct lra_static_insn_data *insn_static_data;
check_and_expand_insn_recog_data (uid);
if ((data = lra_insn_recog_data[uid]) != NULL
&& data->icode != INSN_CODE (insn))
......@@ -1310,7 +1310,7 @@ lra_update_insn_recog_data (rtx insn)
{
int i;
bool *bp;
n = insn_static_data->n_alternatives;
bp = data->alternative_enabled_p;
lra_assert (n >= 0 && bp != NULL);
......@@ -1578,7 +1578,7 @@ add_regs_to_insn_regno_info (lra_insn_recog_data_t data, rtx x, int uid,
break;
case CLOBBER:
/* We treat clobber of non-operand hard registers as early
clobber (the behavior is expected from asm). */
clobber (the behavior is expected from asm). */
add_regs_to_insn_regno_info (data, XEXP (x, 0), uid, OP_OUT, true);
break;
case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
......@@ -2026,7 +2026,7 @@ check_rtl (bool final_p)
for (i = 0; i < id->insn_static_data->n_operands; i++)
{
rtx op = *id->operand_loc[i];
if (MEM_P (op)
&& (GET_MODE (op) != BLKmode
|| GET_CODE (XEXP (op, 0)) != SCRATCH)
......@@ -2055,7 +2055,7 @@ has_nonexceptional_receiver (void)
/* If we're not optimizing, then just err on the safe side. */
if (!optimize)
return true;
/* First determine which blocks can reach exit via normal paths. */
tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
......@@ -2065,7 +2065,7 @@ has_nonexceptional_receiver (void)
/* Place the exit block on our worklist. */
EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
*tos++ = EXIT_BLOCK_PTR;
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
{
......@@ -2155,17 +2155,17 @@ update_inc_notes (void)
/* Set to 1 while in lra. */
int lra_in_progress;
/* Start of reload pseudo regnos before the new spill pass. */
/* Start of reload pseudo regnos before the new spill pass. */
int lra_constraint_new_regno_start;
/* Inheritance pseudo regnos before the new spill pass. */
/* Inheritance pseudo regnos before the new spill pass. */
bitmap_head lra_inheritance_pseudos;
/* Split regnos before the new spill pass. */
/* Split regnos before the new spill pass. */
bitmap_head lra_split_regs;
/* Reload pseudo regnos before the new assign pass which still can be
	spilled after the assignment pass. */
	spilled after the assignment pass. */
bitmap_head lra_optional_reload_pseudos;
/* First UID of insns generated before a new spill pass. */
......@@ -2307,7 +2307,7 @@ lra (FILE *f)
else
{
/* Do coalescing only for regular algorithms. */
if (! lra_assign () && lra_coalesce ())
if (! lra_assign () && lra_coalesce ())
live_p = false;
if (lra_undo_inheritance ())
live_p = false;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment