Commit 832a3292 by Kazu Hirata

* config/sh/sh.c: Fix formatting.

From-SVN: r78646
parent 508ea1c5
 2004-02-28  Kazu Hirata  <kazu@cs.umass.edu>
 
+	* config/sh/sh.c: Fix formatting.
+
+2004-02-28  Kazu Hirata  <kazu@cs.umass.edu>
+
 	* config/sh/sh.c: Convert to ISO-C.
 
 2004-02-28  Andrew Pinski  <pinskia@physics.uc.edu>
...
@@ -1324,7 +1324,7 @@ output_branch (int logic, rtx insn, rtx *operands)
       output_asm_insn ("bra\t%l0", &op0);
       fprintf (asm_out_file, "\tnop\n");
-      (*targetm.asm_out.internal_label)(asm_out_file, "LF", label);
+      (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
 
       return "";
     }
@@ -1567,7 +1567,7 @@ shift_insns_rtx (rtx insn)
     case ASHIFT:
       return shift_insns[shift_count];
     default:
-      abort();
+      abort ();
     }
 }
@@ -2089,7 +2089,7 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
     mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
   else
     mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
-  /* Can this be expressed as a right shift / left shift pair ? */
+  /* Can this be expressed as a right shift / left shift pair?  */
   lsb = ((mask ^ (mask - 1)) >> 1) + 1;
   right = exact_log2 (lsb);
   mask2 = ~(mask + lsb - 1);
@@ -2103,7 +2103,7 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
       int late_right = exact_log2 (lsb2);
       best_cost = shift_insns[left + late_right] + shift_insns[late_right];
     }
-  /* Try to use zero extend */
+  /* Try to use zero extend.  */
   if (mask2 == ~(lsb2 - 1))
     {
       int width, first;
@@ -2111,7 +2111,7 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
       for (width = 8; width <= 16; width += 8)
         {
           /* Can we zero-extend right away?  */
-          if (lsb2 == (unsigned HOST_WIDE_INT)1 << width)
+          if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
             {
               cost
                 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
@@ -2164,7 +2164,7 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
         }
     }
   /* Try to use a scratch register to hold the AND operand.  */
-  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT)3 << 30)) == 0;
+  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
   for (i = 0; i <= 2; i++)
     {
       if (i > right)
@@ -2229,7 +2229,7 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
   unsigned HOST_WIDE_INT mask;
   int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
   int right, total_shift;
-  void (*shift_gen_fun) (int, rtx*) = gen_shifty_hi_op;
+  void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
 
   right = attributes[0];
   total_shift = INTVAL (left_rtx) + right;
@@ -2246,9 +2246,9 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
       if (first < 0)
         {
           emit_insn ((mask << right) <= 0xff
-                     ? gen_zero_extendqisi2(dest,
-                                            gen_lowpart (QImode, source))
-                     : gen_zero_extendhisi2(dest,
-                                            gen_lowpart (HImode, source)));
+                     ? gen_zero_extendqisi2 (dest,
+                                             gen_lowpart (QImode, source))
+                     : gen_zero_extendhisi2 (dest,
+                                             gen_lowpart (HImode, source)));
           source = dest;
         }
@@ -2269,8 +2269,8 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
         }
       if (first >= 0)
         emit_insn (mask <= 0xff
-                   ? gen_zero_extendqisi2(dest, gen_lowpart (QImode, dest))
-                   : gen_zero_extendhisi2(dest, gen_lowpart (HImode, dest)));
+                   ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
+                   : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
       if (total_shift > 0)
         {
           operands[2] = GEN_INT (total_shift);
@@ -2284,8 +2284,8 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
       /* If the topmost bit that matters is set, set the topmost bits
          that don't matter.  This way, we might be able to get a shorter
          signed constant.  */
-      if (mask & ((HOST_WIDE_INT)1 << (31 - total_shift)))
-        mask |= (HOST_WIDE_INT)~0 << (31 - total_shift);
+      if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
+        mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
     case 2:
       /* Don't expand fine-grained when combining, because that will
          make the pattern fail.  */
@@ -2502,8 +2502,8 @@ gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
           gen_shifty_hi_op (ASHIFT, operands);
         }
       emit_insn (kind & 1
-                 ? gen_extendqisi2(dest, gen_lowpart (QImode, dest))
-                 : gen_extendhisi2(dest, gen_lowpart (HImode, dest)));
+                 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
+                 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
       if (kind <= 2)
         {
           if (shift2)
@@ -3722,7 +3722,7 @@ barrier_align (rtx barrier_or_label)
          the table to the minimum for proper code alignment.  */
       return ((TARGET_SMALLCODE
                || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
-                   <= (unsigned)1 << (CACHE_LOG - 2)))
+                   <= (unsigned) 1 << (CACHE_LOG - 2)))
               ? 1 << TARGET_SHMEDIA : align_jumps_log);
     }
@@ -4813,7 +4813,7 @@ push_regs (HARD_REG_SET *mask, int interrupt_handler)
       HARD_REG_SET unsaved;
 
       push (FPSCR_REG);
-      COMPL_HARD_REG_SET(unsaved, *mask);
+      COMPL_HARD_REG_SET (unsaved, *mask);
       fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
       skip_fpscr = 1;
     }
@@ -5103,8 +5103,8 @@ sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
         && ! (current_function_needs_context && i == STATIC_CHAIN_REGNUM)
         && ! (current_function_calls_eh_return
               && (i == EH_RETURN_STACKADJ_REGNO
-                  || ((unsigned)i <= EH_RETURN_DATA_REGNO (0)
-                      && (unsigned)i >= EH_RETURN_DATA_REGNO (3)))))
+                  || ((unsigned) i <= EH_RETURN_DATA_REGNO (0)
+                      && (unsigned) i >= EH_RETURN_DATA_REGNO (3)))))
       schedule->temps[tmpx++] = i;
   entry->reg = -1;
   entry->mode = VOIDmode;
@@ -6003,7 +6003,7 @@ sh_builtin_saveregs (void)
       regno = first_floatreg;
       if (regno & 1)
         {
-          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
+          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
           mem = gen_rtx_MEM (SFmode, fpregs);
           set_mem_alias_set (mem, alias_set);
           emit_move_insn (mem,
@@ -6016,7 +6016,7 @@ sh_builtin_saveregs (void)
         {
           rtx mem;
 
-          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
+          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
           mem = gen_rtx_MEM (SFmode, fpregs);
           set_mem_alias_set (mem, alias_set);
           emit_move_insn (mem,
@@ -6402,7 +6402,7 @@ sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
                                   BASE_ARG_REG (mode)
                                   + (ROUND_REG (*ca, mode) ^ 1)),
                      const0_rtx);
-      rtx r2 = gen_rtx_EXPR_LIST(VOIDmode,
+      rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
                      gen_rtx_REG (SFmode,
                                   BASE_ARG_REG (mode)
                                   + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
@@ -7429,7 +7429,8 @@ equality_comparison_operator (rtx op, enum machine_mode mode)
           && (GET_CODE (op) == EQ || GET_CODE (op) == NE));
 }
 
-int greater_comparison_operator (rtx op, enum machine_mode mode)
+int
+greater_comparison_operator (rtx op, enum machine_mode mode)
 {
   if (mode != VOIDmode && GET_MODE (op) == mode)
     return 0;
@@ -7445,7 +7446,8 @@ int greater_comparison_operator (rtx op, enum machine_mode mode)
     }
 }
 
-int less_comparison_operator (rtx op, enum machine_mode mode)
+int
+less_comparison_operator (rtx op, enum machine_mode mode)
 {
   if (mode != VOIDmode && GET_MODE (op) == mode)
     return 0;
@@ -7508,7 +7510,7 @@ mextr_bit_offset (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
   if (GET_CODE (op) != CONST_INT)
     return 0;
   i = INTVAL (op);
-  return i >= 1*8 && i <= 7*8 && (i & 7) == 0;
+  return i >= 1 * 8 && i <= 7 * 8 && (i & 7) == 0;
 }
 
 int
@@ -7571,7 +7573,7 @@ sh_rep_vec (rtx v, enum machine_mode mode)
   if (GET_MODE_UNIT_SIZE (mode) == 1)
     {
       y = XVECEXP (v, 0, i);
-      for (i -= 2 ; i >= 0; i -= 2)
+      for (i -= 2; i >= 0; i -= 2)
         if (! rtx_equal_p (XVECEXP (v, 0, i + 1), x)
             || ! rtx_equal_p (XVECEXP (v, 0, i), y))
           return 0;
@@ -8182,8 +8184,7 @@ int
 sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
                          unsigned int new_reg)
 {
-  /* Interrupt functions can only use registers that have already been
   /* Interrupt functions can only use registers that have already been
      saved by the prologue, even if they would normally be
      call-clobbered.  */
@@ -8266,7 +8267,7 @@ sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
           && get_attr_type (insn) == TYPE_DYN_SHIFT
           && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
           && reg_overlap_mentioned_p (SET_DEST (PATTERN (dep_insn)),
-                                      XEXP (SET_SRC (single_set(insn)),
+                                      XEXP (SET_SRC (single_set (insn)),
                                             1)))
         cost++;
       /* When an LS group instruction with a latency of less than
@@ -8338,7 +8339,7 @@ sh_pr_n_sets (void)
 /* This Function returns nonzero if the DFA based scheduler interface
    is to be used.  At present this is supported for the SH4 only.  */
 static int
-sh_use_dfa_interface(void)
+sh_use_dfa_interface (void)
 {
   if (TARGET_HARD_SH4)
     return 1;
@@ -8349,7 +8350,7 @@ sh_use_dfa_interface (void)
 /* This function returns "2" to indicate dual issue for the SH4
    processor.  To be used by the DFA pipeline description.  */
 static int
-sh_issue_rate(void)
+sh_issue_rate (void)
 {
   if (TARGET_SUPERSCALAR)
     return 2;
@@ -8468,11 +8469,14 @@ swap_reorder (rtx *a, int n)
 }
 
 #define SCHED_REORDER(READY, N_READY)                                  \
-  do { if ((N_READY) == 2)                                             \
-         swap_reorder (READY, N_READY);                                \
-       else if ((N_READY) > 2)                                         \
-         qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); }     \
-  while (0)
+  do                                                                   \
+    {                                                                  \
+      if ((N_READY) == 2)                                              \
+        swap_reorder (READY, N_READY);                                 \
+      else if ((N_READY) > 2)                                          \
+        qsort (READY, N_READY, sizeof (rtx), rank_for_reorder);        \
+    }                                                                  \
+  while (0)
 
 /* Sort the ready list READY by ascending priority, using the SCHED_REORDER
    macro.  */
@@ -9252,7 +9256,7 @@ sh_register_move_cost (enum machine_mode mode,
     return 4;
 
   if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
-      || (dstclass== MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
+      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
     return 9;
 
   if ((REGCLASS_HAS_FP_REG (dstclass)
...