Commit c4ad648e — authored and committed by Alan Modra

linux.h: Formatting, whitespace.

	* config/rs6000/linux.h: Formatting, whitespace.
	* config/rs6000/linux64.h: Likewise.
	* config/rs6000/rs6000-protos.h: Likewise.
	* config/rs6000/rs6000.c: Likewise.
	(easy_vector_splat_const): Add fall thru comments.
	(output_vec_const_move): Likewise.

From-SVN: r88551
parent 406176be
2004-10-05 Alan Modra <amodra@bigpond.net.au>
* config/rs6000/linux.h: Formatting, whitespace.
* config/rs6000/linux64.h: Likewise.
* config/rs6000/rs6000-protos.h: Likewise.
* config/rs6000/rs6000.c: Likewise.
(easy_vector_splat_const): Add fall thru comments.
(output_vec_const_move): Likewise.
2004-10-05  Kelley Cook  <kcook@gcc.gnu.org>
	* Makefile.in: Update -Wno-error exceptions for move to build dir.
......
...@@ -147,7 +147,8 @@ extern HOST_WIDE_INT rs6000_initial_elimination_offset (int, int); ...@@ -147,7 +147,8 @@ extern HOST_WIDE_INT rs6000_initial_elimination_offset (int, int);
extern bool rs6000_legitimate_offset_address_p (enum machine_mode, rtx, int); extern bool rs6000_legitimate_offset_address_p (enum machine_mode, rtx, int);
extern rtx rs6000_machopic_legitimize_pic_address (rtx orig, extern rtx rs6000_machopic_legitimize_pic_address (rtx orig,
enum machine_mode mode, rtx reg); enum machine_mode mode,
rtx reg);
#endif /* RTX_CODE */ #endif /* RTX_CODE */
......
...@@ -62,8 +62,7 @@ ...@@ -62,8 +62,7 @@
#endif #endif
#define EASY_VECTOR_15(n) ((n) >= -16 && (n) <= 15) #define EASY_VECTOR_15(n) ((n) >= -16 && (n) <= 15)
#define EASY_VECTOR_15_ADD_SELF(n) ((n) >= 0x10 && (n) <= 0x1e \ #define EASY_VECTOR_15_ADD_SELF(n) ((n) >= 0x10 && (n) <= 0x1e && !((n) & 1))
&& !((n) & 1))
#define min(A,B) ((A) < (B) ? (A) : (B)) #define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B)) #define max(A,B) ((A) > (B) ? (A) : (B))
...@@ -1769,7 +1768,6 @@ count_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) ...@@ -1769,7 +1768,6 @@ count_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
int int
altivec_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) altivec_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{ {
return (register_operand (op, mode) return (register_operand (op, mode)
&& (GET_CODE (op) != REG && (GET_CODE (op) != REG
|| REGNO (op) > FIRST_PSEUDO_REGISTER || REGNO (op) > FIRST_PSEUDO_REGISTER
...@@ -1794,7 +1792,7 @@ xer_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) ...@@ -1794,7 +1792,7 @@ xer_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
int int
s8bit_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) s8bit_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{ {
return ( GET_CODE (op) == CONST_INT return (GET_CODE (op) == CONST_INT
&& (INTVAL (op) >= -128 && INTVAL (op) <= 127)); && (INTVAL (op) >= -128 && INTVAL (op) <= 127));
} }
...@@ -2232,6 +2230,8 @@ easy_vector_splat_const (int cst, enum machine_mode mode) ...@@ -2232,6 +2230,8 @@ easy_vector_splat_const (int cst, enum machine_mode mode)
if ((cst & 0xffff) != ((cst >> 16) & 0xffff)) if ((cst & 0xffff) != ((cst >> 16) & 0xffff))
break; break;
cst = cst >> 16; cst = cst >> 16;
/* Fall thru */
case V8HImode: case V8HImode:
if (EASY_VECTOR_15 (cst) if (EASY_VECTOR_15 (cst)
|| EASY_VECTOR_15_ADD_SELF (cst)) || EASY_VECTOR_15_ADD_SELF (cst))
...@@ -2239,6 +2239,8 @@ easy_vector_splat_const (int cst, enum machine_mode mode) ...@@ -2239,6 +2239,8 @@ easy_vector_splat_const (int cst, enum machine_mode mode)
if ((cst & 0xff) != ((cst >> 8) & 0xff)) if ((cst & 0xff) != ((cst >> 8) & 0xff))
break; break;
cst = cst >> 8; cst = cst >> 8;
/* Fall thru */
case V16QImode: case V16QImode:
if (EASY_VECTOR_15 (cst) if (EASY_VECTOR_15 (cst)
|| EASY_VECTOR_15_ADD_SELF (cst)) || EASY_VECTOR_15_ADD_SELF (cst))
...@@ -2385,6 +2387,8 @@ output_vec_const_move (rtx *operands) ...@@ -2385,6 +2387,8 @@ output_vec_const_move (rtx *operands)
else if (EASY_VECTOR_15_ADD_SELF (cst)) else if (EASY_VECTOR_15_ADD_SELF (cst))
return "#"; return "#";
cst = cst >> 16; cst = cst >> 16;
/* Fall thru */
case V8HImode: case V8HImode:
if (EASY_VECTOR_15 (cst)) if (EASY_VECTOR_15 (cst))
{ {
...@@ -2394,6 +2398,8 @@ output_vec_const_move (rtx *operands) ...@@ -2394,6 +2398,8 @@ output_vec_const_move (rtx *operands)
else if (EASY_VECTOR_15_ADD_SELF (cst)) else if (EASY_VECTOR_15_ADD_SELF (cst))
return "#"; return "#";
cst = cst >> 8; cst = cst >> 8;
/* Fall thru */
case V16QImode: case V16QImode:
if (EASY_VECTOR_15 (cst)) if (EASY_VECTOR_15 (cst))
{ {
...@@ -2402,6 +2408,7 @@ output_vec_const_move (rtx *operands) ...@@ -2402,6 +2408,7 @@ output_vec_const_move (rtx *operands)
} }
else if (EASY_VECTOR_15_ADD_SELF (cst)) else if (EASY_VECTOR_15_ADD_SELF (cst))
return "#"; return "#";
default: default:
abort (); abort ();
} }
...@@ -2416,8 +2423,7 @@ output_vec_const_move (rtx *operands) ...@@ -2416,8 +2423,7 @@ output_vec_const_move (rtx *operands)
pattern of V1DI, V4HI, and V2SF. pattern of V1DI, V4HI, and V2SF.
FIXME: We should probably return # and add post reload FIXME: We should probably return # and add post reload
splitters for these, but this way is so easy ;-). splitters for these, but this way is so easy ;-). */
*/
operands[1] = GEN_INT (cst); operands[1] = GEN_INT (cst);
operands[2] = GEN_INT (cst2); operands[2] = GEN_INT (cst2);
if (cst == cst2) if (cst == cst2)
...@@ -3675,7 +3681,8 @@ rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED) ...@@ -3675,7 +3681,8 @@ rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
machopic_function_base_name() defined. */ machopic_function_base_name() defined. */
rtx rtx
rs6000_legitimize_reload_address (rtx x, enum machine_mode mode, rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
int opnum, int type, int ind_levels ATTRIBUTE_UNUSED, int *win) int opnum, int type,
int ind_levels ATTRIBUTE_UNUSED, int *win)
{ {
/* We must recognize output that we have already generated ourselves. */ /* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS if (GET_CODE (x) == PLUS
...@@ -5179,11 +5186,14 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, ...@@ -5179,11 +5186,14 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
int i=0; int i=0;
if (align_words + n_words > GP_ARG_NUM_REG if (align_words + n_words > GP_ARG_NUM_REG
&& (TARGET_32BIT && TARGET_POWERPC64)) && (TARGET_32BIT && TARGET_POWERPC64))
/* Not all of the arg fits in gprs. Say that it goes in memory too, /* Not all of the arg fits in gprs. Say that it
using a magic NULL_RTX component. Also see comment in goes in memory too, using a magic NULL_RTX
component. Also see comment in
rs6000_mixed_function_arg for why the normal rs6000_mixed_function_arg for why the normal
function_arg_partial_nregs scheme doesn't work in this case. */ function_arg_partial_nregs scheme doesn't work
rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx); in this case. */
rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
const0_rtx);
do do
{ {
r = gen_rtx_REG (rmode, r = gen_rtx_REG (rmode,
...@@ -5338,7 +5348,6 @@ rs6000_move_block_from_reg (int regno, rtx x, int nregs) ...@@ -5338,7 +5348,6 @@ rs6000_move_block_from_reg (int regno, rtx x, int nregs)
} }
} }
/* Perform any needed actions needed for a function that is receiving a /* Perform any needed actions needed for a function that is receiving a
variable number of arguments. variable number of arguments.
...@@ -5355,7 +5364,8 @@ rs6000_move_block_from_reg (int regno, rtx x, int nregs) ...@@ -5355,7 +5364,8 @@ rs6000_move_block_from_reg (int regno, rtx x, int nregs)
static void static void
setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode, setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int *pretend_size ATTRIBUTE_UNUSED, int no_rtl) tree type, int *pretend_size ATTRIBUTE_UNUSED,
int no_rtl)
{ {
CUMULATIVE_ARGS next_cum; CUMULATIVE_ARGS next_cum;
int reg_size = TARGET_32BIT ? 4 : 8; int reg_size = TARGET_32BIT ? 4 : 8;
...@@ -5407,7 +5417,8 @@ setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode, ...@@ -5407,7 +5417,8 @@ setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
rtx lab = gen_label_rtx (); rtx lab = gen_label_rtx ();
int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG) * 8); int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG) * 8);
emit_jump_insn (gen_rtx_SET (VOIDmode, emit_jump_insn
(gen_rtx_SET (VOIDmode,
pc_rtx, pc_rtx,
gen_rtx_IF_THEN_ELSE (VOIDmode, gen_rtx_IF_THEN_ELSE (VOIDmode,
gen_rtx_NE (VOIDmode, cr1, gen_rtx_NE (VOIDmode, cr1,
...@@ -6864,7 +6875,8 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp) ...@@ -6864,7 +6875,8 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
dp = (struct builtin_description_predicates *) bdesc_altivec_preds; dp = (struct builtin_description_predicates *) bdesc_altivec_preds;
for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++) for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
if (dp->code == fcode) if (dp->code == fcode)
return altivec_expand_predicate_builtin (dp->icode, dp->opcode, arglist, target); return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
arglist, target);
/* LV* are funky. We initialized them differently. */ /* LV* are funky. We initialized them differently. */
switch (fcode) switch (fcode)
...@@ -10496,7 +10508,8 @@ print_operand (FILE *file, rtx x, int code) ...@@ -10496,7 +10508,8 @@ print_operand (FILE *file, rtx x, int code)
if (GET_CODE (x) != SYMBOL_REF) if (GET_CODE (x) != SYMBOL_REF)
abort (); abort ();
/* Mark the decl as referenced so that cgraph will output the function. */ /* Mark the decl as referenced so that cgraph will output the
function. */
if (SYMBOL_REF_DECL (x)) if (SYMBOL_REF_DECL (x))
mark_decl_referenced (SYMBOL_REF_DECL (x)); mark_decl_referenced (SYMBOL_REF_DECL (x));
...@@ -11485,9 +11498,9 @@ rs6000_split_multireg_move (rtx dst, rtx src) ...@@ -11485,9 +11498,9 @@ rs6000_split_multireg_move (rtx dst, rtx src)
{ {
rtx delta_rtx; rtx delta_rtx;
breg = XEXP (XEXP (src, 0), 0); breg = XEXP (XEXP (src, 0), 0);
delta_rtx = GET_CODE (XEXP (src, 0)) == PRE_INC delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
? GEN_INT (GET_MODE_SIZE (GET_MODE (src))) ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
: GEN_INT (-GET_MODE_SIZE (GET_MODE (src))); : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
emit_insn (TARGET_32BIT emit_insn (TARGET_32BIT
? gen_addsi3 (breg, breg, delta_rtx) ? gen_addsi3 (breg, breg, delta_rtx)
: gen_adddi3 (breg, breg, delta_rtx)); : gen_adddi3 (breg, breg, delta_rtx));
...@@ -11524,9 +11537,9 @@ rs6000_split_multireg_move (rtx dst, rtx src) ...@@ -11524,9 +11537,9 @@ rs6000_split_multireg_move (rtx dst, rtx src)
{ {
rtx delta_rtx; rtx delta_rtx;
breg = XEXP (XEXP (dst, 0), 0); breg = XEXP (XEXP (dst, 0), 0);
delta_rtx = GET_CODE (XEXP (dst, 0)) == PRE_INC delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
? GEN_INT (GET_MODE_SIZE (GET_MODE (dst))) ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
: GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))); : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
/* We have to update the breg before doing the store. /* We have to update the breg before doing the store.
Use store with update, if available. */ Use store with update, if available. */
...@@ -13719,8 +13732,8 @@ rs6000_emit_epilogue (int sibcall) ...@@ -13719,8 +13732,8 @@ rs6000_emit_epilogue (int sibcall)
+ LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
+ 63 + 1 - info->first_fp_reg_save); + 63 + 1 - info->first_fp_reg_save);
strcpy (rname, (current_function_calls_eh_return) ? strcpy (rname, ((current_function_calls_eh_return) ?
"*eh_rest_world_r10" : "*rest_world"); "*eh_rest_world_r10" : "*rest_world"));
alloc_rname = ggc_strdup (rname); alloc_rname = ggc_strdup (rname);
j = 0; j = 0;
...@@ -15517,7 +15530,8 @@ rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority) ...@@ -15517,7 +15530,8 @@ rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
&& rs6000_sched_restricted_insns_priority) && rs6000_sched_restricted_insns_priority)
{ {
/* Prioritize insns that can be dispatched only in the first dispatch slot. */ /* Prioritize insns that can be dispatched only in the first
dispatch slot. */
if (rs6000_sched_restricted_insns_priority == 1) if (rs6000_sched_restricted_insns_priority == 1)
/* Attach highest priority to insn. This means that in /* Attach highest priority to insn. This means that in
haifa-sched.c:ready_sort(), dispatch-slot restriction considerations haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
...@@ -15525,8 +15539,8 @@ rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority) ...@@ -15525,8 +15539,8 @@ rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
return current_sched_info->sched_max_insns_priority; return current_sched_info->sched_max_insns_priority;
else if (rs6000_sched_restricted_insns_priority == 2) else if (rs6000_sched_restricted_insns_priority == 2)
/* Increase priority of insn by a minimal amount. This means that in /* Increase priority of insn by a minimal amount. This means that in
haifa-sched.c:ready_sort(), only 'priority' (critical path) considerations haifa-sched.c:ready_sort(), only 'priority' (critical path)
precede dispatch-slot restriction considerations. */ considerations precede dispatch-slot restriction considerations. */
return (priority + 1); return (priority + 1);
} }
...@@ -15681,7 +15695,8 @@ is_store_insn (rtx insn) ...@@ -15681,7 +15695,8 @@ is_store_insn (rtx insn)
costly by the given target. */ costly by the given target. */
static bool static bool
rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost, int distance) rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost,
int distance)
{ {
/* If the flag is not enbled - no dependence is considered costly; /* If the flag is not enbled - no dependence is considered costly;
allow all dependent insns in the same group. allow all dependent insns in the same group.
...@@ -15705,7 +15720,8 @@ rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost, int distanc ...@@ -15705,7 +15720,8 @@ rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost, int distanc
&& is_load_insn (next) && is_load_insn (next)
&& is_store_insn (insn) && is_store_insn (insn)
&& (!link || (int) REG_NOTE_KIND (link) == 0)) && (!link || (int) REG_NOTE_KIND (link) == 0))
/* Prevent load after store in the same group if it is a true dependence. */ /* Prevent load after store in the same group if it is a true
dependence. */
return true; return true;
/* The flag is set to X; dependences with latency >= X are considered costly, /* The flag is set to X; dependences with latency >= X are considered costly,
...@@ -15833,8 +15849,9 @@ is_costly_group (rtx *group_insns, rtx next_insn) ...@@ -15833,8 +15849,9 @@ is_costly_group (rtx *group_insns, rtx next_insn)
last group, and how many dispatch groups were encountered so far). */ last group, and how many dispatch groups were encountered so far). */
static int static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn, force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
bool *group_end, int can_issue_more, int *group_count) rtx next_insn, bool *group_end, int can_issue_more,
int *group_count)
{ {
rtx nop; rtx nop;
bool force; bool force;
...@@ -15864,8 +15881,8 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn, ...@@ -15864,8 +15881,8 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn,
/* Since only a branch can be issued in the last issue_slot, it is /* Since only a branch can be issued in the last issue_slot, it is
sufficient to insert 'can_issue_more - 1' nops if next_insn is not sufficient to insert 'can_issue_more - 1' nops if next_insn is not
a branch. If next_insn is a branch, we insert 'can_issue_more' nops; a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
in this case the last nop will start a new group and the branch will be in this case the last nop will start a new group and the branch
forced to the new group. */ will be forced to the new group. */
if (can_issue_more && !is_branch_slot_insn (next_insn)) if (can_issue_more && !is_branch_slot_insn (next_insn))
can_issue_more--; can_issue_more--;
...@@ -15923,8 +15940,9 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn, ...@@ -15923,8 +15940,9 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns, rtx next_insn,
/* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */ /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
can_issue_more++; can_issue_more++;
*group_end = /* Is next_insn going to start a new group? */ /* Is next_insn going to start a new group? */
(end *group_end
= (end
|| (can_issue_more == 1 && !is_branch_slot_insn (next_insn)) || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
|| (can_issue_more <= 2 && is_cracked_insn (next_insn)) || (can_issue_more <= 2 && is_cracked_insn (next_insn))
|| (can_issue_more < issue_rate && || (can_issue_more < issue_rate &&
...@@ -16005,15 +16023,17 @@ redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail) ...@@ -16005,15 +16023,17 @@ redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
if (next_insn == NULL_RTX) if (next_insn == NULL_RTX)
return group_count + 1; return group_count + 1;
group_end = /* Is next_insn going to start a new group? */ /* Is next_insn going to start a new group? */
(can_issue_more == 0 group_end
= (can_issue_more == 0
|| (can_issue_more == 1 && !is_branch_slot_insn (next_insn)) || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
|| (can_issue_more <= 2 && is_cracked_insn (next_insn)) || (can_issue_more <= 2 && is_cracked_insn (next_insn))
|| (can_issue_more < issue_rate && || (can_issue_more < issue_rate &&
insn_terminates_group_p (next_insn, previous_group))); insn_terminates_group_p (next_insn, previous_group)));
can_issue_more = force_new_group (sched_verbose, dump, group_insns, can_issue_more = force_new_group (sched_verbose, dump, group_insns,
next_insn, &group_end, can_issue_more, &group_count); next_insn, &group_end, can_issue_more,
&group_count);
if (group_end) if (group_end)
{ {
...@@ -16277,9 +16297,10 @@ rs6000_handle_altivec_attribute (tree *node, tree name, tree args, ...@@ -16277,9 +16297,10 @@ rs6000_handle_altivec_attribute (tree *node, tree name, tree args,
break; break;
case SFmode: result = V4SF_type_node; break; case SFmode: result = V4SF_type_node; break;
/* If the user says 'vector int bool', we may be handed the 'bool' /* If the user says 'vector int bool', we may be handed the 'bool'
attribute _before_ the 'vector' attribute, and so select the proper attribute _before_ the 'vector' attribute, and so select the
type in the 'b' case below. */ proper type in the 'b' case below. */
case V4SImode: case V8HImode: case V16QImode: case V4SFmode: result = type; case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
result = type;
default: break; default: break;
} }
break; break;
...@@ -16576,7 +16597,8 @@ static tree branch_island_list = 0; ...@@ -16576,7 +16597,8 @@ static tree branch_island_list = 0;
function. */ function. */
static void static void
add_compiler_branch_island (tree label_name, tree function_name, int line_number) add_compiler_branch_island (tree label_name, tree function_name,
int line_number)
{ {
tree branch_island = build_tree_list (function_name, label_name); tree branch_island = build_tree_list (function_name, label_name);
TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number); TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
...@@ -16704,7 +16726,8 @@ get_prev_label (tree function_name) ...@@ -16704,7 +16726,8 @@ get_prev_label (tree function_name)
CALL_DEST is the routine we are calling. */ CALL_DEST is the routine we are calling. */
char * char *
output_call (rtx insn, rtx *operands, int dest_operand_number, int cookie_operand_number) output_call (rtx insn, rtx *operands, int dest_operand_number,
int cookie_operand_number)
{ {
static char buf[256]; static char buf[256];
if (GET_CODE (operands[dest_operand_number]) == SYMBOL_REF if (GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
...@@ -17547,20 +17570,21 @@ rs6000_register_move_cost (enum machine_mode mode, ...@@ -17547,20 +17570,21 @@ rs6000_register_move_cost (enum machine_mode mode,
return (rs6000_memory_move_cost (mode, from, 0) return (rs6000_memory_move_cost (mode, from, 0)
+ rs6000_memory_move_cost (mode, GENERAL_REGS, 0)); + rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
/* It's more expensive to move CR_REGS than CR0_REGS because of the shift.... */ /* It's more expensive to move CR_REGS than CR0_REGS because of the
shift. */
else if (from == CR_REGS) else if (from == CR_REGS)
return 4; return 4;
else else
/* A move will cost one instruction per GPR moved. */ /* A move will cost one instruction per GPR moved. */
return 2 * HARD_REGNO_NREGS (0, mode); return 2 * HARD_REGNO_NREGS (0, mode);
} }
/* Moving between two similar registers is just one instruction. */ /* Moving between two similar registers is just one instruction. */
else if (reg_classes_intersect_p (to, from)) else if (reg_classes_intersect_p (to, from))
return mode == TFmode ? 4 : 2; return mode == TFmode ? 4 : 2;
/* Everything else has to go through GENERAL_REGS. */ /* Everything else has to go through GENERAL_REGS. */
else else
return (rs6000_register_move_cost (mode, GENERAL_REGS, to) return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
+ rs6000_register_move_cost (mode, from, GENERAL_REGS)); + rs6000_register_move_cost (mode, from, GENERAL_REGS));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment