Commit d7a9e7c5 by Nathan Froyd, committed by Jakub Jelinek

re PR target/41175 (-Os generates significantly larger code)

	PR target/41175
	PR target/40677
	* config/rs6000/rs6000.c (no_global_regs_above): Fix precedence
	problem.
	(SAVRES_NOINLINE_GPRS_SAVES_LR, SAVRES_NOINLINE_FPRS_SAVES_LR,
	SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR): New strategy bits.
	(rs6000_savres_strategy): Always save FP registers inline if the
	target doesn't support hardware double-precision.  Set the above
	bits in return value when needed.
	(rs6000_savres_routine_sym): Fix computation for cache selector.
	Mark the generated symbol as a function.  Rename exitp argument to
	lr.  Move code for determining the name of the symbol...
	(rs6000_savres_routine_name): ...here.  New function.  Add cases for
	getting the names right on AIX and 64-bit Linux.
	(savres_routine_name): New variable.
	(rs6000_make_savres_rtx): Rename exitp argument to lr.  Don't assert
	lr isn't set when savep.  Use r12 resp. r1 instead of r11 depending
	on what the target routine uses as a base register.  If savep && lr
	describe saving of r0 into memory slot.
	(rs6000_emit_prologue): Correct use of call_used_regs.  Fix out of
	line calls for AIX ABI.
	(rs6000_output_function_prologue): Use rs6000_savres_routine_name to
	determine FP save/restore functions.
	(rs6000_emit_stack_reset): Handle savres if sp_offset != 0 and
	frame_reg_rtx != sp_reg_rtx.  Use gen_add3_insn instead of
	gen_addsi3.
	(rs6000_emit_epilogue): Adjust computation of restore_lr.
	Duplicate restoration of LR and execute the appropriate one
	depending on whether GPRs are being restored inline.  Set r11 from
	offsetted frame_reg_rtx instead of sp_reg_rtx; if frame_reg_rtx is
	r11, adjust sp_offset.  Use gen_add3_insn instead of gen_addsi3.
	Fix out of line calls for AIX ABI.
	* config/rs6000/rs6000.md (*return_and_restore_fpregs_aix_<mode>):
	New insn.
	* config/rs6000/spe.md (*save_gpregs_spe): Use explicit match for
	register 11.
	(*restore_gpregs_spe): Likewise.
	(*return_and_restore_gpregs_spe): Likewise.
	* config/rs6000/linux64.h (SAVE_FP_SUFFIX, RESTORE_FP_SUFFIX):
	Define to empty string unconditionally.
	* config/rs6000/sysv4.h (SAVE_FP_SUFFIX, RESTORE_FP_SUFFIX):
	Define to empty string unconditionally.
	(GP_SAVE_INLINE, FP_SAVE_INLINE): Handle TARGET_64BIT the same as
	!TARGET_64BIT.

	* gcc.target/powerpc/pr41175.c: New test.

Co-Authored-By: Jakub Jelinek <jakub@redhat.com>

From-SVN: r151729
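For context, the "precedence problem" fixed in no_global_regs_above comes from C operator precedence: the relational operator binds tighter than the conditional operator, so the old loop bound "i < gpr ? 32 : 64" did not mean what it appears to. A minimal standalone sketch of the parse (illustrative only, not GCC code):

#include <stdio.h>

int
main (void)
{
  int i = 10, gpr = 1;
  /* "i < gpr ? 32 : 64" parses as "(i < gpr) ? 32 : 64", which is
     always nonzero, so a loop bounded by it runs past the intended
     limit of 32 or 64 registers.  */
  printf ("%d\n", i < gpr ? 32 : 64);    /* prints 64 */
  /* The fix computes the bound up front, equivalent to the
     parenthesized form.  */
  printf ("%d\n", i < (gpr ? 32 : 64));  /* prints 1 */
  return 0;
}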
config/rs6000/linux64.h
@@ -437,11 +437,11 @@ extern int dot_symbols;
#undef SAVE_FP_PREFIX
#define SAVE_FP_PREFIX (TARGET_64BIT ? "._savef" : "_savefpr_")
#undef SAVE_FP_SUFFIX
#define SAVE_FP_SUFFIX (TARGET_64BIT ? "" : "_l")
#define SAVE_FP_SUFFIX ""
#undef RESTORE_FP_PREFIX
#define RESTORE_FP_PREFIX (TARGET_64BIT ? "._restf" : "_restfpr_")
#undef RESTORE_FP_SUFFIX
#define RESTORE_FP_SUFFIX (TARGET_64BIT ? "" : "_l")
#define RESTORE_FP_SUFFIX ""
/* Dwarf2 debugging. */
#undef PREFERRED_DEBUGGING_TYPE
config/rs6000/rs6000.c
@@ -18033,7 +18033,8 @@ static bool
no_global_regs_above (int first, bool gpr)
{
int i;
for (i = first; i < gpr ? 32 : 64 ; i++)
int last = gpr ? 32 : 64;
for (i = first; i < last; i++)
if (global_regs[i])
return false;
return true;
@@ -18050,54 +18051,136 @@ no_global_regs_above (int first, bool gpr)
static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8];
/* Return the symbol for an out-of-line register save/restore routine.
/* Temporary holding space for an out-of-line register save/restore
routine name. */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
We are saving/restoring GPRs if GPR is true. */
static char *
rs6000_savres_routine_name (rs6000_stack_t *info, int regno,
bool savep, bool gpr, bool lr)
{
const char *prefix = "";
const char *suffix = "";
/* Different targets are supposed to define
{SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
routine name could be defined with:
sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
This is a nice idea in practice, but in reality, things are
complicated in several ways:
- ELF targets have save/restore routines for GPRs.
- SPE targets use different prefixes for 32/64-bit registers, and
neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
- PPC64 ELF targets have routines for save/restore of GPRs that
differ in what they do with the link register, so having a set
prefix doesn't work. (We only use one of the save routines at
the moment, though.)
- PPC32 elf targets have "exit" versions of the restore routines
that restore the link register and can save some extra space.
These require an extra suffix. (There are also "tail" versions
of the restore routines and "GOT" versions of the save routines,
but we don't generate those at present. Same problems apply,
though.)
We deal with all this by synthesizing our own prefix/suffix and
using that for the simple sprintf call shown above. */
if (TARGET_SPE)
{
/* No floating point saves on the SPE. */
gcc_assert (gpr);
if (savep)
prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
else
prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
if (lr)
suffix = "_x";
}
else if (DEFAULT_ABI == ABI_V4)
{
if (TARGET_64BIT)
goto aix_names;
if (gpr)
prefix = savep ? "_savegpr_" : "_restgpr_";
else
prefix = savep ? "_savefpr_" : "_restfpr_";
if (lr)
suffix = "_x";
}
else if (DEFAULT_ABI == ABI_AIX)
{
#ifndef POWERPC_LINUX
/* No out-of-line save/restore routines for GPRs on AIX. */
gcc_assert (!TARGET_AIX || !gpr);
#endif
aix_names:
if (gpr)
prefix = (savep
? (lr ? "_savegpr0_" : "_savegpr1_")
: (lr ? "_restgpr0_" : "_restgpr1_"));
#ifdef POWERPC_LINUX
else if (lr)
prefix = (savep ? "_savefpr_" : "_restfpr_");
#endif
else
{
prefix = savep ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
suffix = savep ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
}
}
else if (DEFAULT_ABI == ABI_DARWIN)
sorry ("Out-of-line save/restore routines not supported on Darwin");
sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
return savres_routine_name;
}
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
We are saving/restoring GPRs if GPR is true. */
static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep, bool gpr, bool exitp)
rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep,
bool gpr, bool lr)
{
int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
rtx sym;
int select = ((savep ? 1 : 0) << 2
| (gpr
| ((TARGET_SPE_ABI
/* On the SPE, we never have any FPRs, but we do have
32/64-bit versions of the routines. */
? (TARGET_SPE_ABI && info->spe_64bit_regs_used ? 1 : 0)
: 0) << 1
| (exitp ? 1: 0));
? (info->spe_64bit_regs_used ? 1 : 0)
: (gpr ? 1 : 0)) << 1)
| (lr ? 1: 0));
/* Don't generate bogus routine names. */
gcc_assert (FIRST_SAVRES_REGISTER <= regno && regno <= LAST_SAVRES_REGISTER);
gcc_assert (FIRST_SAVRES_REGISTER <= regno
&& regno <= LAST_SAVRES_REGISTER);
sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
if (sym == NULL)
{
char name[30];
const char *action;
const char *regkind;
const char *exit_suffix;
action = savep ? "save" : "rest";
/* SPE has slightly different names for its routines depending on
whether we are saving 32-bit or 64-bit registers. */
if (TARGET_SPE_ABI)
{
/* No floating point saves on the SPE. */
gcc_assert (gpr);
regkind = info->spe_64bit_regs_used ? "64gpr" : "32gpr";
}
else
regkind = gpr ? "gpr" : "fpr";
exit_suffix = exitp ? "_x" : "";
char *name;
sprintf (name, "_%s%s_%d%s", action, regkind, regno, exit_suffix);
name = rs6000_savres_routine_name (info, regno, savep, gpr, lr);
sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
= gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
}
return sym;
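To illustrate how the prefix/suffix pieces above compose into routine names, here is a simplified sketch of the 32-bit SVR4 case only (illustrative, not the GCC implementation; the real rs6000_savres_routine_name also covers SPE, AIX and 64-bit Linux):

#include <stdio.h>

/* Simplified 32-bit SVR4 flavor of the naming scheme: a prefix chosen
   by save-vs-restore and GPR-vs-FPR, then the first saved register
   number, then "_x" for the variants that also handle the link
   register.  */
static void
show_name (int savep, int gpr, int lr, int regno)
{
  const char *prefix = gpr ? (savep ? "_savegpr_" : "_restgpr_")
                           : (savep ? "_savefpr_" : "_restfpr_");
  printf ("%s%d%s\n", prefix, regno, lr ? "_x" : "");
}

int
main (void)
{
  show_name (0, 1, 1, 29);  /* _restgpr_29_x: restore r29..r31 and LR, then return */
  show_name (1, 0, 0, 28);  /* _savefpr_28:   save f28..f31 */
  return 0;
}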
@@ -18124,8 +18207,11 @@ rs6000_emit_stack_reset (rs6000_stack_t *info,
if (frame_reg_rtx != sp_reg_rtx)
{
if (sp_offset != 0)
return emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx,
{
rtx dest_reg = savres ? gen_rtx_REG (Pmode, 11) : sp_reg_rtx;
return emit_insn (gen_add3_insn (dest_reg, frame_reg_rtx,
GEN_INT (sp_offset)));
}
else if (!savres)
return emit_move_insn (sp_reg_rtx, frame_reg_rtx);
}
@@ -18154,7 +18240,7 @@ static rtx
rs6000_make_savres_rtx (rs6000_stack_t *info,
rtx frame_reg_rtx, int save_area_offset,
enum machine_mode reg_mode,
bool savep, bool gpr, bool exitp)
bool savep, bool gpr, bool lr)
{
int i;
int offset, start_reg, end_reg, n_regs;
@@ -18168,20 +18254,21 @@ rs6000_make_savres_rtx (rs6000_stack_t *info,
: info->first_fp_reg_save);
end_reg = gpr ? 32 : 64;
n_regs = end_reg - start_reg;
p = rtvec_alloc ((exitp ? 4 : 3) + n_regs);
/* If we're saving registers, then we should never say we're exiting. */
gcc_assert ((savep && !exitp) || !savep);
p = rtvec_alloc ((lr ? 4 : 3) + n_regs);
if (exitp)
if (!savep && lr)
RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);
RTVEC_ELT (p, offset++)
= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));
sym = rs6000_savres_routine_sym (info, savep, gpr, exitp);
sym = rs6000_savres_routine_sym (info, savep, gpr, lr);
RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 11));
RTVEC_ELT (p, offset++)
= gen_rtx_USE (VOIDmode,
gen_rtx_REG (Pmode, DEFAULT_ABI != ABI_AIX ? 11
: gpr && !lr ? 12
: 1));
for (i = 0; i < end_reg - start_reg; i++)
{
@@ -18196,6 +18283,16 @@ rs6000_make_savres_rtx (rs6000_stack_t *info,
savep ? reg : mem);
}
if (savep && lr)
{
rtx addr, reg, mem;
reg = gen_rtx_REG (Pmode, 0);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->lr_save_offset));
mem = gen_frame_mem (Pmode, addr);
RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode, mem, reg);
}
return gen_rtx_PARALLEL (VOIDmode, p);
}
@@ -18216,7 +18313,10 @@ rs6000_reg_live_or_pic_offset_p (int reg)
enum {
SAVRES_MULTIPLE = 0x1,
SAVRES_INLINE_FPRS = 0x2,
SAVRES_INLINE_GPRS = 0x4
SAVRES_INLINE_GPRS = 0x4,
SAVRES_NOINLINE_GPRS_SAVES_LR = 0x8,
SAVRES_NOINLINE_FPRS_SAVES_LR = 0x10,
SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x20
};
/* Determine the strategy for savings/restoring registers. */
@@ -18231,6 +18331,7 @@ rs6000_savres_strategy (rs6000_stack_t *info, bool savep,
bool savres_gprs_inline;
bool noclobber_global_gprs
= no_global_regs_above (info->first_gp_reg_save, /*gpr=*/true);
int strategy;
using_multiple_p = (TARGET_MULTIPLE && ! TARGET_POWERPC64
&& (!TARGET_SPE_ABI
@@ -18250,6 +18351,10 @@ rs6000_savres_strategy (rs6000_stack_t *info, bool savep,
|| info->first_fp_reg_save == 64
|| !no_global_regs_above (info->first_fp_reg_save,
/*gpr=*/false)
/* The out-of-line FP routines use
double-precision stores; we can't use those
routines if we don't have such stores. */
|| (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
|| FP_SAVE_INLINE (info->first_fp_reg_save));
savres_gprs_inline = (common
/* Saving CR interferes with the exit routines
@@ -18287,9 +18392,22 @@ rs6000_savres_strategy (rs6000_stack_t *info, bool savep,
savres_gprs_inline = savres_gprs_inline || using_multiple_p;
}
return (using_multiple_p
strategy = (using_multiple_p
| (savres_fprs_inline << 1)
| (savres_gprs_inline << 2));
#ifdef POWERPC_LINUX
if (TARGET_64BIT)
{
if (!savres_fprs_inline)
strategy |= SAVRES_NOINLINE_FPRS_SAVES_LR;
else if (!savres_gprs_inline && info->first_fp_reg_save == 64)
strategy |= SAVRES_NOINLINE_GPRS_SAVES_LR;
}
#else
if (TARGET_AIX && !savres_fprs_inline)
strategy |= SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR;
#endif
return strategy;
}
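As a rough sketch of how callers are meant to consume the strategy bitmask returned above (the bit values mirror the enum earlier in the diff; the scenario in the comment is an assumed example):

#include <stdio.h>

enum {
  SAVRES_MULTIPLE = 0x1,
  SAVRES_INLINE_FPRS = 0x2,
  SAVRES_INLINE_GPRS = 0x4,
  SAVRES_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVRES_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x20
};

int
main (void)
{
  /* Example: FPRs handled inline, GPRs saved/restored out of line by a
     routine that also saves LR (the 64-bit Linux case when no FPRs are
     saved).  */
  int strategy = SAVRES_INLINE_FPRS | SAVRES_NOINLINE_GPRS_SAVES_LR;
  printf ("gprs inline: %d\n", (strategy & SAVRES_INLINE_GPRS) != 0);
  printf ("out-of-line gpr routine saves lr: %d\n",
          (strategy & SAVRES_NOINLINE_GPRS_SAVES_LR) != 0);
  return 0;
}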
/* Emit function prologue as insns. */
@@ -18311,7 +18429,7 @@ rs6000_emit_prologue (void)
int using_store_multiple;
int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
&& df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
&& !call_used_regs[STATIC_CHAIN_REGNUM]);
&& call_used_regs[STATIC_CHAIN_REGNUM]);
HOST_WIDE_INT sp_offset = 0;
if (TARGET_FIX_AND_CONTINUE)
@@ -18496,6 +18614,9 @@ rs6000_emit_prologue (void)
gen_rtx_REG (Pmode, LR_REGNO));
RTX_FRAME_RELATED_P (insn) = 1;
if (!(strategy & (SAVRES_NOINLINE_GPRS_SAVES_LR
| SAVRES_NOINLINE_FPRS_SAVES_LR)))
{
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->lr_save_offset + sp_offset));
reg = gen_rtx_REG (Pmode, 0);
@@ -18507,13 +18628,16 @@ rs6000_emit_prologue (void)
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
}
}
/* If we need to save CR, put it into r12. */
/* If we need to save CR, put it into r12 or r11. */
if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
{
rtx set;
cr_save_rtx = gen_rtx_REG (SImode, 12);
cr_save_rtx
= gen_rtx_REG (SImode, DEFAULT_ABI == ABI_AIX && !saving_GPRs_inline
? 11 : 12);
insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
RTX_FRAME_RELATED_P (insn) = 1;
/* Now, there's no way that dwarf2out_frame_debug_expr is going
@@ -18550,7 +18674,9 @@ rs6000_emit_prologue (void)
info->fp_save_offset + sp_offset,
DFmode,
/*savep=*/true, /*gpr=*/false,
/*exitp=*/false);
/*lr=*/(strategy
& SAVRES_NOINLINE_FPRS_SAVES_LR)
!= 0);
insn = emit_insn (par);
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
@@ -18646,7 +18772,7 @@ rs6000_emit_prologue (void)
par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
0, reg_mode,
/*savep=*/true, /*gpr=*/true,
/*exitp=*/false);
/*lr=*/false);
insn = emit_insn (par);
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
@@ -18661,23 +18787,23 @@ rs6000_emit_prologue (void)
{
rtx par;
/* Need to adjust r11 if we saved any FPRs. */
/* Need to adjust r11 (r12) if we saved any FPRs. */
if (info->first_fp_reg_save != 64)
{
rtx r11 = gen_rtx_REG (reg_mode, 11);
rtx offset = GEN_INT (info->total_size
rtx dest_reg = gen_rtx_REG (reg_mode, DEFAULT_ABI == ABI_AIX
? 12 : 11);
rtx offset = GEN_INT (sp_offset
+ (-8 * (64-info->first_fp_reg_save)));
rtx ptr_reg = (sp_reg_rtx == frame_reg_rtx
? sp_reg_rtx : r11);
emit_insn (gen_add3_insn (r11, ptr_reg, offset));
emit_insn (gen_add3_insn (dest_reg, frame_reg_rtx, offset));
}
par = rs6000_make_savres_rtx (info, frame_reg_rtx,
info->gp_save_offset + sp_offset,
reg_mode,
/*savep=*/true, /*gpr=*/true,
/*exitp=*/false);
/*lr=*/(strategy
& SAVRES_NOINLINE_GPRS_SAVES_LR)
!= 0);
insn = emit_insn (par);
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
@@ -18954,9 +19080,18 @@ rs6000_output_function_prologue (FILE *file,
fp values. */
if (info->first_fp_reg_save < 64
&& !FP_SAVE_INLINE (info->first_fp_reg_save))
fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
{
char *name;
int regno = info->first_fp_reg_save - 32;
name = rs6000_savres_routine_name (info, regno, /*savep=*/true,
/*gpr=*/false, /*lr=*/false);
fprintf (file, "\t.extern %s\n", name);
name = rs6000_savres_routine_name (info, regno, /*savep=*/false,
/*gpr=*/false, /*lr=*/true);
fprintf (file, "\t.extern %s\n", name);
}
/* Write .extern for AIX common mode routines, if needed. */
if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
@@ -19082,6 +19217,7 @@ rs6000_emit_epilogue (int sibcall)
rtx frame_reg_rtx = sp_reg_rtx;
rtx cfa_restores = NULL_RTX;
rtx insn;
rtx cr_save_reg = NULL_RTX;
enum machine_mode reg_mode = Pmode;
int reg_size = TARGET_32BIT ? 4 : 8;
int i;
@@ -19115,8 +19251,10 @@ rs6000_emit_epilogue (int sibcall)
|| (cfun->calls_alloca
&& !frame_pointer_needed));
restore_lr = (info->lr_save_p
&& restoring_GPRs_inline
&& restoring_FPRs_inline);
&& (restoring_FPRs_inline
|| (strategy & SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR))
&& (restoring_GPRs_inline
|| info->first_fp_reg_save < 64));
if (WORLD_SAVE_P (info))
{
@@ -19403,7 +19541,7 @@ rs6000_emit_epilogue (int sibcall)
/* Get the old lr if we saved it. If we are restoring registers
out-of-line, then the out-of-line routines can do this for us. */
if (restore_lr)
if (restore_lr && restoring_GPRs_inline)
{
rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
info->lr_save_offset + sp_offset);
@@ -19418,12 +19556,17 @@ rs6000_emit_epilogue (int sibcall)
GEN_INT (info->cr_save_offset + sp_offset));
rtx mem = gen_frame_mem (SImode, addr);
emit_move_insn (gen_rtx_REG (SImode, 12), mem);
cr_save_reg = gen_rtx_REG (SImode,
DEFAULT_ABI == ABI_AIX
&& !restoring_GPRs_inline
&& info->first_fp_reg_save < 64
? 11 : 12);
emit_move_insn (cr_save_reg, mem);
}
/* Set LR here to try to overlap restores below. LR is always saved
above incoming stack, so it never needs REG_CFA_RESTORE. */
if (restore_lr)
if (restore_lr && restoring_GPRs_inline)
emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
gen_rtx_REG (Pmode, 0));
@@ -19540,7 +19683,7 @@ rs6000_emit_epilogue (int sibcall)
par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
0, reg_mode,
/*savep=*/false, /*gpr=*/true,
/*exitp=*/true);
/*lr=*/true);
emit_jump_insn (par);
/* We don't want anybody else emitting things after we jumped
back. */
@@ -19558,21 +19701,25 @@ rs6000_emit_epilogue (int sibcall)
rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
sp_offset, can_use_exit);
else
emit_insn (gen_addsi3 (gen_rtx_REG (Pmode, 11),
sp_reg_rtx,
{
emit_insn (gen_add3_insn (gen_rtx_REG (Pmode, DEFAULT_ABI == ABI_AIX
? 12 : 11),
frame_reg_rtx,
GEN_INT (sp_offset - info->fp_size)));
if (REGNO (frame_reg_rtx) == 11)
sp_offset += info->fp_size;
}
par = rs6000_make_savres_rtx (info, frame_reg_rtx,
info->gp_save_offset, reg_mode,
/*savep=*/false, /*gpr=*/true,
/*exitp=*/can_use_exit);
/*lr=*/can_use_exit);
if (can_use_exit)
{
if (info->cr_save_p)
{
rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12),
using_mtcr_multiple);
rs6000_restore_saved_cr (cr_save_reg, using_mtcr_multiple);
if (DEFAULT_ABI == ABI_V4)
cfa_restores
= alloc_reg_note (REG_CFA_RESTORE,
@@ -19659,6 +19806,16 @@ rs6000_emit_epilogue (int sibcall)
}
}
if (restore_lr && !restoring_GPRs_inline)
{
rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
info->lr_save_offset + sp_offset);
emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
gen_rtx_REG (Pmode, 0));
}
/* Restore fpr's if we need to do it without calling a function. */
if (restoring_FPRs_inline)
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
@@ -19685,7 +19842,7 @@ rs6000_emit_epilogue (int sibcall)
/* If we saved cr, restore it here. Just those that were used. */
if (info->cr_save_p)
{
rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), using_mtcr_multiple);
rs6000_restore_saved_cr (cr_save_reg, using_mtcr_multiple);
if (DEFAULT_ABI == ABI_V4)
cfa_restores
= alloc_reg_note (REG_CFA_RESTORE, gen_rtx_REG (SImode, CR2_REGNO),
@@ -19716,13 +19873,14 @@ rs6000_emit_epilogue (int sibcall)
if (!sibcall)
{
rtvec p;
bool lr = (strategy & SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
if (! restoring_FPRs_inline)
p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
else
p = rtvec_alloc (2);
RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
RTVEC_ELT (p, 1) = (restoring_FPRs_inline
RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
: gen_rtx_CLOBBER (VOIDmode,
gen_rtx_REG (Pmode, 65)));
@@ -19737,10 +19895,12 @@ rs6000_emit_epilogue (int sibcall)
sym = rs6000_savres_routine_sym (info,
/*savep=*/false,
/*gpr=*/false,
/*exitp=*/true);
/*lr=*/lr);
RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
gen_rtx_REG (Pmode, 11));
gen_rtx_REG (Pmode,
DEFAULT_ABI == ABI_AIX
? 1 : 11));
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
{
rtx addr, mem;
config/rs6000/rs6000.md
@@ -15436,6 +15436,19 @@
[(set_attr "type" "branch")
(set_attr "length" "4")])
(define_insn "*return_and_restore_fpregs_aix_<mode>"
[(match_parallel 0 "any_parallel_operand"
[(return)
(use (match_operand:P 1 "register_operand" "l"))
(use (match_operand:P 2 "symbol_ref_operand" "s"))
(use (match_operand:P 3 "gpc_reg_operand" "r"))
(set (match_operand:DF 4 "gpc_reg_operand" "=d")
(match_operand:DF 5 "memory_operand" "m"))])]
""
"b %z2"
[(set_attr "type" "branch")
(set_attr "length" "4")])
; This is used in compiling the unwind routines.
(define_expand "eh_return"
[(use (match_operand 0 "general_operand" ""))]
config/rs6000/spe.md
@@ -3156,9 +3156,9 @@
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P 65))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (match_operand:P 2 "gpc_reg_operand" "r"))
(set (match_operand:V2SI 3 "memory_operand" "=m")
(match_operand:V2SI 4 "gpc_reg_operand" "r"))])]
(use (reg:P 11))
(set (match_operand:V2SI 2 "memory_operand" "=m")
(match_operand:V2SI 3 "gpc_reg_operand" "r"))])]
"TARGET_SPE_ABI"
"bl %z1"
[(set_attr "type" "branch")
@@ -3168,9 +3168,9 @@
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P 65))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (match_operand:P 2 "gpc_reg_operand" "r"))
(set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
(match_operand:V2SI 4 "memory_operand" "m"))])]
(use (reg:P 11))
(set (match_operand:V2SI 2 "gpc_reg_operand" "=r")
(match_operand:V2SI 3 "memory_operand" "m"))])]
"TARGET_SPE_ABI"
"bl %z1"
[(set_attr "type" "branch")
@@ -3181,9 +3181,9 @@
[(return)
(clobber (reg:P 65))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (match_operand:P 2 "gpc_reg_operand" "r"))
(set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
(match_operand:V2SI 4 "memory_operand" "m"))])]
(use (reg:P 11))
(set (match_operand:V2SI 2 "gpc_reg_operand" "=r")
(match_operand:V2SI 3 "memory_operand" "m"))])]
"TARGET_SPE_ABI"
"b %z1"
[(set_attr "type" "branch")
config/rs6000/sysv4.h
@@ -272,27 +272,25 @@ do { \
#endif
/* Define cutoff for using external functions to save floating point.
Currently on 64-bit V.4, always use inline stores. When optimizing
for size on 32-bit targets, use external functions when
profitable. */
#define FP_SAVE_INLINE(FIRST_REG) (optimize_size && !TARGET_64BIT \
When optimizing for size, use external functions when profitable. */
#define FP_SAVE_INLINE(FIRST_REG) (optimize_size \
? ((FIRST_REG) == 62 \
|| (FIRST_REG) == 63) \
: (FIRST_REG) < 64)
/* And similarly for general purpose registers. */
#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32 \
&& (TARGET_64BIT || !optimize_size))
&& !optimize_size)
/* Put jump tables in read-only memory, rather than in .text. */
#define JUMP_TABLES_IN_TEXT_SECTION 0
/* Prefix and suffix to use to saving floating point. */
#define SAVE_FP_PREFIX "_savefpr_"
#define SAVE_FP_SUFFIX (TARGET_64BIT ? "_l" : "")
#define SAVE_FP_SUFFIX ""
/* Prefix and suffix to use to restoring floating point. */
#define RESTORE_FP_PREFIX "_restfpr_"
#define RESTORE_FP_SUFFIX (TARGET_64BIT ? "_l" : "")
#define RESTORE_FP_SUFFIX ""
/* Type used for ptrdiff_t, as a string used in a declaration. */
#define PTRDIFF_TYPE "int"
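To make the -Os cutoff in FP_SAVE_INLINE above concrete, here is a small standalone sketch (illustrative only; FIRST_REG uses GCC's internal register numbering, where 62 and 63 correspond to f30 and f31):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the sysv4.h cutoff: when optimizing for size, keep the FP
   save inline only if at most f30/f31 need saving; otherwise use the
   out-of-line routine.  Without -Os, FP saves always stay inline.  */
static bool
fp_save_inline (int first_reg, bool optimize_size)
{
  return optimize_size ? (first_reg == 62 || first_reg == 63)
                       : first_reg < 64;
}

int
main (void)
{
  printf ("%d\n", fp_save_inline (58, true));   /* 0: f26..f31 go out of line */
  printf ("%d\n", fp_save_inline (63, true));   /* 1: just f31, kept inline   */
  printf ("%d\n", fp_save_inline (58, false));  /* 1: not -Os, kept inline    */
  return 0;
}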
gcc.target/powerpc/pr41175.c (new test)
/* PR target/41175 */
/* { dg-do run } */
/* { dg-options "-Os" } */
#define X2(n) X1(n##0) X1(n##1)
#define X4(n) X2(n##0) X2(n##1)
#define X8(n) X4(n##0) X4(n##1)
#ifndef __SPE__
#define FLOAT_REG_CONSTRAINT "f"
#else
#define FLOAT_REG_CONSTRAINT "r"
#endif
volatile int ll;
__attribute__((noinline)) void
foo (void)
{
asm volatile ("" : : : "memory");
}
__attribute__((noinline)) void
bar (char *p)
{
asm volatile ("" : : "r" (p) : "memory");
}
__attribute__((noinline)) void
f1 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
foo ();
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f2 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
char *pp = __builtin_alloca (ll);
bar (pp);
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f3 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
}
#ifndef __NO_FPRS__
__attribute__((noinline)) void
f4 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X4(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X4(d) "=m" (mem) : : "memory");
foo ();
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X4(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f5 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X4(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X4(d) "=m" (mem) : : "memory");
char *pp = __builtin_alloca (ll);
bar (pp);
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X4(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f6 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X4(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X4(d) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X4(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f7 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X2(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X2(d) "=m" (mem) : : "memory");
foo ();
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X2(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f8 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X2(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X2(d) "=m" (mem) : : "memory");
char *pp = __builtin_alloca (ll);
bar (pp);
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X2(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f9 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X8(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X2(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X8(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X2(d) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X8(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X2(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f10 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X4(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X1(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X4(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X1(d) "=m" (mem) : : "memory");
foo ();
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X4(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X1(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f11 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X4(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X1(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X4(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X1(d) "=m" (mem) : : "memory");
char *pp = __builtin_alloca (ll);
bar (pp);
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X4(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X1(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f12 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X4(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X1(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X4(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X1(d) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X4(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X1(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f13 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X2(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X8(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X2(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X8(d) "=m" (mem) : : "memory");
foo ();
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X2(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X8(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f14 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X2(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X8(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X2(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X8(d) "=m" (mem) : : "memory");
char *pp = __builtin_alloca (ll);
bar (pp);
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X2(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X8(d) "m" (mem) : "memory");
}
__attribute__((noinline)) void
f15 (void)
{
int mem;
#undef X1
#define X1(n) int gpr##n = 0;
X8(a) X8(b) X2(c)
#undef X1
#define X1(n) double fpr##n = 0.0;
X8(d)
#undef X1
#define X1(n) "+r" (gpr##n),
asm volatile ("" : X8(a) "=m" (mem) : : "memory");
asm volatile ("" : X8(b) "=m" (mem) : : "memory");
asm volatile ("" : X2(c) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "+" FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : X8(d) "=m" (mem) : : "memory");
#undef X1
#define X1(n) "r" (gpr##n),
asm volatile ("" : : X8(a) "m" (mem) : "memory");
asm volatile ("" : : X8(b) "m" (mem) : "memory");
asm volatile ("" : : X2(c) "m" (mem) : "memory");
#undef X1
#define X1(n) FLOAT_REG_CONSTRAINT (fpr##n),
asm volatile ("" : : X8(d) "m" (mem) : "memory");
}
#endif
int
main ()
{
ll = 60;
f1 ();
f2 ();
f3 ();
#ifndef __NO_FPRS__
f4 ();
f5 ();
f6 ();
f7 ();
f8 ();
f9 ();
f10 ();
f11 ();
f12 ();
f13 ();
f14 ();
f15 ();
#endif
return 0;
}