Commit 8947065c by Richard Henderson

sparc.h (PREFERRED_RELOAD_CLASS): Select GENERAL_REGS for integer data not destined for fp regs.

        * sparc.h (PREFERRED_RELOAD_CLASS): Select GENERAL_REGS for
        integer data not destined for fp regs.
        (LEGITIMIZE_RELOAD_ADDRESS): New.
Thu Jan  7 03:03:42 1999  Stan Cox  <scox@cygnus.com>
                          Richard Henderson  <rth@cygnus.com>
        Support for Hypersparc and Sparclite86x:
        * sparc.h (TARGET_CPU_hypersparc, TARGET_CPU_sparclite86x): New.
        (CPP_CPU32_DEFAULT_SPEC): Fix up for the new targets.
        (ASM_CPU32_DEFAULT_SPEC): Likewise.
        (TARGET_CPU_DEFAULT): Likewise.
        (enum processor_type): Likewise.
        (CPP_ENDIAN_SPEC): Handle little endian data.
        (LIBGCC2_WORDS_BIG_ENDIAN): Likewise.
        (ADJUST_COST): Call sparc_adjust_cost.
        * sparc.c (sparc_override_options): Fix up for the new targets.
        (supersparc_adjust_cost): Make static.
        (hypersparc_adjust_cost): New.
        (ultrasparc_adjust_cost): Make static.
        (sparc_adjust_cost): New.
        * sparc.md (attr cpu): Add hypersparc and sparclite86x.
        (function_unit): Add hypersparc scheduling rules.
        * configure.in (with_cpu handler): Recognize hypersparc.

From-SVN: r24556
parent 4ddb3ea6
Thu Jan 7 03:08:17 1999 Richard Henderson <rth@cygnus.com>
* sparc.h (PREFERRED_RELOAD_CLASS): Select GENERAL_REGS for
integer data not destined for fp regs.
(LEGITIMIZE_RELOAD_ADDRESS): New.
Thu Jan 7 03:03:42 1999 Stan Cox <scox@cygnus.com>
Richard Henderson <rth@cygnus.com>
Support for Hypersparc and Sparclite86x:
* sparc.h (TARGET_CPU_hypersparc, TARGET_CPU_sparclite86x): New.
(CPP_CPU32_DEFAULT_SPEC): Fix up for the new targets.
(ASM_CPU32_DEFAULT_SPEC): Likewise.
(TARGET_CPU_DEFAULT): Likewise.
(enum processor_type): Likewise.
(CPP_ENDIAN_SPEC): Handle little endian data.
(LIBGCC2_WORDS_BIG_ENDIAN): Likewise.
(ADJUST_COST): Call sparc_adjust_cost.
* sparc.c (sparc_override_options): Fix up for the new targets.
(supersparc_adjust_cost): Make static.
(hypersparc_adjust_cost): New.
(ultrasparc_adjust_cost): Make static.
(sparc_adjust_cost): New.
* sparc.md (attr cpu): Add hypersparc and sparclite86x.
(function_unit): Add hypersparc scheduling rules.
* configure.in (with_cpu handler): Recognize hypersparc.
Thu Jan 7 23:54:05 1999 Michael Hayes <m.hayes@elec.canterbury.ac.nz>
* config/c4x/c4x.c: Added space after negation operator.
......
...@@ -110,6 +110,11 @@ static void build_big_number PROTO((FILE *, int, char *)); ...@@ -110,6 +110,11 @@ static void build_big_number PROTO((FILE *, int, char *));
static int function_arg_slotno PROTO((const CUMULATIVE_ARGS *, static int function_arg_slotno PROTO((const CUMULATIVE_ARGS *,
enum machine_mode, tree, int, int, enum machine_mode, tree, int, int,
int *, int *)); int *, int *));
static int supersparc_adjust_cost PROTO((rtx, rtx, rtx, int));
static int hypersparc_adjust_cost PROTO((rtx, rtx, rtx, int));
static int ultrasparc_adjust_cost PROTO((rtx, rtx, rtx, int));
static void sparc_output_addr_vec PROTO((rtx)); static void sparc_output_addr_vec PROTO((rtx));
static void sparc_output_addr_diff_vec PROTO((rtx)); static void sparc_output_addr_diff_vec PROTO((rtx));
static void sparc_output_deferred_case_vectors PROTO((void)); static void sparc_output_deferred_case_vectors PROTO((void));
...@@ -176,6 +181,8 @@ sparc_override_options () ...@@ -176,6 +181,8 @@ sparc_override_options ()
{ TARGET_CPU_sparclet, "tsc701" }, { TARGET_CPU_sparclet, "tsc701" },
{ TARGET_CPU_sparclite, "f930" }, { TARGET_CPU_sparclite, "f930" },
{ TARGET_CPU_v8, "v8" }, { TARGET_CPU_v8, "v8" },
{ TARGET_CPU_hypersparc, "hypersparc" },
{ TARGET_CPU_sparclite86x, "sparclite86x" },
{ TARGET_CPU_supersparc, "supersparc" }, { TARGET_CPU_supersparc, "supersparc" },
{ TARGET_CPU_v9, "v9" }, { TARGET_CPU_v9, "v9" },
{ TARGET_CPU_ultrasparc, "ultrasparc" }, { TARGET_CPU_ultrasparc, "ultrasparc" },
...@@ -199,6 +206,8 @@ sparc_override_options () ...@@ -199,6 +206,8 @@ sparc_override_options ()
The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */ The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
{ "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE }, { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
{ "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU }, { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
{ "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
{ "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU, MASK_V8 },
{ "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET }, { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
/* TEMIC sparclet */ /* TEMIC sparclet */
{ "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET }, { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
...@@ -6194,7 +6203,7 @@ sparc_flat_eligible_for_epilogue_delay (trial, slot) ...@@ -6194,7 +6203,7 @@ sparc_flat_eligible_for_epilogue_delay (trial, slot)
/* Adjust the cost of a scheduling dependency. Return the new cost of /* Adjust the cost of a scheduling dependency. Return the new cost of
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */ a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
int static int
supersparc_adjust_cost (insn, link, dep_insn, cost) supersparc_adjust_cost (insn, link, dep_insn, cost)
rtx insn; rtx insn;
rtx link; rtx link;
...@@ -6259,6 +6268,261 @@ supersparc_adjust_cost (insn, link, dep_insn, cost) ...@@ -6259,6 +6268,261 @@ supersparc_adjust_cost (insn, link, dep_insn, cost)
return cost; return cost;
} }
/* Adjust the cost of a scheduling dependency for the HyperSPARC
   (also used for the SPARClite86x -- see sparc_adjust_cost).
   LINK describes the dependence of INSN on DEP_INSN; COST is the
   current cost estimate.  Return the adjusted cost.  */

static int
hypersparc_adjust_cost (insn, link, dep_insn, cost)
     rtx insn;
     rtx link;
     rtx dep_insn;
     int cost;
{
  enum attr_type insn_type, dep_type;
  rtx pat = PATTERN(insn);
  rtx dep_pat = PATTERN (dep_insn);

  /* If either insn is unrecognized, its type attribute cannot be
     queried; leave the cost alone.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_type = get_attr_type (dep_insn);

  switch (REG_NOTE_KIND (link))
    {
    case 0:
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */
      switch (insn_type)
	{
	case TYPE_STORE:
	case TYPE_FPSTORE:
	  /* Get the delay iff the address of the store is the dependence.  */
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    return cost;

	  /* Dependence on the stored data itself incurs no extra delay.  */
	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
	    return cost;
	  return cost + 3;

	case TYPE_LOAD:
	case TYPE_SLOAD:
	case TYPE_FPLOAD:
	  /* If a load, then the dependence must be on the memory address.  If
	     the addresses aren't equal, then it might be a false dependency */
	  if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
	    {
	      if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
		  || GET_CODE (SET_DEST (dep_pat)) != MEM
		  || GET_CODE (SET_SRC (pat)) != MEM
		  || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
				    XEXP (SET_SRC (pat), 0)))
		return cost + 2;

	      return cost + 8;
	    }
	  break;

	case TYPE_BRANCH:
	  /* Compare to branch latency is 0.  There is no benefit from
	     separating compare and branch.  */
	  if (dep_type == TYPE_COMPARE)
	    return 0;
	  /* Floating point compare to branch latency is less than
	     compare to conditional move.  */
	  if (dep_type == TYPE_FPCMP)
	    return cost - 1;
	  break;
	}
      break;

    case REG_DEP_ANTI:
      /* Anti-dependencies only penalize the fpu unit.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
	return 0;
      break;

    default:
      break;
    }

  return cost;
}
/* Adjust the cost of a scheduling dependency for the UltraSPARC.
   LINK describes the dependence of INSN on DEP_INSN; COST is the
   current cost estimate.  Return the adjusted cost.  */

static int
ultrasparc_adjust_cost (insn, link, dep_insn, cost)
     rtx insn;
     rtx link;
     rtx dep_insn;
     int cost;
{
  enum attr_type insn_type, dep_type;
  rtx pat = PATTERN(insn);
  rtx dep_pat = PATTERN (dep_insn);

  /* If either insn is unrecognized, its type attribute cannot be
     queried; leave the cost alone.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_type = get_attr_type (dep_insn);

  /* Nothing issues in parallel with integer multiplies, so
     mark as zero cost since the scheduler can not do anything
     about it.  */
  if (insn_type == TYPE_IMUL)
    return 0;

/* TRUE iff DEP_TYPE is an fp sqrt or divide, which lock their
   destination registers (see the REG_DEP_ANTI case below).  */
#define SLOW_FP(dep_type) \
(dep_type == TYPE_FPSQRT || dep_type == TYPE_FPDIVS || dep_type == TYPE_FPDIVD)

  switch (REG_NOTE_KIND (link))
    {
    case 0:
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */
      if (dep_type == TYPE_CMOVE)
	{
	  /* Instructions that read the result of conditional moves cannot
	     be in the same group or the following group.  */
	  return cost + 1;
	}

      switch (insn_type)
	{
	  /* UltraSPARC can dual issue a store and an instruction setting
	     the value stored, except for divide and square root.  */
	case TYPE_FPSTORE:
	  if (! SLOW_FP (dep_type))
	    return 0;
	  return cost;

	case TYPE_STORE:
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    return cost;

	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
	    /* The dependency between the two instructions is on the data
	       that is being stored.  Assume that the address of the store
	       is not also dependent.  */
	    return 0;
	  return cost;

	case TYPE_LOAD:
	case TYPE_SLOAD:
	case TYPE_FPLOAD:
	  /* A load does not return data until at least 11 cycles after
	     a store to the same location.  3 cycles are accounted for
	     in the load latency; add the other 8 here.  */
	  if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
	    {
	      /* If the addresses are not equal this may be a false
		 dependency because pointer aliasing could not be
		 determined.  Add only 2 cycles in that case.  2 is
		 an arbitrary compromise between 8, which would cause
		 the scheduler to generate worse code elsewhere to
		 compensate for a dependency which might not really
		 exist, and 0.  */
	      if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
		  || GET_CODE (SET_SRC (pat)) != MEM
		  || GET_CODE (SET_DEST (dep_pat)) != MEM
		  || ! rtx_equal_p (XEXP (SET_SRC (pat), 0),
				    XEXP (SET_DEST (dep_pat), 0)))
		return cost + 2;

	      return cost + 8;
	    }
	  return cost;

	case TYPE_BRANCH:
	  /* Compare to branch latency is 0.  There is no benefit from
	     separating compare and branch.  */
	  if (dep_type == TYPE_COMPARE)
	    return 0;
	  /* Floating point compare to branch latency is less than
	     compare to conditional move.  */
	  if (dep_type == TYPE_FPCMP)
	    return cost - 1;
	  return cost;

	case TYPE_FPCMOVE:
	  /* FMOVR class instructions can not issue in the same cycle
	     or the cycle after an instruction which writes any
	     integer register.  Model this as cost 2 for dependent
	     instructions.  */
	  if ((dep_type == TYPE_IALU || dep_type == TYPE_UNARY
	       || dep_type == TYPE_BINARY)
	      && cost < 2)
	    return 2;
	  /* Otherwise check as for integer conditional moves.  */

	case TYPE_CMOVE:
	  /* Conditional moves involving integer registers wait until
	     3 cycles after loads return data.  The interlock applies
	     to all loads, not just dependent loads, but that is hard
	     to model.  */
	  if (dep_type == TYPE_LOAD || dep_type == TYPE_SLOAD)
	    return cost + 3;
	  return cost;

	default:
	  break;
	}
      break;

    case REG_DEP_ANTI:
      /* Divide and square root lock destination registers for full latency.  */
      if (! SLOW_FP (dep_type))
	return 0;
      break;

    case REG_DEP_OUTPUT:
      /* IEU and FPU instruction that have the same destination
	 register cannot be grouped together.  */
      return cost + 1;

    default:
      break;
    }

  /* Other costs not accounted for:
     - Single precision floating point loads lock the other half of
       the even/odd register pair.
     - Several hazards associated with ldd/std are ignored because these
       instructions are rarely generated for V9.
     - The floating point pipeline can not have both a single and double
       precision operation active at the same time.  Format conversions
       and graphics instructions are given honorary double precision status.
     - call and jmpl are always the first instruction in a group.  */
  return cost;
#undef SLOW_FP
}
/* Adjust the cost of a scheduling dependency.  LINK describes the
   dependence of INSN on DEP; COST is the current cost estimate.
   Dispatch to the cost-adjustment routine for the processor being
   scheduled for; for processors with no special rules, return COST
   unchanged.  Called from the ADJUST_COST macro in sparc.h.  */

int
sparc_adjust_cost (insn, link, dep, cost)
     rtx insn;
     rtx link;
     rtx dep;
     int cost;
{
  switch (sparc_cpu)
    {
    case PROCESSOR_SUPERSPARC:
      cost = supersparc_adjust_cost (insn, link, dep, cost);
      break;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      /* The SPARClite86x uses the HyperSPARC cost model.  */
      cost = hypersparc_adjust_cost (insn, link, dep, cost);
      break;
    case PROCESSOR_ULTRASPARC:
      cost = ultrasparc_adjust_cost (insn, link, dep, cost);
      break;
    default:
      break;
    }
  return cost;
}
/* This describes the state of the UltraSPARC pipeline during /* This describes the state of the UltraSPARC pipeline during
instruction scheduling. */ instruction scheduling. */
...@@ -6990,155 +7254,6 @@ ultrasparc_sched_reorder (dump, sched_verbose, ready, n_ready) ...@@ -6990,155 +7254,6 @@ ultrasparc_sched_reorder (dump, sched_verbose, ready, n_ready)
} }
} }
/* Adjust the cost of a scheduling dependency for the UltraSPARC.
   LINK describes the dependence of INSN on DEP_INSN; COST is the
   current cost estimate.  Return the adjusted cost.
   NOTE(review): this is the pre-change, externally-visible copy shown
   on the removed side of the diff; the commit replaces it with the
   static definition earlier in the file.  */

int
ultrasparc_adjust_cost (insn, link, dep_insn, cost)
     rtx insn;
     rtx link;
     rtx dep_insn;
     int cost;
{
  enum attr_type insn_type, dep_type;
  rtx pat = PATTERN(insn);
  rtx dep_pat = PATTERN (dep_insn);

  /* If either insn is unrecognized, its type attribute cannot be
     queried; leave the cost alone.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_type = get_attr_type (dep_insn);

  /* Nothing issues in parallel with integer multiplies, so
     mark as zero cost since the scheduler can not do anything
     about it.  */
  if (insn_type == TYPE_IMUL)
    return 0;

/* TRUE iff DEP_TYPE is an fp sqrt or divide, which lock their
   destination registers (see the REG_DEP_ANTI case below).  */
#define SLOW_FP(dep_type) \
(dep_type == TYPE_FPSQRT || dep_type == TYPE_FPDIVS || dep_type == TYPE_FPDIVD)

  switch (REG_NOTE_KIND (link))
    {
    case 0:
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */
      if (dep_type == TYPE_CMOVE)
	{
	  /* Instructions that read the result of conditional moves cannot
	     be in the same group or the following group.  */
	  return cost + 1;
	}

      switch (insn_type)
	{
	  /* UltraSPARC can dual issue a store and an instruction setting
	     the value stored, except for divide and square root.  */
	case TYPE_FPSTORE:
	  if (! SLOW_FP (dep_type))
	    return 0;
	  return cost;

	case TYPE_STORE:
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    return cost;

	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
	    /* The dependency between the two instructions is on the data
	       that is being stored.  Assume that the address of the store
	       is not also dependent.  */
	    return 0;
	  return cost;

	case TYPE_LOAD:
	case TYPE_SLOAD:
	case TYPE_FPLOAD:
	  /* A load does not return data until at least 11 cycles after
	     a store to the same location.  3 cycles are accounted for
	     in the load latency; add the other 8 here.  */
	  if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
	    {
	      /* If the addresses are not equal this may be a false
		 dependency because pointer aliasing could not be
		 determined.  Add only 2 cycles in that case.  2 is
		 an arbitrary compromise between 8, which would cause
		 the scheduler to generate worse code elsewhere to
		 compensate for a dependency which might not really
		 exist, and 0.  */
	      if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
		  || GET_CODE (SET_SRC (pat)) != MEM
		  || GET_CODE (SET_DEST (dep_pat)) != MEM
		  || ! rtx_equal_p (XEXP (SET_SRC (pat), 0),
				    XEXP (SET_DEST (dep_pat), 0)))
		return cost + 2;

	      return cost + 8;
	    }
	  return cost;

	case TYPE_BRANCH:
	  /* Compare to branch latency is 0.  There is no benefit from
	     separating compare and branch.  */
	  if (dep_type == TYPE_COMPARE)
	    return 0;
	  /* Floating point compare to branch latency is less than
	     compare to conditional move.  */
	  if (dep_type == TYPE_FPCMP)
	    return cost - 1;
	  return cost;

	case TYPE_FPCMOVE:
	  /* FMOVR class instructions can not issue in the same cycle
	     or the cycle after an instruction which writes any
	     integer register.  Model this as cost 2 for dependent
	     instructions.  */
	  if ((dep_type == TYPE_IALU || dep_type == TYPE_UNARY
	       || dep_type == TYPE_BINARY)
	      && cost < 2)
	    return 2;
	  /* Otherwise check as for integer conditional moves.  */

	case TYPE_CMOVE:
	  /* Conditional moves involving integer registers wait until
	     3 cycles after loads return data.  The interlock applies
	     to all loads, not just dependent loads, but that is hard
	     to model.  */
	  if (dep_type == TYPE_LOAD || dep_type == TYPE_SLOAD)
	    return cost + 3;
	  return cost;

	default:
	  break;
	}
      break;

    case REG_DEP_ANTI:
      /* Divide and square root lock destination registers for full latency.  */
      if (! SLOW_FP (dep_type))
	return 0;
      break;

    case REG_DEP_OUTPUT:
      /* IEU and FPU instruction that have the same destination
	 register cannot be grouped together.  */
      return cost + 1;

    default:
      break;
    }

  /* Other costs not accounted for:
     - Single precision floating point loads lock the other half of
       the even/odd register pair.
     - Several hazards associated with ldd/std are ignored because these
       instructions are rarely generated for V9.
     - The floating point pipeline can not have both a single and double
       precision operation active at the same time.  Format conversions
       and graphics instructions are given honorary double precision status.
     - call and jmpl are always the first instruction in a group.  */
  return cost;
}
int int
sparc_issue_rate () sparc_issue_rate ()
{ {
......
...@@ -106,18 +106,23 @@ extern enum cmodel sparc_cmodel; ...@@ -106,18 +106,23 @@ extern enum cmodel sparc_cmodel;
/* Values of TARGET_CPU_DEFAULT, set via -D in the Makefile, /* Values of TARGET_CPU_DEFAULT, set via -D in the Makefile,
and specified by the user via --with-cpu=foo. and specified by the user via --with-cpu=foo.
This specifies the cpu implementation, not the architecture size. */ This specifies the cpu implementation, not the architecture size. */
/* Note that TARGET_CPU_v9 is assumed to start the list of 64-bit
capable cpu's. */
#define TARGET_CPU_sparc 0 #define TARGET_CPU_sparc 0
#define TARGET_CPU_v7 0 /* alias for previous */ #define TARGET_CPU_v7 0 /* alias for previous */
#define TARGET_CPU_sparclet 1 #define TARGET_CPU_sparclet 1
#define TARGET_CPU_sparclite 2 #define TARGET_CPU_sparclite 2
#define TARGET_CPU_v8 3 /* generic v8 implementation */ #define TARGET_CPU_v8 3 /* generic v8 implementation */
#define TARGET_CPU_supersparc 4 #define TARGET_CPU_supersparc 4
#define TARGET_CPU_v9 5 /* generic v9 implementation */ #define TARGET_CPU_hypersparc 5
#define TARGET_CPU_sparcv9 5 /* alias */ #define TARGET_CPU_sparclite86x 6
#define TARGET_CPU_sparc64 5 /* alias */ #define TARGET_CPU_v9 7 /* generic v9 implementation */
#define TARGET_CPU_ultrasparc 6 #define TARGET_CPU_sparcv9 7 /* alias */
#define TARGET_CPU_sparc64 7 /* alias */
#define TARGET_CPU_ultrasparc 8
#if TARGET_CPU_DEFAULT == TARGET_CPU_v9 || TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc #if TARGET_CPU_DEFAULT == TARGET_CPU_v9 \
|| TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
#define CPP_CPU32_DEFAULT_SPEC "" #define CPP_CPU32_DEFAULT_SPEC ""
#define ASM_CPU32_DEFAULT_SPEC "" #define ASM_CPU32_DEFAULT_SPEC ""
...@@ -140,19 +145,37 @@ extern enum cmodel sparc_cmodel; ...@@ -140,19 +145,37 @@ extern enum cmodel sparc_cmodel;
#define CPP_CPU64_DEFAULT_SPEC "" #define CPP_CPU64_DEFAULT_SPEC ""
#define ASM_CPU64_DEFAULT_SPEC "" #define ASM_CPU64_DEFAULT_SPEC ""
#if TARGET_CPU_DEFAULT == TARGET_CPU_sparc || TARGET_CPU_DEFAULT == TARGET_CPU_v8 || TARGET_CPU_DEFAULT == TARGET_CPU_supersparc #if TARGET_CPU_DEFAULT == TARGET_CPU_sparc \
|| TARGET_CPU_DEFAULT == TARGET_CPU_v8
#define CPP_CPU32_DEFAULT_SPEC "" #define CPP_CPU32_DEFAULT_SPEC ""
#define ASM_CPU32_DEFAULT_SPEC "" #define ASM_CPU32_DEFAULT_SPEC ""
#endif #endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_sparclet #if TARGET_CPU_DEFAULT == TARGET_CPU_sparclet
#define CPP_CPU32_DEFAULT_SPEC "-D__sparclet__" #define CPP_CPU32_DEFAULT_SPEC "-D__sparclet__"
#define ASM_CPU32_DEFAULT_SPEC "-Asparclet" #define ASM_CPU32_DEFAULT_SPEC "-Asparclet"
#endif #endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_sparclite #if TARGET_CPU_DEFAULT == TARGET_CPU_sparclite
#define CPP_CPU32_DEFAULT_SPEC "-D__sparclite__" #define CPP_CPU32_DEFAULT_SPEC "-D__sparclite__"
#define ASM_CPU32_DEFAULT_SPEC "-Asparclite" #define ASM_CPU32_DEFAULT_SPEC "-Asparclite"
#endif #endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_supersparc
#define CPP_CPU32_DEFAULT_SPEC "-D__supersparc__ -D__sparc_v8__"
#define ASM_CPU32_DEFAULT_SPEC ""
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_hypersparc
#define CPP_CPU32_DEFAULT_SPEC "-D__hypersparc__ -D__sparc_v8__"
#define ASM_CPU32_DEFAULT_SPEC ""
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_sparclite86x
#define CPP_CPU32_DEFAULT_SPEC "-D__sparclite86x__ -D__sparc_v8__"
#define ASM_CPU32_DEFAULT_SPEC "-Av8"
#endif
#endif #endif
#if !defined(CPP_CPU32_DEFAULT_SPEC) || !defined(CPP_CPU64_DEFAULT_SPEC) #if !defined(CPP_CPU32_DEFAULT_SPEC) || !defined(CPP_CPU64_DEFAULT_SPEC)
...@@ -208,6 +231,8 @@ Unrecognized value in TARGET_CPU_DEFAULT. ...@@ -208,6 +231,8 @@ Unrecognized value in TARGET_CPU_DEFAULT.
%{mcpu=f930:-D__sparclite__} %{mcpu=f934:-D__sparclite__} \ %{mcpu=f930:-D__sparclite__} %{mcpu=f934:-D__sparclite__} \
%{mcpu=v8:-D__sparc_v8__} \ %{mcpu=v8:-D__sparc_v8__} \
%{mcpu=supersparc:-D__supersparc__ -D__sparc_v8__} \ %{mcpu=supersparc:-D__supersparc__ -D__sparc_v8__} \
%{mcpu=hypersparc:-D__hypersparc__ -D__sparc_v8__} \
%{mcpu=sparclite86x:-D__sparclite86x__ -D__sparc_v8__} \
%{mcpu=v9:-D__sparc_v9__} \ %{mcpu=v9:-D__sparc_v9__} \
%{mcpu=ultrasparc:-D__sparc_v9__} \ %{mcpu=ultrasparc:-D__sparc_v9__} \
%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:%(cpp_cpu_default)}}}}}}} \ %{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:%(cpp_cpu_default)}}}}}}} \
...@@ -243,7 +268,9 @@ Unrecognized value in TARGET_CPU_DEFAULT. ...@@ -243,7 +268,9 @@ Unrecognized value in TARGET_CPU_DEFAULT.
" "
/* Macros to distinguish endianness. */ /* Macros to distinguish endianness. */
#define CPP_ENDIAN_SPEC "%{mlittle-endian:-D__LITTLE_ENDIAN__}" #define CPP_ENDIAN_SPEC "\
%{mlittle-endian:-D__LITTLE_ENDIAN__} \
%{mlittle-endian-data:-D__LITTLE_ENDIAN_DATA__}"
/* Macros to distinguish the particular subtarget. */ /* Macros to distinguish the particular subtarget. */
#define CPP_SUBTARGET_SPEC "" #define CPP_SUBTARGET_SPEC ""
...@@ -598,6 +625,8 @@ enum processor_type { ...@@ -598,6 +625,8 @@ enum processor_type {
PROCESSOR_SPARCLITE, PROCESSOR_SPARCLITE,
PROCESSOR_F930, PROCESSOR_F930,
PROCESSOR_F934, PROCESSOR_F934,
PROCESSOR_HYPERSPARC,
PROCESSOR_SPARCLITE86X,
PROCESSOR_SPARCLET, PROCESSOR_SPARCLET,
PROCESSOR_TSC701, PROCESSOR_TSC701,
PROCESSOR_V9, PROCESSOR_V9,
...@@ -684,7 +713,7 @@ extern int sparc_align_funcs; ...@@ -684,7 +713,7 @@ extern int sparc_align_funcs;
/* Define this to set the endianness to use in libgcc2.c, which can /* Define this to set the endianness to use in libgcc2.c, which can
not depend on target_flags. */ not depend on target_flags. */
#if defined (__LITTLE_ENDIAN__) #if defined (__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN_DATA__)
#define LIBGCC2_WORDS_BIG_ENDIAN 0 #define LIBGCC2_WORDS_BIG_ENDIAN 0
#else #else
#define LIBGCC2_WORDS_BIG_ENDIAN 1 #define LIBGCC2_WORDS_BIG_ENDIAN 1
...@@ -1410,15 +1439,23 @@ extern char leaf_reg_remap[]; ...@@ -1410,15 +1439,23 @@ extern char leaf_reg_remap[];
in class CLASS, return the class of reg to actually use. in class CLASS, return the class of reg to actually use.
In general this is just CLASS; but on some machines In general this is just CLASS; but on some machines
in some cases it is preferable to use a more restrictive class. */ in some cases it is preferable to use a more restrictive class. */
/* We can't load constants into FP registers. We can't load any FP constant /* - We can't load constants into FP registers. We can't load any FP
if an 'E' constraint fails to match it. */ constant if an 'E' constraint fails to match it.
- Try and reload integer constants (symbolic or otherwise) back into
registers directly, rather than having them dumped to memory. */
#define PREFERRED_RELOAD_CLASS(X,CLASS) \ #define PREFERRED_RELOAD_CLASS(X,CLASS) \
(CONSTANT_P (X) \ (CONSTANT_P (X) \
&& (FP_REG_CLASS_P (CLASS) \ ? ((FP_REG_CLASS_P (CLASS) \
|| (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \ || (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
&& (HOST_FLOAT_FORMAT != IEEE_FLOAT_FORMAT \ && (HOST_FLOAT_FORMAT != IEEE_FLOAT_FORMAT \
|| HOST_BITS_PER_INT != BITS_PER_WORD))) \ || HOST_BITS_PER_INT != BITS_PER_WORD))) \
? NO_REGS : (CLASS)) ? NO_REGS \
: (!FP_REG_CLASS_P (CLASS) \
&& GET_MODE_CLASS (GET_MODE (X)) == MODE_INT) \
? GENERAL_REGS \
: (CLASS)) \
: (CLASS))
/* Return the register class of a scratch register needed to load IN into /* Return the register class of a scratch register needed to load IN into
a register of class CLASS in MODE. a register of class CLASS in MODE.
...@@ -2515,6 +2552,32 @@ extern struct rtx_def *legitimize_pic_address (); ...@@ -2515,6 +2552,32 @@ extern struct rtx_def *legitimize_pic_address ();
if (memory_address_p (MODE, X)) \ if (memory_address_p (MODE, X)) \
goto WIN; } goto WIN; }
/* Try a machine-dependent way of reloading an illegitimate address
operand. If we find one, push the reload and jump to WIN. This
macro is used in only one place: `find_reloads_address' in reload.c.
For Sparc 32, we wish to handle addresses by splitting them into
HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
This cuts the number of extra insns by one. */
#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
do { \
/* Decompose SImode constants into hi+lo_sum. We do have to \
rerecognize what we produce, so be careful. */ \
if (CONSTANT_P (X) \
&& GET_MODE (X) == SImode \
&& GET_CODE (X) != LO_SUM && GET_CODE (X) != HIGH) \
{ \
X = gen_rtx_LO_SUM (GET_MODE (X), \
gen_rtx_HIGH (GET_MODE (X), X), X); \
push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
OPNUM, TYPE); \
goto WIN; \
} \
/* ??? 64-bit reloads. */ \
} while (0)
/* Go to LABEL if ADDR (a legitimate address expression) /* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for. has an effect that depends on the machine mode it is used for.
On the SPARC this is never true. */ On the SPARC this is never true. */
...@@ -2803,12 +2866,8 @@ extern struct rtx_def *legitimize_pic_address (); ...@@ -2803,12 +2866,8 @@ extern struct rtx_def *legitimize_pic_address ();
#define ISSUE_RATE sparc_issue_rate() #define ISSUE_RATE sparc_issue_rate()
/* Adjust the cost of dependencies. */ /* Adjust the cost of dependencies. */
#define ADJUST_COST(INSN,LINK,DEP,COST) \ #define ADJUST_COST(INSN,LINK,DEP,COST) \
if (sparc_cpu == PROCESSOR_SUPERSPARC) \ sparc_adjust_cost(INSN, LINK, DEP, COST)
(COST) = supersparc_adjust_cost (INSN, LINK, DEP, COST); \
else if (sparc_cpu == PROCESSOR_ULTRASPARC) \
(COST) = ultrasparc_adjust_cost (INSN, LINK, DEP, COST); \
else
extern void ultrasparc_sched_reorder (); extern void ultrasparc_sched_reorder ();
extern void ultrasparc_sched_init (); extern void ultrasparc_sched_init ();
...@@ -3394,11 +3453,10 @@ extern int sparc_flat_epilogue_delay_slots (); ...@@ -3394,11 +3453,10 @@ extern int sparc_flat_epilogue_delay_slots ();
extern int sparc_issue_rate (); extern int sparc_issue_rate ();
extern int splittable_immediate_memory_operand (); extern int splittable_immediate_memory_operand ();
extern int splittable_symbolic_memory_operand (); extern int splittable_symbolic_memory_operand ();
extern int supersparc_adjust_cost (); extern int sparc_adjust_cost ();
extern int symbolic_memory_operand (); extern int symbolic_memory_operand ();
extern int symbolic_operand (); extern int symbolic_operand ();
extern int text_segment_operand (); extern int text_segment_operand ();
extern int ultrasparc_adjust_cost ();
extern int uns_small_int (); extern int uns_small_int ();
extern int v9_regcmp_op (); extern int v9_regcmp_op ();
extern int v9_regcmp_p (); extern int v9_regcmp_p ();
......
...@@ -65,7 +65,7 @@ ...@@ -65,7 +65,7 @@
;; Attribute for cpu type. ;; Attribute for cpu type.
;; These must match the values for enum processor_type in sparc.h. ;; These must match the values for enum processor_type in sparc.h.
(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,sparclet,tsc701,v9,ultrasparc" (define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,hypersparc,sparclite86x,sparclet,tsc701,v9,ultrasparc"
(const (symbol_ref "sparc_cpu_attr"))) (const (symbol_ref "sparc_cpu_attr")))
;; Attribute for the instruction set. ;; Attribute for the instruction set.
...@@ -345,6 +345,53 @@ ...@@ -345,6 +345,53 @@
(eq_attr "type" "imul")) (eq_attr "type" "imul"))
4 4) 4 4)
;; ----- hypersparc/sparclite86x scheduling
;; The Hypersparc can issue 1 - 2 insns per cycle. The dual issue cases are:
;; L-Ld/St I-Int F-Float B-Branch LI/LF/LB/II/IF/IB/FF/FB
;; II/FF case is only when loading a 32 bit hi/lo constant
;; Single issue insns include call, jmpl, u/smul, u/sdiv, lda, sta, fcmp
;; Memory delivers its result in one cycle to IU
(define_function_unit "memory" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "load,sload,fpload"))
1 1)
(define_function_unit "memory" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "store,fpstore"))
2 1)
(define_function_unit "fp_alu" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "fp,fpmove,fpcmp"))
1 1)
(define_function_unit "fp_mds" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "fpmul"))
1 1)
(define_function_unit "fp_mds" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "fpdivs"))
8 6)
(define_function_unit "fp_mds" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "fpdivd"))
12 10)
(define_function_unit "fp_mds" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "fpsqrt"))
17 15)
(define_function_unit "fp_mds" 1 0
(and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
(eq_attr "type" "imul"))
17 15)
;; ----- sparclet tsc701 scheduling ;; ----- sparclet tsc701 scheduling
;; The tsc701 issues 1 insn per cycle. ;; The tsc701 issues 1 insn per cycle.
;; Results may be written back out of order. ;; Results may be written back out of order.
......
...@@ -5518,7 +5518,7 @@ for machine in $build $host $target; do ...@@ -5518,7 +5518,7 @@ for machine in $build $host $target; do
.) .)
target_cpu_default2=TARGET_CPU_"`echo $machine | sed 's/-.*$//'`" target_cpu_default2=TARGET_CPU_"`echo $machine | sed 's/-.*$//'`"
;; ;;
.supersparc | .ultrasparc | .v7 | .v8 | .v9) .supersparc | .hypersparc | .ultrasparc | .v7 | .v8 | .v9)
target_cpu_default2="TARGET_CPU_$with_cpu" target_cpu_default2="TARGET_CPU_$with_cpu"
;; ;;
*) *)
......
...@@ -3347,7 +3347,7 @@ changequote([,])dnl ...@@ -3347,7 +3347,7 @@ changequote([,])dnl
.) .)
target_cpu_default2=TARGET_CPU_"`echo $machine | sed 's/-.*$//'`" target_cpu_default2=TARGET_CPU_"`echo $machine | sed 's/-.*$//'`"
;; ;;
.supersparc | .ultrasparc | .v7 | .v8 | .v9) .supersparc | .hypersparc | .ultrasparc | .v7 | .v8 | .v9)
target_cpu_default2="TARGET_CPU_$with_cpu" target_cpu_default2="TARGET_CPU_$with_cpu"
;; ;;
*) *)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment