Commit d19fb8e3 by Nick Clifton

Add support for XScale target

Add support for StrongARM target

From-SVN: r37984
parent accc8668
2000-12-03 Nick Clifton <nickc@redhat.com>
* config.gcc: Add support for StrongARM targets.
* config/arm/t-strongarm-elf: New file.
* config/arm/t-strongarm-coff: New file.
* config/arm/t-strongarm-pe: New file.
* config/arm/strongarm-pe.h: New file.
2000-12-03 Nick Clifton <nickc@redhat.com>
* NEWS: Mention XScale has been added.
* config.gcc: Add support for XScale targets.
* config/arm/arm.h: Add support for XScale processor.
* config/arm/arm.c: Add support for XScale processor.
* config/arm/arm.md: Add support for XScale processor.
* config/arm/t-xscale-elf: New file.
* config/arm/t-xscale-coff: New file.
* config/arm/xscale-elf.h: New file.
* config/arm/xscale-coff.h: New file.
2000-12-03 Richard Henderson <rth@redhat.com>
* bb-reorder.c (reorder_basic_blocks): Don't check for EH edges
......
@@ -213,6 +213,9 @@ strongarm*-*-*)
arm*-*-*)
cpu_type=arm
;;
xscale-*-*)
cpu_type=arm
;;
c*-convex-*)
cpu_type=convex
;;
@@ -3114,6 +3117,28 @@ sparc64-*-linux*) # 64-bit Sparc's running GNU/Linux
fi
float_format=sparc
;;
strongarm-*-elf*)
tm_file=arm/strongarm-elf.h
tmake_file=arm/t-strongarm-elf
out_file=arm/arm.c
xm_file=arm/xm-arm.h
md_file=arm/arm.md
;;
strongarm-*-coff*)
tm_file=arm/strongarm-coff.h
tmake_file=arm/t-strongarm-coff
out_file=arm/arm.c
xm_file=arm/xm-arm.h
md_file=arm/arm.md
;;
strongarm-*-pe)
tm_file=arm/strongarm-pe.h
tmake_file=arm/t-strongarm-pe
out_file=arm/arm.c
xm_file=arm/xm-arm.h
md_file=arm/arm.md
extra_objs=pe.o
;;
thumb*-*-*)
{ echo "config.gcc: error:
*** The Thumb targets have been deprecated. The equivalent
@@ -3185,6 +3210,20 @@ we32k-att-sysv*)
xm_file="${xm_file} xm-svr3"
use_collect2=yes
;;
xscale-*-elf)
tm_file=arm/xscale-elf.h
tmake_file=arm/t-xscale-elf
out_file=arm/arm.c
xm_file=arm/xm-arm.h
md_file=arm/arm.md
;;
xscale-*-coff)
tm_file=arm/xscale-coff.h
tmake_file=arm/t-xscale-coff
out_file=arm/arm.c
xm_file=arm/xm-arm.h
md_file=arm/arm.md
;;
*)
echo "Configuration $machine not supported" 1>&2
exit 1
@@ -3280,6 +3319,7 @@ arm*-*-*)
xarm[236789] | xarm250 | xarm[67][01]0 \
| xarm7m | xarm7dm | xarm7dmi | xarm[79]tdmi \
| xarm7100 | xarm7500 | xarm7500fe | xarm810 \
| xxscale \
| xstrongarm | xstrongarm110 | xstrongarm1100)
target_cpu_default2="TARGET_CPU_$with_cpu"
;;
......
@@ -197,6 +197,12 @@ extern void arm_mark_dllexport PARAMS ((tree));
extern void arm_mark_dllimport PARAMS ((tree));
#endif
extern void arm_init_builtins PARAMS ((void));
#if defined (TREE_CODE) && defined (RTX_CODE)
extern rtx arm_expand_builtin PARAMS ((tree, rtx, rtx,
enum machine_mode, int));
#endif
#ifdef _C_PRAGMA_H /* included from code that cares about pragmas */
extern void arm_pr_long_calls PARAMS ((cpp_reader *));
extern void arm_pr_no_long_calls PARAMS ((cpp_reader *));
......
@@ -146,6 +146,8 @@ int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
#define FL_THUMB (1 << 6) /* Thumb aware */
#define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
#define FL_STRONG (1 << 8) /* StrongARM */
#define FL_ARCH5E (1 << 9) /* El Segundo extenstions to v5 */
#define FL_XSCALE (1 << 10) /* XScale */
/* The bits in this mask specify which instructions we are
allowed to generate. */
@@ -175,6 +177,9 @@ int arm_ld_sched = 0;
/* Nonzero if this chip is a StrongARM. */
int arm_is_strong = 0;
/* Nonzero if this chip is an XScale. */
int arm_is_xscale = 0;
/* Nonzero if this chip is a an ARM6 or an ARM7. */
int arm_is_6_or_7 = 0;
@@ -269,6 +274,7 @@ static struct processors all_cores[] =
{"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
{"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
{"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
{"xscale", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_ARCH5 },
{NULL, 0}
};
@@ -286,6 +292,8 @@ static struct processors all_architectures[] =
implementations that support it, so we will leave it out for now. */
{ "armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
{ "armv5", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
{ "armv5t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
{ "armv5te", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
{ NULL, 0 }
};
@@ -382,6 +390,7 @@ arm_override_options ()
{ TARGET_CPU_arm810, "arm810" },
{ TARGET_CPU_arm9, "arm9" },
{ TARGET_CPU_strongarm, "strongarm" },
{ TARGET_CPU_xscale, "xscale" },
{ TARGET_CPU_generic, "arm" },
{ 0, 0 }
};
@@ -516,7 +525,13 @@ arm_override_options ()
/* warning ("ignoring -mapcs-frame because -mthumb was used."); */
target_flags &= ~ARM_FLAG_APCS_FRAME;
}
if (TARGET_HARD_FLOAT && (tune_flags & FL_XSCALE))
{
warning ("XScale does not support hardware FP instructions.");
target_flags |= ARM_FLAG_SOFT_FLOAT;
}
/* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
from here where no function is being compiled currently. */
if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
@@ -576,6 +591,7 @@ arm_override_options ()
arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
arm_is_strong = (tune_flags & FL_STRONG) != 0;
thumb_code = (TARGET_ARM == 0);
arm_is_xscale = (tune_flags & FL_XSCALE) != 0;
arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
&& !(tune_flags & FL_ARCH4))) != 0;
@@ -651,6 +667,9 @@ arm_override_options ()
if (optimize_size || (tune_flags & FL_LDSCHED))
arm_constant_limit = 1;
if (arm_is_xscale)
arm_constant_limit = 2;
/* If optimizing for size, bump the number of instructions that we
are prepared to conditionally execute (even on a StrongARM).
Otherwise for the StrongARM, which has early execution of branches,
@@ -1718,7 +1737,7 @@ arm_encode_call_attribute (decl, flag)
{
const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
int len = strlen (str);
char * newstr;
if (TREE_CODE (decl) != FUNCTION_DECL)
return;
@@ -2001,7 +2020,7 @@ legitimize_pic_address (orig, mode, reg)
emit_insn (gen_pic_load_addr_arm (address, orig));
else
emit_insn (gen_pic_load_addr_thumb (address, orig));
pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
emit_move_insn (address, pic_ref);
@@ -2475,6 +2494,47 @@ arm_adjust_cost (insn, link, dep, cost)
{
rtx i_pat, d_pat;
/* Some true dependencies can have a higher cost depending
on precisely how certain input operands are used. */
if (arm_is_xscale
&& REG_NOTE_KIND (link) == 0
&& recog_memoized (insn) < 0
&& recog_memoized (dep) < 0)
{
int shift_opnum = get_attr_shift (insn);
enum attr_type attr_type = get_attr_type (dep);
/* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
operand for INSN. If we have a shifted input operand and the
instruction we depend on is another ALU instruction, then we may
have to account for an additional stall. */
if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
{
rtx shifted_operand;
int opno;
/* Get the shifted operand. */
extract_insn (insn);
shifted_operand = recog_data.operand[shift_opnum];
/* Iterate over all the operands in DEP. If we write an operand
that overlaps with SHIFTED_OPERAND, then we have increase the
cost of this dependency. */
extract_insn (dep);
preprocess_constraints ();
for (opno = 0; opno < recog_data.n_operands; opno++)
{
/* We can ignore strict inputs. */
if (recog_data.operand_type[opno] == OP_IN)
continue;
if (reg_overlap_mentioned_p (recog_data.operand[opno],
shifted_operand))
return 2;
}
}
}
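(Illustration only, not part of the commit: the extra latency modelled above arises when an ALU result is immediately consumed as the shifted operand of the next ALU instruction. The C function below is a made-up sketch of that shape, assuming something like -mcpu=xscale; whether the compiler actually emits a shifted-register operand here depends on the optimizer.)

/* Hypothetical example: `t' is produced by one ALU instruction and then
   used as the *shifted* operand of the next (e.g. "add r0, r2, r3, lsl #2"),
   which is the dependency arm_adjust_cost now charges extra for on XScale.  */
int scaled_sum (int a, int b, int c)
{
  int t = a + b;          /* ALU result ...                          */
  return c + (t << 2);    /* ... consumed as a shifted input operand */
}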
/* XXX This is not strictly true for the FPA. */
if (REG_NOTE_KIND (link) == REG_DEP_ANTI
|| REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
@@ -3822,7 +3882,7 @@ arm_valid_machine_decl_attribute (decl, attr, args)
if (is_attribute_p ("naked", attr))
return TREE_CODE (decl) == FUNCTION_DECL;
#ifdef ARM_PE
if (is_attribute_p ("interfacearm", attr))
return TREE_CODE (decl) == FUNCTION_DECL;
@@ -3863,6 +3923,58 @@ arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
int sign = up ? 1 : -1;
rtx mem;
/* XScale has load-store double instructions, but they have stricter
alignment requirements than load-store multiple, so we can not
use them.
For XScale ldm requires 2 + NREGS cycles to complete and blocks
the pipeline until completion.
NREGS CYCLES
1 3
2 4
3 5
4 6
An ldr instruction takes 1-3 cycles, but does not block the
pipeline.
NREGS CYCLES
1 1-3
2 2-6
3 3-9
4 4-12
Best case ldr will always win. However, the more ldr instructions
we issue, the less likely we are to be able to schedule them well.
Using ldr instructions also increases code size.
As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
for counts of 3 or 4 regs. */
if (arm_is_xscale && count <= 2 && ! optimize_size)
{
rtx seq;
start_sequence ();
for (i = 0; i < count; i++)
{
mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
RTX_UNCHANGING_P (mem) = unchanging_p;
MEM_IN_STRUCT_P (mem) = in_struct_p;
MEM_SCALAR_P (mem) = scalar_p;
emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
}
if (write_back)
emit_move_insn (from, plus_constant (from, count * 4 * sign));
seq = gen_sequence ();
end_sequence ();
return seq;
}
result = gen_rtx_PARALLEL (VOIDmode,
rtvec_alloc (count + (write_back ? 1 : 0)));
if (write_back)
@@ -3904,6 +4016,32 @@ arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
int sign = up ? 1 : -1;
rtx mem;
/* See arm_gen_load_multiple for discussion of
the pros/cons of ldm/stm usage for XScale. */
if (arm_is_xscale && count <= 2 && ! optimize_size)
{
rtx seq;
start_sequence ();
for (i = 0; i < count; i++)
{
mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
RTX_UNCHANGING_P (mem) = unchanging_p;
MEM_IN_STRUCT_P (mem) = in_struct_p;
MEM_SCALAR_P (mem) = scalar_p;
emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
}
if (write_back)
emit_move_insn (to, plus_constant (to, count * 4 * sign));
seq = gen_sequence ();
end_sequence ();
return seq;
}
result = gen_rtx_PARALLEL (VOIDmode,
rtvec_alloc (count + (write_back ? 1 : 0)));
if (write_back)
@@ -4145,6 +4283,7 @@ arm_gen_rotated_half_load (memref)
If we are unable to support a dominance comparsison we return CC mode.
This will then fail to match for the RTL expressions that generate this
call. */
static enum machine_mode
select_dominance_cc_mode (x, y, cond_or)
rtx x;
@@ -5583,7 +5722,6 @@ arm_reorg (first)
/* Scan all the insns and record the operands that will need fixing. */
for (insn = next_nonnote_insn (first); insn; insn = next_nonnote_insn (insn))
{
if (GET_CODE (insn) == BARRIER)
push_minipool_barrier (insn, address);
else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
@@ -7357,7 +7495,7 @@ arm_expand_prologue ()
rtx insn;
rtx ip_rtx;
int fp_offset = 0;
/* Naked functions don't have prologues. */
if (arm_naked_function_p (current_function_decl))
@@ -8379,6 +8517,99 @@ arm_debugger_arg_offset (value, addr)
return value;
}
#define def_builtin(NAME, TYPE, CODE) \
builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL_PTR)
void
arm_init_builtins ()
{
tree endlink = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
tree pchar_type_node = build_pointer_type (char_type_node);
tree int_ftype_int, void_ftype_pchar;
/* void func (void *) */
void_ftype_pchar
= build_function_type (void_type_node,
tree_cons (NULL_TREE, pchar_type_node, endlink));
/* int func (int) */
int_ftype_int
= build_function_type (integer_type_node, int_endlink);
/* Initialize arm V5 builtins. */
if (arm_arch5)
{
def_builtin ("__builtin_clz", int_ftype_int, ARM_BUILTIN_CLZ);
def_builtin ("__builtin_prefetch", void_ftype_pchar,
ARM_BUILTIN_PREFETCH);
}
}
/* Expand an expression EXP that calls a built-in function,
with result going to TARGET if that's convenient
(and in mode MODE if that's convenient).
SUBTARGET may be used as the target for computing one of EXP's operands.
IGNORE is nonzero if the value is to be ignored. */
rtx
arm_expand_builtin (exp, target, subtarget, mode, ignore)
tree exp;
rtx target;
rtx subtarget ATTRIBUTE_UNUSED;
enum machine_mode mode ATTRIBUTE_UNUSED;
int ignore ATTRIBUTE_UNUSED;
{
enum insn_code icode;
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
tree arglist = TREE_OPERAND (exp, 1);
tree arg0;
rtx op0, pat;
enum machine_mode tmode, mode0;
int fcode = DECL_FUNCTION_CODE (fndecl);
switch (fcode)
{
default:
break;
case ARM_BUILTIN_CLZ:
icode = CODE_FOR_clz;
arg0 = TREE_VALUE (arglist);
op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
tmode = insn_data[icode].operand[0].mode;
mode0 = insn_data[icode].operand[1].mode;
if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
op0 = copy_to_mode_reg (mode0, op0);
if (target == 0
|| GET_MODE (target) != tmode
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
pat = GEN_FCN (icode) (target, op0);
if (! pat)
return 0;
emit_insn (pat);
return target;
case ARM_BUILTIN_PREFETCH:
icode = CODE_FOR_prefetch;
arg0 = TREE_VALUE (arglist);
op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
op0 = gen_rtx_MEM (SImode, copy_to_mode_reg (Pmode, op0));
pat = GEN_FCN (icode) (op0);
if (! pat)
return 0;
emit_insn (pat);
return target;
}
/* @@@ Should really do something sensible here. */
return NULL_RTX;
}
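For illustration only (not part of the commit): with these hooks in place, code compiled for an ARMv5/XScale target can call the two machine-specific builtins directly. The function names below are invented; the builtin names and their int (int) / void (char *) signatures come from arm_init_builtins above.

/* Hypothetical usage sketch of the builtins registered above,
   assuming a compiler configured with -mcpu=xscale (or another ARMv5 CPU).  */
int count_leading_zeros (int x)
{
  return __builtin_clz (x);      /* expands via CODE_FOR_clz to "clz"       */
}

void touch_line (char *p)
{
  __builtin_prefetch (p);        /* expands via CODE_FOR_prefetch to "pld"  */
}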
/* Recursively search through all of the blocks in a function
checking to see if any of the variables created in that
......
@@ -48,6 +48,7 @@ Boston, MA 02111-1307, USA. */
#define TARGET_CPU_strongarm1100 0x0040
#define TARGET_CPU_arm9 0x0080
#define TARGET_CPU_arm9tdmi 0x0080
#define TARGET_CPU_xscale 0x0100
/* Configure didn't specify. */
#define TARGET_CPU_generic 0x8000
@@ -115,12 +116,16 @@ extern int current_function_anonymous_args;
#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm110 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm1100
#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
#else
#if TARGET_CPU_DEFAULT == TARGET_CPU_xscale
#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_5TE__ -D__XSCALE__"
#else
Unrecognized value in TARGET_CPU_DEFAULT.
#endif
#endif
#endif
#endif
#endif
#endif
#ifndef CPP_PREDEFINES
#define CPP_PREDEFINES "-Acpu=arm -Amachine=arm"
@@ -161,6 +166,8 @@ Unrecognized value in TARGET_CPU_DEFAULT.
%{march=strongarm:-D__ARM_ARCH_4__} \
%{march=strongarm110:-D__ARM_ARCH_4__} \
%{march=strongarm1100:-D__ARM_ARCH_4__} \
%{march=xscale:-D__ARM_ARCH_5TE__} \
%{march=xscale:-D__XSCALE__} \
%{march=armv2:-D__ARM_ARCH_2__} \
%{march=armv2a:-D__ARM_ARCH_2__} \
%{march=armv3:-D__ARM_ARCH_3__} \
@@ -198,6 +205,8 @@ Unrecognized value in TARGET_CPU_DEFAULT.
%{mcpu=strongarm:-D__ARM_ARCH_4__} \
%{mcpu=strongarm110:-D__ARM_ARCH_4__} \
%{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
%{mcpu=xscale:-D__ARM_ARCH_5TE__} \
%{mcpu=xscale:-D__XSCALE__} \
%{!mcpu*:%(cpp_cpu_arch_default)}} \
"
@@ -560,6 +569,9 @@ extern int thumb_code;
/* Nonzero if this chip is a StrongARM. */
extern int arm_is_strong;
/* Nonzero if this chip is an XScale. */
extern int arm_is_xscale;
/* Nonzero if this chip is a an ARM6 or an ARM7. */
extern int arm_is_6_or_7;
@@ -696,9 +708,12 @@ extern int arm_is_6_or_7;
#define BIGGEST_ALIGNMENT 32
/* Make strings word-aligned so strcpy from constants will be faster. */
#define CONSTANT_ALIGNMENT_FACTOR (TARGET_THUMB || ! arm_is_xscale ? 1 : 2)
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
((TREE_CODE (EXP) == STRING_CST \
&& (ALIGN) < BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR) \
? BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR : (ALIGN))
/* Setting STRUCTURE_SIZE_BOUNDARY to 32 produces more efficient code, but the
value set in previous versions of this toolchain was 8, which produces more
@@ -2050,63 +2065,63 @@ typedef struct
floating SYMBOL_REF to the constant pool. Allow REG-only and
AUTINC-REG if handling TImode or HImode. Other symbol refs must be
forced though a static cell to ensure addressability. */
#define ARM_GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
{ \
if (ARM_BASE_REGISTER_RTX_P (X)) \
goto LABEL; \
else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
&& GET_CODE (XEXP (X, 0)) == REG \
&& ARM_REG_OK_FOR_BASE_P (XEXP (X, 0))) \
goto LABEL; \
else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
&& (GET_CODE (X) == LABEL_REF \
|| (GET_CODE (X) == CONST \
&& GET_CODE (XEXP ((X), 0)) == PLUS \
&& GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
&& GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT))) \
goto LABEL; \
else if ((MODE) == TImode) \
; \
else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
{ \
if (GET_CODE (X) == PLUS && ARM_BASE_REGISTER_RTX_P (XEXP (X, 0)) \
&& GET_CODE (XEXP (X, 1)) == CONST_INT) \
{ \
HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
if (val == 4 || val == -4 || val == -8) \
goto LABEL; \
} \
} \
else if (GET_CODE (X) == PLUS) \
{ \
rtx xop0 = XEXP (X, 0); \
rtx xop1 = XEXP (X, 1); \
\
if (ARM_BASE_REGISTER_RTX_P (xop0)) \
ARM_GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
else if (ARM_BASE_REGISTER_RTX_P (xop1)) \
ARM_GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
} \
/* Reload currently can't handle MINUS, so disable this for now */ \
/* else if (GET_CODE (X) == MINUS) \
{ \
rtx xop0 = XEXP (X,0); \
rtx xop1 = XEXP (X,1); \
\
if (ARM_BASE_REGISTER_RTX_P (xop0)) \
ARM_GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
} */ \
else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
&& GET_CODE (X) == SYMBOL_REF \
&& CONSTANT_POOL_ADDRESS_P (X) \
&& ! (flag_pic \
&& symbol_mentioned_p (get_pool_constant (X)))) \
goto LABEL; \
else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
&& (GET_MODE_SIZE (MODE) <= 4) \
&& GET_CODE (XEXP (X, 0)) == REG \
&& ARM_REG_OK_FOR_BASE_P (XEXP (X, 0))) \
goto LABEL; \
}
/* ---------------------thumb version----------------------------------*/
@@ -2355,6 +2370,9 @@ typedef struct
in one reasonably fast instruction. */
#define MOVE_MAX 4
#undef MOVE_RATIO
#define MOVE_RATIO (arm_is_xscale ? 4 : 2)
/* Define if operations between registers always perform the operation
on the full register even if a narrower mode is specified. */
#define WORD_REGISTER_OPERATIONS
@@ -2924,4 +2942,20 @@ extern int making_const_table;
#define SPECIAL_MODE_PREDICATES \
"cc_register", "dominant_cc_register",
enum arm_builtins
{
ARM_BUILTIN_CLZ,
ARM_BUILTIN_PREFETCH,
ARM_BUILTIN_MAX
};
#define MD_INIT_BUILTINS \
do \
{ \
arm_init_builtins (); \
} \
while (0)
#define MD_EXPAND_BUILTIN(EXP, TARGET, SUBTARGET, MODE, IGNORE) \
arm_expand_builtin ((EXP), (TARGET), (SUBTARGET), (MODE), (IGNORE))
#endif /* __ARM_H__ */
@@ -69,6 +69,11 @@
; scheduling decisions for the load unit and the multiplier.
(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
;; Operand number of an input operand that is shifted. Zero if the
;; given instruction does not shift one of its input operands.
(define_attr "is_xscale" "no,yes" (const (symbol_ref "arm_is_xscale")))
(define_attr "shift" "" (const_int 0))
; Floating Point Unit. If we only have floating point emulation, then there
; is no point in scheduling the floating point insns. (Well, for best
; performance we should try and group them together).
@@ -291,6 +296,18 @@
(define_function_unit "core" 1 0
(and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
;; We do not need to conditionalize the define_function_unit immediately
;; above. This one will be ignored for anything other than xscale
;; compiles and for xscale compiles it provides a larger delay
;; and the scheduler will DTRT.
;; FIXME: this test needs to be revamped to not depend on this feature
;; of the scheduler.
(define_function_unit "core" 1 0
(and (and (eq_attr "ldsched" "yes") (eq_attr "type" "load"))
(eq_attr "is_xscale" "yes"))
3 1)
(define_function_unit "core" 1 0
(and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
@@ -1121,7 +1138,7 @@
(const_int 0)))
(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
(mult:SI (match_dup 2) (match_dup 1)))]
"TARGET_ARM && !arm_is_xscale"
"mul%?s\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1134,7 +1151,7 @@
(match_operand:SI 1 "s_register_operand" "%?r,0"))
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r,&r"))]
"TARGET_ARM && !arm_is_xscale"
"mul%?s\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1165,7 +1182,7 @@
(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
(plus:SI (mult:SI (match_dup 2) (match_dup 1))
(match_dup 3)))]
"TARGET_ARM && !arm_is_xscale"
"mla%?s\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1180,7 +1197,7 @@
(match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
"TARGET_ARM && !arm_is_xscale"
"mla%?s\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1226,7 +1243,7 @@
;; Unnamed template to match long long unsigned multiply-accumlate (umlal)
(define_insn "*umulsidi3adddi"
[(set (match_operand:DI 0 "s_register_operand" "+&r")
(plus:DI
(mult:DI
(zero_extend:DI (match_operand:SI 2 "s_register_operand" "%r"))
@@ -1268,6 +1285,41 @@
(set_attr "predicable" "yes")]
)
(define_insn "mulhisi3"
[(set (match_operand:SI 0 "s_register_operand" "=r")
(mult:SI (sign_extend:SI
(match_operand:HI 1 "s_register_operand" "%r"))
(sign_extend:SI
(match_operand:HI 2 "s_register_operand" "r"))))]
"TARGET_ARM && arm_is_xscale"
"smulbb%?\\t%0,%1,%2"
[(set_attr "type" "mult")]
)
(define_insn "*mulhisi3addsi"
[(set (match_operand:SI 0 "s_register_operand" "=r")
(plus:SI (match_operand:SI 1 "s_register_operand" "r")
(mult:SI (sign_extend:SI
(match_operand:HI 2 "s_register_operand" "%r"))
(sign_extend:SI
(match_operand:HI 3 "s_register_operand" "r")))))]
"TARGET_ARM && arm_is_xscale"
"smlabb%?\\t%0,%2,%3,%1"
[(set_attr "type" "mult")]
)
(define_insn "*mulhidi3adddi"
[(set (match_operand:DI 0 "s_register_operand" "=r")
(plus:DI
(match_operand:DI 1 "s_register_operand" "0")
(mult:DI (sign_extend:DI
(match_operand:HI 2 "s_register_operand" "%r"))
(sign_extend:DI
(match_operand:HI 3 "s_register_operand" "r")))))]
"TARGET_ARM && arm_is_xscale"
"smlalbb%?\\t%Q0, %R0, %2, %3"
[(set_attr "type" "mult")])
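As an illustration only (not part of the commit): the new mulhisi3 / *mulhisi3addsi / *mulhidi3adddi patterns target 16x16->32 multiply and multiply-accumulate shapes such as the C loop below. The function name is invented, and whether smlabb is actually selected depends on the optimization level and the combiner.

/* Hypothetical example of source that can map onto the XScale smulbb/smlabb
   patterns when compiled with something like -mcpu=xscale -O2.  */
int dot16 (const short *a, const short *b, int n)
{
  int acc = 0;
  int i;

  for (i = 0; i < n; i++)
    acc += (int) a[i] * (int) b[i];   /* sign-extended 16-bit multiply-accumulate */

  return acc;
}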
(define_insn "mulsf3"
[(set (match_operand:SF 0 "s_register_operand" "=f")
(mult:SF (match_operand:SF 1 "s_register_operand" "f")
@@ -2003,6 +2055,7 @@
"TARGET_ARM"
"bic%?\\t%0, %1, %2%S4"
[(set_attr "predicable" "yes")
(set_attr "shift" "2")
]
)
@@ -2503,6 +2556,7 @@
"TARGET_ARM"
"mov%?\\t%0, %1%S3"
[(set_attr "predicable" "yes")
(set_attr "shift" "1")
]
)
@@ -2517,6 +2571,7 @@
"TARGET_ARM"
"mov%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
]
)
@@ -2530,6 +2585,7 @@
"TARGET_ARM"
"mov%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
]
)
@@ -2541,6 +2597,7 @@
"TARGET_ARM"
"mvn%?\\t%0, %1%S3"
[(set_attr "predicable" "yes")
(set_attr "shift" "1")
]
)
@@ -2555,6 +2612,7 @@
"TARGET_ARM"
"mvn%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
]
)
@@ -2568,7 +2626,8 @@
"TARGET_ARM"
"mvn%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
]
)
;; We don't really have extzv, but defining this using shifts helps
@@ -2713,6 +2772,7 @@
cmp\\t%0, #0\;rsblt\\t%0, %0, #0
eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
[(set_attr "conds" "clob,*")
(set_attr "shift" "1")
;; predicable can't be set based on the variant, so left as no
(set_attr "length" "8")]
)
@@ -2726,6 +2786,7 @@
cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
[(set_attr "conds" "clob,*")
(set_attr "shift" "1")
;; predicable can't be set based on the variant, so left as no
(set_attr "length" "8")]
)
@@ -3056,6 +3117,7 @@
return \"mov%?\\t%R0, %Q0, asr #31\";
"
[(set_attr "length" "8")
(set_attr "shift" "1")
(set_attr "predicable" "yes")]
)
@@ -5471,6 +5533,7 @@
"TARGET_ARM"
"cmp%?\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
]
)
@@ -5483,6 +5546,7 @@
"TARGET_ARM"
"cmp%?\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
]
)
@@ -5495,6 +5559,7 @@
"TARGET_ARM"
"cmn%?\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
]
)
@@ -6728,6 +6793,7 @@
"TARGET_ARM"
"%i1%?\\t%0, %2, %4%S3"
[(set_attr "predicable" "yes")
(set_attr "shift" "4")
]
)
@@ -6745,6 +6811,7 @@
"TARGET_ARM"
"%i1%?s\\t%0, %2, %4%S3"
[(set_attr "conds" "set")
(set_attr "shift" "4")
]
)
@@ -6760,6 +6827,7 @@
"TARGET_ARM"
"%i1%?s\\t%0, %2, %4%S3"
[(set_attr "conds" "set")
(set_attr "shift" "4")
]
)
@@ -6772,6 +6840,7 @@
"TARGET_ARM"
"sub%?\\t%0, %1, %3%S2"
[(set_attr "predicable" "yes")
(set_attr "shift" "3")
]
)
@@ -6789,6 +6858,7 @@
"TARGET_ARM"
"sub%?s\\t%0, %1, %3%S2"
[(set_attr "conds" "set")
(set_attr "shift" "3")
]
)
@@ -6804,6 +6874,7 @@
"TARGET_ARM"
"sub%?s\\t%0, %1, %3%S2"
[(set_attr "conds" "set")
(set_attr "shift" "3")
]
)
@@ -6848,12 +6919,13 @@
(plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
(match_dup 1))
(match_dup 2)))]
"TARGET_ARM && reload_in_progress && !arm_is_xscale"
"*
output_add_immediate (operands);
return \"add%?s\\t%0, %0, %3%S5\";
"
[(set_attr "conds" "set")
(set_attr "shift" "3")
(set_attr "length" "20")]
)
@@ -6868,12 +6940,13 @@
(match_operand:SI 2 "const_int_operand" "n"))
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r"))]
"TARGET_ARM && reload_in_progress && !arm_is_xscale"
"*
output_add_immediate (operands);
return \"add%?s\\t%0, %0, %3%S5\";
"
[(set_attr "conds" "set")
(set_attr "shift" "3")
(set_attr "length" "20")]
)
@@ -6908,7 +6981,7 @@
(set (match_operand:SI 0 "" "=&r")
(plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
(match_dup 2)))]
"TARGET_ARM && reload_in_progress && !arm_is_xscale"
"*
output_add_immediate (operands);
output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
@@ -7615,6 +7688,7 @@
mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
[(set_attr "conds" "use")
(set_attr "shift" "2")
(set_attr "length" "4,8,8")]
)
@@ -7650,6 +7724,7 @@
mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
[(set_attr "conds" "use")
(set_attr "shift" "2")
(set_attr "length" "4,8,8")]
)
@@ -7686,6 +7761,7 @@
"TARGET_ARM"
"mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
[(set_attr "conds" "use")
(set_attr "shift" "1")
(set_attr "length" "8")]
)
@@ -8912,6 +8988,22 @@
[(set_attr "length" "2")]
)
;; V5 Instructions,
(define_insn "clz"
[(set (match_operand:SI 0 "s_register_operand" "=r")
(unspec:SI [(match_operand:SI 1 "s_register_operand" "r")] 128))]
"TARGET_ARM"
"clz\\t%0,%1")
;; XScale instructions.
(define_insn "prefetch"
[(unspec_volatile
[(match_operand:SI 0 "offsettable_memory_operand" "o")] 129)]
"TARGET_ARM"
"pld\\t%0")
;; General predication pattern
(define_cond_exec
......
/* Definitions of target machine for GNU compiler, for ARM with PE obj format.
Copyright (C) 1999 Free Software Foundation, Inc.
Contributed by Doug Evans (dje@cygnus.com).
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include "arm/strongarm-coff.h"
#include "arm/pe.h"
#undef TARGET_VERSION
#define TARGET_VERSION fputs (" (StrongARM/PE)", stderr);
CROSS_LIBGCC1 = libgcc1-asm.a
LIB1ASMSRC = arm/lib1funcs.asm
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifndef __ARMEB__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifndef __ARMEB__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float
MULTILIB_DIRNAMES = le be fpu soft
MULTILIB_MATCHES =
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
# Currently there is a bug somwehere in GCC's alias analysis
# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
# Disabling function inlining is a workaround for this problem.
TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
CROSS_LIBGCC1 = libgcc1-asm.a
LIB1ASMSRC = arm/lib1funcs.asm
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifndef __ARMEB__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifndef __ARMEB__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float
MULTILIB_DIRNAMES = le be fpu soft
MULTILIB_EXCEPTIONS =
MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
# If EXTRA_MULTILIB_PARTS is not defined above then define EXTRA_PARTS here
# EXTRA_PARTS = crtbegin.o crtend.o
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
# Currently there is a bug somewhere in GCC's alias analysis
# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
# Disabling function inlining is a workaround for this problem.
TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
CROSS_LIBGCC1 = libgcc1-asm.a
LIB1ASMSRC = arm/lib1funcs.asm
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifndef __ARMEB__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifndef __ARMEB__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
pe.o: $(srcdir)/config/arm/pe.c
$(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
MULTILIB_OPTIONS = mhard-float/msoft-float
MULTILIB_DIRNAMES = fpu soft
MULTILIB_MATCHES =
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
# Currently there is a bug somwehere in GCC's alias analysis
# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
# Disabling function inlining is a workaround for this problem.
TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
CROSS_LIBGCC1 = libgcc1-asm.a
LIB1ASMSRC = arm/lib1funcs.asm
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifndef __ARMEB__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifndef __ARMEB__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
MULTILIB_OPTIONS = mlittle-endian/mbig-endian
MULTILIB_DIRNAMES = le be
MULTILIB_EXCEPTIONS =
MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
# Note XScale does not support 26 bit APCS.
# Note XScale does not support hard FP
MULTILIB_OPTIONS += mno-thumb-interwork/mthumb-interwork
MULTILIB_DIRNAMES += normal interwork
MULTILIB_OPTIONS += marm/mthumb
MULTILIB_DIRNAMES += arm thumb
MULTILIB_EXCEPTIONS += *mhard-float/*mthumb*
# CYGNUS LOCAL nickc/redundant multilibs
MULTILIB_REDUNDANT_DIRS = interwork/thumb=thumb
# END CYGNUS LOCAL
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
# Currently there is a bug somewhere in GCC's alias analysis
# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
# Disabling function inlining is a workaround for this problem.
TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
CROSS_LIBGCC1 = libgcc1-asm.a
LIB1ASMSRC = arm/lib1funcs.asm
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifndef __ARMEB__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifndef __ARMEB__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
MULTILIB_OPTIONS = mlittle-endian/mbig-endian
MULTILIB_DIRNAMES = le be
MULTILIB_EXCEPTIONS =
MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
# Note XScale does not support 26 bit APCS.
# Note XScale does not support hard FP
MULTILIB_OPTIONS += mno-thumb-interwork/mthumb-interwork
MULTILIB_DIRNAMES += normal interwork
MULTILIB_OPTIONS += marm/mthumb
MULTILIB_DIRNAMES += arm thumb
MULTILIB_EXCEPTIONS += *mhard-float/*mthumb*
# CYGNUS LOCAL nickc/redundant multilibs
MULTILIB_REDUNDANT_DIRS = interwork/thumb=thumb
# END CYGNUS LOCAL
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
# Currently there is a bug somewhere in GCC's alias analysis
# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
# Disabling function inlining is a workaround for this problem.
TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
/* Definitions for XScale systems using COFF
Copyright (C) 1999, 2000 Free Software Foundation, Inc.
Contributed by Catherine Moore <clm@cygnus.com>
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* Run-time Target Specification. */
#ifndef SUBTARGET_CPU_DEFAULT
#define SUBTARGET_CPU_DEFAULT TARGET_CPU_xscale
#endif
#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mxscale}"
#define MULTILIB_DEFAULTS \
{ "mlittle-endian", "mno-thumb-interwork", "marm" }
#include "coff.h"
#undef TARGET_VERSION
#define TARGET_VERSION fputs (" (XScale/COFF)", stderr);
/* Definitions for XScale architectures using ELF
Copyright (C) 1999, 2000 Free Software Foundation, Inc.
Contributed by Catherine Moore <clm@cygnus.com>
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* Run-time Target Specification. */
#ifndef TARGET_VERSION
#define TARGET_VERSION fputs (" (XScale/ELF non-Linux)", stderr);
#endif
#ifndef SUBTARGET_CPU_DEFAULT
#define SUBTARGET_CPU_DEFAULT TARGET_CPU_xscale
#endif
#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mxscale}"
#ifndef MULTILIB_DEFAULTS
#define MULTILIB_DEFAULTS \
{ "mlittle-endian", "mno-thumb-interwork", "marm" }
#endif
#include "unknown-elf.h"