Commit 58e9ddb1 by Nick Clifton, committed by Nick Clifton

arm.h (EMIT_EABI_ATTRIBUTE): New macro.

	* config/arm/arm.h (EMIT_EABI_ATTRIBUTE): New macro.  Used to
	emit a .eabi_attribute assembler directive, possibly with a
	comment attached.
	* config/arm/arm.c (arm_file_start): Use the new macro.
	* config/arm/arm-c.c (arm_output_c_attributes): Likewise.

From-SVN: r179844
parent 9bc9ee67
gcc/ChangeLog:

+2011-10-12  Nick Clifton  <nickc@redhat.com>
+
+	* config/arm/arm.h (EMIT_EABI_ATTRIBUTE): New macro.  Used to
+	emit a .eabi_attribute assembler directive, possibly with a
+	comment attached.
+	* config/arm/arm.c (arm_file_start): Use the new macro.
+	* config/arm/arm-c.c (arm_output_c_attributes): Likewise.
+
 2011-10-12  Georg-Johann Lay  <avr@gjlay.de>
 
 	PR target/49939
gcc/config/arm/arm-c.c:

-/* Copyright (C) 2007, 2010 Free Software Foundation, Inc.
+/* Copyright (C) 2007, 2010, 2011 Free Software Foundation, Inc.
 
    This file is part of GCC.
 
    GCC is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License as published by the Free
    Software Foundation; either version 3, or (at your option) any later
    version.
 
    GCC is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.
 
    You should have received a copy of the GNU General Public License
    along with GCC; see the file COPYING3.  If not see
    <http://www.gnu.org/licenses/>.  */
 
 #include "config.h"
 #include "system.h"
@@ -25,21 +25,21 @@ along with GCC; see the file COPYING3.  If not see
 #include "output.h"
 #include "c-family/c-common.h"
 
 /* Output C specific EABI object attributes.  These can not be done in
    arm.c because they require information from the C frontend.  */
 
-static void arm_output_c_attributes(void)
+static void
+arm_output_c_attributes (void)
 {
-  /* Tag_ABI_PCS_wchar_t.  */
-  asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
-	       (int)(TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT));
+  EMIT_EABI_ATTRIBUTE (Tag_ABI_PCS_wchar_t, 18,
+		       (int)(TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT));
 }
 
 /* Setup so that common code calls arm_output_c_attributes.  */
-void arm_lang_object_attributes_init(void)
+void
+arm_lang_object_attributes_init (void)
 {
   arm_lang_output_object_attributes_hook = arm_output_c_attributes;
 }
gcc/config/arm/arm.c:

[The many arm.c hunks shown at this point on the page (arm_set_fixed_optab_libfunc, arm_init_libfuncs, arm_build_builtin_va_list, the AAPCS argument-layout and VFP helpers, the TLS, rtx-cost, scheduling, reorg, prologue/epilogue, operand-printing and iWMMXt builtin code, through number_of_first_bit_set) contain only blank-line and trailing-whitespace adjustments: no code changes are visible in them and none are listed in the ChangeLog, so their unchanged context is omitted here.  The substantive arm.c change is in arm_file_start, below.]
@@ -22291,9 +22287,9 @@ arm_file_start (void)
       if (arm_fpu_desc->model == ARM_FP_MODEL_VFP)
 	{
 	  if (TARGET_HARD_FLOAT)
-	    asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
+	    EMIT_EABI_ATTRIBUTE (Tag_ABI_HardFP_use, 27, 3);
 	  if (TARGET_HARD_FLOAT_ABI)
-	    asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
+	    EMIT_EABI_ATTRIBUTE (Tag_ABI_VFP_args, 28, 1);
 	}
     }
   asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
@@ -22302,30 +22298,23 @@ arm_file_start (void)
      are used.  However we don't have any easy way of figuring this out.
      Conservatively record the setting that would have been used.  */
 
-  /* Tag_ABI_FP_rounding.  */
   if (flag_rounding_math)
-    asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
+    EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_rounding, 19, 1);
 
   if (!flag_unsafe_math_optimizations)
     {
-      /* Tag_ABI_FP_denomal.  */
-      asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
-      /* Tag_ABI_FP_exceptions.  */
-      asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
+      EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_denormal, 20, 1);
+      EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_exceptions, 21, 1);
     }
 
-  /* Tag_ABI_FP_user_exceptions.  */
   if (flag_signaling_nans)
-    asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
+    EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_user_exceptions, 22, 1);
 
-  /* Tag_ABI_FP_number_model.  */
-  asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
-	       flag_finite_math_only ? 1 : 3);
-  /* Tag_ABI_align8_needed.  */
-  asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
-  /* Tag_ABI_align8_preserved.  */
-  asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
-  /* Tag_ABI_enum_size.  */
-  asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
-	       flag_short_enums ? 1 : 2);
+  EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_number_model, 23,
+		       flag_finite_math_only ? 1 : 3);
+  EMIT_EABI_ATTRIBUTE (Tag_ABI_align8_needed, 24, 1);
+  EMIT_EABI_ATTRIBUTE (Tag_ABI_align8_preserved, 25, 1);
+  EMIT_EABI_ATTRIBUTE (Tag_ABI_enum_size, 26, flag_short_enums ? 1 : 2);
 
   /* Tag_ABI_optimization_goals.  */
   if (optimize_size)
@@ -22336,21 +22325,18 @@ arm_file_start (void)
     val = 1;
   else
     val = 6;
-  asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
+  EMIT_EABI_ATTRIBUTE (Tag_ABI_optimization_goals, 30, val);
 
-  /* Tag_CPU_unaligned_access.  */
-  asm_fprintf (asm_out_file, "\t.eabi_attribute 34, %d\n",
-	       unaligned_access);
+  EMIT_EABI_ATTRIBUTE (Tag_CPU_unaligned_access, 34, unaligned_access);
 
-  /* Tag_ABI_FP_16bit_format.  */
   if (arm_fp16_format)
-    asm_fprintf (asm_out_file, "\t.eabi_attribute 38, %d\n",
-		 (int)arm_fp16_format);
+    EMIT_EABI_ATTRIBUTE (Tag_ABI_FP_16bit_format, 38, (int) arm_fp16_format);
 
   if (arm_lang_output_object_attributes_hook)
     arm_lang_output_object_attributes_hook();
     }
 
-  default_file_start();
+  default_file_start ();
 }
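As an aside on the values passed above (an illustration, not part of the patch): a small standalone C sketch restating how a few of them are chosen, with the meanings of the encodings noted in comments.  The flag variables are stand-ins for GCC's option globals, and the encoding meanings follow the ARM EABI build-attribute tables rather than anything in this commit.

#include <stdio.h>

/* Stand-ins for GCC's option globals (illustrative defaults).  */
static int flag_finite_math_only = 0;   /* -ffinite-math-only */
static int flag_short_enums      = 0;   /* -fshort-enums */
static int unaligned_access      = 1;   /* unaligned access permitted */

int
main (void)
{
  /* Tag_ABI_FP_number_model (23): 3 = full IEEE 754, 1 = IEEE normal
     (finite) values only -- hence flag_finite_math_only ? 1 : 3.  */
  printf ("\t.eabi_attribute 23, %d\n", flag_finite_math_only ? 1 : 3);

  /* Tag_ABI_enum_size (26): 1 = smallest containing type, 2 = enums
     are always int-sized -- hence flag_short_enums ? 1 : 2.  */
  printf ("\t.eabi_attribute 26, %d\n", flag_short_enums ? 1 : 2);

  /* Tag_CPU_unaligned_access (34): the flag value is passed straight
     through (0 = not allowed, 1 = allowed).  */
  printf ("\t.eabi_attribute 34, %d\n", unaligned_access);
  return 0;
}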
[The remaining arm.c hunks (arm_setup_incoming_varargs through arm_builtin_support_vector_misalignment) are likewise whitespace-only and their unchanged context is omitted.]
gcc/config/arm/arm.h:

@@ -2235,4 +2235,19 @@ extern int making_const_table;
 " %{mcpu=generic-*:-march=%*;" \
 " :%{mcpu=*:-mcpu=%*} %{march=*:-march=%*}}"
 
+/* This macro is used to emit an EABI tag and its associated value.
+   We emit the numerical value of the tag in case the assembler does not
+   support textual tags.  (Eg gas prior to 2.20).  If requested we include
+   the tag name in a comment so that anyone reading the assembler output
+   will know which tag is being set.  */
+#define EMIT_EABI_ATTRIBUTE(NAME,NUM,VAL)                                \
+  do                                                                     \
+    {                                                                    \
+      asm_fprintf (asm_out_file, "\t.eabi_attribute %d, %d", NUM, VAL);  \
+      if (flag_verbose_asm || flag_debug_asm)                            \
+	asm_fprintf (asm_out_file, "\t%s " #NAME, ASM_COMMENT_START);    \
+      asm_fprintf (asm_out_file, "\n");                                  \
+    }                                                                    \
+  while (0)
+
 #endif /* ! GCC_ARM_H */
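To make the new macro concrete, here is a minimal standalone sketch (host C, not GCC code) that mimics what EMIT_EABI_ATTRIBUTE writes, with stdout standing in for asm_out_file, a plain variable standing in for flag_verbose_asm/flag_debug_asm, and "@" assumed as ARM's ASM_COMMENT_START.  As in the real macro, the tag name is only ever used via #NAME, so it needs no declaration.

#include <stdio.h>
#include <stdbool.h>

static bool flag_verbose_asm = true;   /* stands in for -fverbose-asm */

#define EMIT_EABI_ATTRIBUTE(NAME, NUM, VAL)                        \
  do                                                               \
    {                                                              \
      fprintf (stdout, "\t.eabi_attribute %d, %d", (NUM), (VAL));  \
      if (flag_verbose_asm)                                        \
        fprintf (stdout, "\t%s " #NAME, "@");                      \
      fprintf (stdout, "\n");                                      \
    }                                                              \
  while (0)

int
main (void)
{
  /* Comment enabled: prints ".eabi_attribute 18, 4" followed by
     "@ Tag_ABI_PCS_wchar_t" on the same line.  */
  EMIT_EABI_ATTRIBUTE (Tag_ABI_PCS_wchar_t, 18, 4);

  /* Comment disabled: only the numeric directive, which any assembler
     that understands .eabi_attribute will accept.  */
  flag_verbose_asm = false;
  EMIT_EABI_ATTRIBUTE (Tag_ABI_enum_size, 26, 2);
  return 0;
}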