Commit 86b60116 by Julian Brown (committed by Julian Brown)

arm.c (arm_option_override): Add unaligned_access support.

	gcc/
	* config/arm/arm.c (arm_option_override): Add unaligned_access
	support.
	(arm_file_start): Emit attribute for unaligned access as
	appropriate.
	* config/arm/arm.md (UNSPEC_UNALIGNED_LOAD)
	(UNSPEC_UNALIGNED_STORE): Add constants for unspecs.
	(insv, extzv): Add unaligned-access support.
	(extv): Change to expander. Likewise.
	(extzv_t1, extv_regsi): Add helpers.
	(unaligned_loadsi, unaligned_loadhis, unaligned_loadhiu)
	(unaligned_storesi, unaligned_storehi): New.
	(*extv_reg): New (previous extv implementation).
	* config/arm/arm.opt (munaligned_access): Add option.
	* config/arm/constraints.md (Uw): New constraint.
	* expmed.c (store_bit_field_1): Adjust bitfield numbering according
	to size of access, not size of unit, when BITS_BIG_ENDIAN !=
	BYTES_BIG_ENDIAN. Don't use bitfield accesses for
	volatile accesses when -fstrict-volatile-bitfields is in effect.
	(extract_bit_field_1): Likewise.

From-SVN: r178852
parent fafaf06f
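
For context, a minimal sketch of the kind of source this change is aimed at (a hypothetical example, not part of the commit): with -munaligned-access in effect, accesses to misaligned fields of packed structures can be emitted as single unaligned LDR/STR/LDRH/STRH instructions through the new unaligned load/store patterns, rather than being synthesized from byte accesses.

    /* example.c -- hypothetical test case, not from this commit.
       Assumed build: arm-none-eabi-gcc -march=armv7-a -O2 -munaligned-access -S example.c  */
    struct __attribute__ ((packed)) rec
    {
      char tag;
      int value;      /* misaligned 32-bit field */
      short count;    /* misaligned 16-bit field */
    };

    int
    get_value (struct rec *r)
    {
      return r->value;   /* may compile to a single unaligned LDR */
    }

    void
    set_count (struct rec *r, short c)
    {
      r->count = c;      /* may compile to a single unaligned STRH */
    }
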
2011-09-14  Julian Brown  <julian@codesourcery.com>

	* config/arm/arm.c (arm_option_override): Add unaligned_access
	support.
	(arm_file_start): Emit attribute for unaligned access as
	appropriate.
	* config/arm/arm.md (UNSPEC_UNALIGNED_LOAD)
	(UNSPEC_UNALIGNED_STORE): Add constants for unspecs.
	(insv, extzv): Add unaligned-access support.
	(extv): Change to expander. Likewise.
	(extzv_t1, extv_regsi): Add helpers.
	(unaligned_loadsi, unaligned_loadhis, unaligned_loadhiu)
	(unaligned_storesi, unaligned_storehi): New.
	(*extv_reg): New (previous extv implementation).
	* config/arm/arm.opt (munaligned_access): Add option.
	* config/arm/constraints.md (Uw): New constraint.
	* expmed.c (store_bit_field_1): Adjust bitfield numbering according
	to size of access, not size of unit, when BITS_BIG_ENDIAN !=
	BYTES_BIG_ENDIAN. Don't use bitfield accesses for
	volatile accesses when -fstrict-volatile-bitfields is in effect.
	(extract_bit_field_1): Likewise.
2011-09-14  Richard Sandiford  <richard.sandiford@linaro.org>

	* simplify-rtx.c (simplify_subreg): Check that the inner mode is
@@ -1915,6 +1915,28 @@ arm_option_override (void)
fix_cm3_ldrd = 0;
}
/* Enable -munaligned-access by default for
- all ARMv6 architecture-based processors
- ARMv7-A, ARMv7-R, and ARMv7-M architecture-based processors.
Disable -munaligned-access by default for
- all pre-ARMv6 architecture-based processors
- ARMv6-M architecture-based processors. */
if (unaligned_access == 2)
{
if (arm_arch6 && (arm_arch_notm || arm_arch7))
unaligned_access = 1;
else
unaligned_access = 0;
}
else if (unaligned_access == 1
&& !(arm_arch6 && (arm_arch_notm || arm_arch7)))
{
warning (0, "target CPU does not support unaligned accesses");
unaligned_access = 0;
}
if (TARGET_THUMB1 && flag_schedule_insns)
{
/* Don't warn since it's on by default in -O2. */
@@ -22274,6 +22296,10 @@ arm_file_start (void)
val = 6;
asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
/* Tag_CPU_unaligned_access. */
asm_fprintf (asm_out_file, "\t.eabi_attribute 34, %d\n",
unaligned_access);
/* Tag_ABI_FP_16bit_format. */
if (arm_fp16_format)
asm_fprintf (asm_out_file, "\t.eabi_attribute 38, %d\n",
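A note on the arm_file_start hunk above: when unaligned access ends up enabled, the new Tag_CPU_unaligned_access build attribute is recorded in the generated assembly. Illustratively, the file prologue would then contain a line of the form (value 1 = enabled, 0 = disabled):

    .eabi_attribute 34, 1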
@@ -253,3 +253,7 @@ mfix-cortex-m3-ldrd
Target Report Var(fix_cm3_ldrd) Init(2)
Avoid overlapping destination and address registers on LDRD instructions
that may trigger Cortex-M3 errata.
munaligned-access
Target Report Var(unaligned_access) Init(2)
Enable unaligned word and halfword accesses to packed data.
@@ -36,7 +36,7 @@
;; The following memory constraints have been used:
;; in ARM/Thumb-2 state: Q, Ut, Uv, Uy, Un, Um, Us
;; in ARM state: Uq
;; in Thumb state: Uu
;; in Thumb state: Uu, Uw
(define_register_constraint "f" "TARGET_ARM ? FPA_REGS : NO_REGS"
@@ -353,6 +353,19 @@
&& thumb1_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
0)")))
; The 16-bit post-increment LDR/STR accepted by thumb1_legitimate_address_p
; are actually LDM/STM instructions, so cannot be used to access unaligned
; data.
(define_memory_constraint "Uw"
"@internal
In Thumb state an address that is valid in 16bit encoding, and that can be
used for unaligned accesses."
(and (match_code "mem")
(match_test "TARGET_THUMB
&& thumb1_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
0)
&& GET_CODE (XEXP (op, 0)) != POST_INC")))
;; We used to have constraint letters for S and R in ARM state, but
;; all uses of these now appear to have been removed.
@@ -620,6 +620,10 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
&& GET_MODE (value) != BLKmode
&& bitsize > 0
&& GET_MODE_BITSIZE (op_mode) >= bitsize
/* Do not use insv for volatile bitfields when
-fstrict-volatile-bitfields is in effect. */
&& !(MEM_P (op0) && MEM_VOLATILE_P (op0)
&& flag_strict_volatile_bitfields > 0)
&& ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
&& (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
{
@@ -659,19 +663,21 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
copy_back = true;
}
/* On big-endian machines, we count bits from the most significant.
If the bit field insn does not, we must invert. */
if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
xbitpos = unit - bitsize - xbitpos;
/* We have been counting XBITPOS within UNIT.
Count instead within the size of the register. */
if (BITS_BIG_ENDIAN && !MEM_P (xop0))
if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
unit = GET_MODE_BITSIZE (op_mode);
/* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
"backwards" from the size of the unit we are inserting into.
Otherwise, we count bits from the most significant on a
BYTES/BITS_BIG_ENDIAN machine. */
if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
xbitpos = unit - bitsize - xbitpos;
/* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
value1 = value;
if (GET_MODE (value) != op_mode)
@@ -1507,6 +1513,10 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
if (ext_mode != MAX_MACHINE_MODE
&& bitsize > 0
&& GET_MODE_BITSIZE (ext_mode) >= bitsize
/* Do not use extv/extzv for volatile bitfields when
-fstrict-volatile-bitfields is in effect. */
&& !(MEM_P (op0) && MEM_VOLATILE_P (op0)
&& flag_strict_volatile_bitfields > 0)
/* If op0 is a register, we need it in EXT_MODE to make it
acceptable to the format of ext(z)v. */
&& !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
@@ -1528,17 +1538,20 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
/* Get ref to first byte containing part of the field. */
xop0 = adjust_address (xop0, byte_mode, xoffset);
/* On big-endian machines, we count bits from the most significant.
If the bit field insn does not, we must invert. */
if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
xbitpos = unit - bitsize - xbitpos;
/* Now convert from counting within UNIT to counting in EXT_MODE. */
if (BITS_BIG_ENDIAN && !MEM_P (xop0))
if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
unit = GET_MODE_BITSIZE (ext_mode);
/* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
"backwards" from the size of the unit we are extracting from.
Otherwise, we count bits from the most significant on a
BYTES/BITS_BIG_ENDIAN machine. */
if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
xbitpos = unit - bitsize - xbitpos;
if (xtarget == 0)
xtarget = xspec_target = gen_reg_rtx (tmode);
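To make the expmed.c renumbering concrete, here is a worked numeric sketch using assumed values (not taken from the patch): an 8-bit field at bit position 0 of a memory operand, accessed through an insv/ext(z)v pattern whose op_mode/ext_mode is 32 bits wide, on a target where BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN. Previously the inversion was applied while UNIT still described the original unit; after the change it is applied once UNIT has been widened to the size of the access.

    /* bitpos_sketch.c -- illustration only; the variable names mirror expmed.c
       but the numbers are assumed for the example.  */
    #include <stdio.h>

    int
    main (void)
    {
      unsigned int unit = 8;            /* original unit: one byte of the MEM */
      unsigned int access_bits = 32;    /* GET_MODE_BITSIZE (op_mode/ext_mode) */
      unsigned int bitsize = 8;         /* width of the bit-field */
      unsigned int xbitpos = 0;         /* position counted within UNIT */

      /* Old placement: inverted before UNIT was widened to the access size.  */
      unsigned int old_pos = unit - bitsize - xbitpos;         /* = 0 */

      /* New placement: UNIT is first set to the access size, then inverted,
         so the field is numbered within the size of the access.  */
      unsigned int new_pos = access_bits - bitsize - xbitpos;  /* = 24 */

      printf ("old xbitpos = %u, new xbitpos = %u\n", old_pos, new_pos);
      return 0;
    }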