Commit 77e994c9, authored and committed by Richard Sandiford

[61/77] Use scalar_int_mode in the AArch64 port

This patch makes the AArch64 port use scalar_int_mode in various places.
Other ports won't need this kind of change; we only need it for AArch64
because of the variable-sized SVE modes.
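
The conversion relies on the is_a <scalar_int_mode> / as_a <scalar_int_mode>
converters from machmode.h, which the hunks below use throughout.  A minimal
sketch of the pattern (the function name here is hypothetical and not part of
the patch):

  /* Illustrative only: a hypothetical predicate written in the same
     style as the functions changed below.  */
  static bool
  aarch64_example_p (machine_mode mode)
  {
    scalar_int_mode int_mode;
    if (!is_a <scalar_int_mode> (mode, &int_mode))
      return false;	/* e.g. a variable-sized SVE vector mode.  */
    /* INT_MODE is now known to be a fixed-size scalar integer mode.  */
    return GET_MODE_SIZE (int_mode) <= 8;
  }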

The only change in functionality is in the rtx_costs handling
of CONST_INT.  If the caller doesn't supply a mode, we now pass
word_mode rather than VOIDmode to aarch64_internal_mov_immediate.
aarch64_movw_imm will therefore no longer truncate large constants
in this situation.
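
Concretely, the CONST_INT costing now reads as follows (taken from the
aarch64_rtx_costs hunk below), falling back to word_mode instead of handing
VOIDmode to aarch64_internal_mov_immediate:

  if (!is_a <scalar_int_mode> (mode, &int_mode))
    int_mode = word_mode;
  *cost = COSTS_N_INSNS (aarch64_internal_mov_immediate
			 (NULL_RTX, x, false, int_mode));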

2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* config/aarch64/aarch64-protos.h (aarch64_is_extend_from_extract):
	Take a scalar_int_mode instead of a machine_mode.
	(aarch64_mask_and_shift_for_ubfiz_p): Likewise.
	(aarch64_output_scalar_simd_mov_immediate): Likewise.
	(aarch64_simd_scalar_immediate_valid_for_move): Likewise.
	(aarch64_simd_attr_length_rglist): Delete.
	* config/aarch64/aarch64.c (aarch64_is_extend_from_extract): Take
	a scalar_int_mode instead of a machine_mode.
	(aarch64_add_offset): Likewise.
	(aarch64_internal_mov_immediate): Likewise.
	(aarch64_add_constant_internal): Likewise.
	(aarch64_add_constant): Likewise.
	(aarch64_movw_imm): Likewise.
	(aarch64_rtx_arith_op_extract_p): Likewise.
	(aarch64_mask_and_shift_for_ubfiz_p): Likewise.
	(aarch64_simd_scalar_immediate_valid_for_move): Likewise.
	Remove assert that the mode isn't a vector.
	(aarch64_output_scalar_simd_mov_immediate): Likewise.
	(aarch64_expand_mov_immediate): Update calls after above changes.
	(aarch64_output_casesi): Use as_a <scalar_int_mode>.
	(aarch64_and_bitmask_imm): Check for scalar integer modes.
	(aarch64_move_imm): Likewise.
	(aarch64_can_const_movi_rtx_p): Likewise.
	(aarch64_strip_extend): Likewise.
	(aarch64_extr_rtx_p): Likewise.
	(aarch64_rtx_costs): Likewise, using word_mode as the mode of
	a CONST_INT when the mode parameter is VOIDmode.
	(aarch64_float_const_rtx_p): Use scalar_int_mode for a temporary.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r251735
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -332,20 +332,19 @@ bool aarch64_function_arg_regno_p (unsigned);
 bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs);
 bool aarch64_gen_movmemqi (rtx *);
 bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *);
-bool aarch64_is_extend_from_extract (machine_mode, rtx, rtx);
+bool aarch64_is_extend_from_extract (scalar_int_mode, rtx, rtx);
 bool aarch64_is_long_call_p (rtx);
 bool aarch64_is_noplt_call_p (rtx);
 bool aarch64_label_mentioned_p (rtx);
 void aarch64_declare_function_name (FILE *, const char*, tree);
 bool aarch64_legitimate_pic_operand_p (rtx);
-bool aarch64_mask_and_shift_for_ubfiz_p (machine_mode, rtx, rtx);
+bool aarch64_mask_and_shift_for_ubfiz_p (scalar_int_mode, rtx, rtx);
 bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
 bool aarch64_move_imm (HOST_WIDE_INT, machine_mode);
 bool aarch64_mov_operand_p (rtx, machine_mode);
-int aarch64_simd_attr_length_rglist (machine_mode);
 rtx aarch64_reverse_mask (machine_mode);
 bool aarch64_offset_7bit_signed_scaled_p (machine_mode, HOST_WIDE_INT);
-char *aarch64_output_scalar_simd_mov_immediate (rtx, machine_mode);
+char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
 char *aarch64_output_simd_mov_immediate (rtx, machine_mode, unsigned);
 bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
 bool aarch64_regno_ok_for_base_p (int, bool);
@@ -354,7 +353,7 @@ bool aarch64_reinterpret_float_as_int (rtx value, unsigned HOST_WIDE_INT *fail);
 bool aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode,
					     bool high);
 bool aarch64_simd_imm_zero_p (rtx, machine_mode);
-bool aarch64_simd_scalar_immediate_valid_for_move (rtx, machine_mode);
+bool aarch64_simd_scalar_immediate_valid_for_move (rtx, scalar_int_mode);
 bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
 bool aarch64_simd_valid_immediate (rtx, machine_mode, bool,
				    struct simd_immediate_info *);
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -1183,7 +1183,7 @@ aarch64_is_noplt_call_p (rtx sym)
    (extract:MODE (mult (reg) (MULT_IMM)) (EXTRACT_IMM) (const_int 0)).  */
 bool
-aarch64_is_extend_from_extract (machine_mode mode, rtx mult_imm,
+aarch64_is_extend_from_extract (scalar_int_mode mode, rtx mult_imm,
				 rtx extract_imm)
 {
   HOST_WIDE_INT mult_val, extract_val;
@@ -1809,7 +1809,8 @@ aarch64_force_temporary (machine_mode mode, rtx x, rtx value)
 static rtx
-aarch64_add_offset (machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
+aarch64_add_offset (scalar_int_mode mode, rtx temp, rtx reg,
+		    HOST_WIDE_INT offset)
 {
   if (!aarch64_plus_immediate (GEN_INT (offset), mode))
     {
@@ -1827,7 +1828,7 @@ aarch64_add_offset (machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
 static int
 aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
-				machine_mode mode)
+				scalar_int_mode mode)
 {
   int i;
   unsigned HOST_WIDE_INT val, val2, mask;
@@ -1958,9 +1959,11 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
   gcc_assert (mode == SImode || mode == DImode);
   /* Check on what type of symbol it is.  */
-  if (GET_CODE (imm) == SYMBOL_REF
-      || GET_CODE (imm) == LABEL_REF
-      || GET_CODE (imm) == CONST)
+  scalar_int_mode int_mode;
+  if ((GET_CODE (imm) == SYMBOL_REF
+       || GET_CODE (imm) == LABEL_REF
+       || GET_CODE (imm) == CONST)
+      && is_a <scalar_int_mode> (mode, &int_mode))
     {
       rtx mem, base, offset;
       enum aarch64_symbol_type sty;
@@ -1974,11 +1977,12 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
	{
	case SYMBOL_FORCE_TO_MEM:
	  if (offset != const0_rtx
-	      && targetm.cannot_force_const_mem (mode, imm))
+	      && targetm.cannot_force_const_mem (int_mode, imm))
	    {
	      gcc_assert (can_create_pseudo_p ());
-	      base = aarch64_force_temporary (mode, dest, base);
-	      base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+	      base = aarch64_force_temporary (int_mode, dest, base);
+	      base = aarch64_add_offset (int_mode, NULL, base,
+					 INTVAL (offset));
	      aarch64_emit_move (dest, base);
	      return;
	    }
@@ -2000,8 +2004,8 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
	  mem = gen_rtx_MEM (ptr_mode, base);
	}
-      if (mode != ptr_mode)
-	mem = gen_rtx_ZERO_EXTEND (mode, mem);
+      if (int_mode != ptr_mode)
+	mem = gen_rtx_ZERO_EXTEND (int_mode, mem);
       emit_insn (gen_rtx_SET (dest, mem));
@@ -2017,8 +2021,9 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
       if (offset != const0_rtx)
	{
	  gcc_assert(can_create_pseudo_p ());
-	  base = aarch64_force_temporary (mode, dest, base);
-	  base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+	  base = aarch64_force_temporary (int_mode, dest, base);
+	  base = aarch64_add_offset (int_mode, NULL, base,
+				     INTVAL (offset));
	  aarch64_emit_move (dest, base);
	  return;
	}
@@ -2052,7 +2057,8 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
       return;
     }
-  aarch64_internal_mov_immediate (dest, imm, true, GET_MODE (dest));
+  aarch64_internal_mov_immediate (dest, imm, true,
+				  as_a <scalar_int_mode> (mode));
 }
 /* Add DELTA to REGNUM in mode MODE.  SCRATCHREG can be used to hold a
@@ -2068,9 +2074,9 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
    large immediate).  */
 static void
-aarch64_add_constant_internal (machine_mode mode, int regnum, int scratchreg,
-			       HOST_WIDE_INT delta, bool frame_related_p,
-			       bool emit_move_imm)
+aarch64_add_constant_internal (scalar_int_mode mode, int regnum,
+			       int scratchreg, HOST_WIDE_INT delta,
+			       bool frame_related_p, bool emit_move_imm)
 {
   HOST_WIDE_INT mdelta = abs_hwi (delta);
   rtx this_rtx = gen_rtx_REG (mode, regnum);
@@ -2117,7 +2123,7 @@ aarch64_add_constant_internal (machine_mode mode, int regnum, int scratchreg,
 }
 static inline void
-aarch64_add_constant (machine_mode mode, int regnum, int scratchreg,
+aarch64_add_constant (scalar_int_mode mode, int regnum, int scratchreg,
		       HOST_WIDE_INT delta)
 {
   aarch64_add_constant_internal (mode, regnum, scratchreg, delta, false, true);
@@ -3985,7 +3991,7 @@ aarch64_uimm12_shift (HOST_WIDE_INT val)
 /* Return true if val is an immediate that can be loaded into a
    register by a MOVZ instruction.  */
 static bool
-aarch64_movw_imm (HOST_WIDE_INT val, machine_mode mode)
+aarch64_movw_imm (HOST_WIDE_INT val, scalar_int_mode mode)
 {
   if (GET_MODE_SIZE (mode) > 4)
     {
@@ -4089,15 +4095,19 @@ aarch64_and_split_imm2 (HOST_WIDE_INT val_in)
 bool
 aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode)
 {
-  if (aarch64_bitmask_imm (val_in, mode))
+  scalar_int_mode int_mode;
+  if (!is_a <scalar_int_mode> (mode, &int_mode))
+    return false;
+  if (aarch64_bitmask_imm (val_in, int_mode))
     return false;
-  if (aarch64_move_imm (val_in, mode))
+  if (aarch64_move_imm (val_in, int_mode))
     return false;
   unsigned HOST_WIDE_INT imm2 = aarch64_and_split_imm2 (val_in);
-  return aarch64_bitmask_imm (imm2, mode);
+  return aarch64_bitmask_imm (imm2, int_mode);
 }
 /* Return true if val is an immediate that can be loaded into a
@@ -4105,9 +4115,13 @@ aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode)
 bool
 aarch64_move_imm (HOST_WIDE_INT val, machine_mode mode)
 {
-  if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
+  scalar_int_mode int_mode;
+  if (!is_a <scalar_int_mode> (mode, &int_mode))
+    return false;
+  if (aarch64_movw_imm (val, int_mode) || aarch64_movw_imm (~val, int_mode))
     return 1;
-  return aarch64_bitmask_imm (val, mode);
+  return aarch64_bitmask_imm (val, int_mode);
 }
 static bool
@@ -4771,9 +4785,9 @@ aarch64_float_const_rtx_p (rtx x)
       && SCALAR_FLOAT_MODE_P (mode)
       && aarch64_reinterpret_float_as_int (x, &ival))
     {
-      machine_mode imode = (mode == HFmode
-			    ? SImode
-			    : int_mode_for_mode (mode).require ());
+      scalar_int_mode imode = (mode == HFmode
+			       ? SImode
+			       : int_mode_for_mode (mode).require ());
       int num_instr = aarch64_internal_mov_immediate
			(NULL_RTX, gen_int_mode (ival, imode), false, imode);
       return num_instr < 3;
@@ -4802,7 +4816,8 @@ aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode)
   if (!TARGET_SIMD)
     return false;
-  machine_mode vmode, imode;
+  machine_mode vmode;
+  scalar_int_mode imode;
   unsigned HOST_WIDE_INT ival;
   if (GET_CODE (x) == CONST_DOUBLE
@@ -4818,17 +4833,14 @@ aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode)
       imode = int_mode_for_mode (mode).require ();
     }
   else if (GET_CODE (x) == CONST_INT
-	   && SCALAR_INT_MODE_P (mode))
-    {
-      imode = mode;
-      ival = INTVAL (x);
-    }
+	   && is_a <scalar_int_mode> (mode, &imode))
+    ival = INTVAL (x);
   else
     return false;
   /* use a 64 bit mode for everything except for DI/DF mode, where we use
      a 128 bit vector mode.  */
-  int width = GET_MODE_BITSIZE (mode) == 64 ? 128 : 64;
+  int width = GET_MODE_BITSIZE (imode) == 64 ? 128 : 64;
   vmode = aarch64_simd_container_mode (imode, width);
   rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, ival);
@@ -6130,7 +6142,8 @@ aarch64_output_casesi (rtx *operands)
   gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
-  index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
+  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (diff_vec));
+  index = exact_log2 (GET_MODE_SIZE (mode));
   gcc_assert (index >= 0 && index <= 3);
@@ -6250,13 +6263,17 @@ aarch64_strip_shift (rtx x)
 static rtx
 aarch64_strip_extend (rtx x, bool strip_shift)
 {
+  scalar_int_mode mode;
   rtx op = x;
+  if (!is_a <scalar_int_mode> (GET_MODE (op), &mode))
+    return op;
   /* Zero and sign extraction of a widened value.  */
   if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
       && XEXP (op, 2) == const0_rtx
       && GET_CODE (XEXP (op, 0)) == MULT
-      && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
+      && aarch64_is_extend_from_extract (mode, XEXP (XEXP (op, 0), 1),
					  XEXP (op, 1)))
     return XEXP (XEXP (op, 0), 0);
@@ -6593,7 +6610,7 @@ aarch64_branch_cost (bool speed_p, bool predictable_p)
 /* Return true if the RTX X in mode MODE is a zero or sign extract
    usable in an ADD or SUB (extended register) instruction.  */
 static bool
-aarch64_rtx_arith_op_extract_p (rtx x, machine_mode mode)
+aarch64_rtx_arith_op_extract_p (rtx x, scalar_int_mode mode)
 {
   /* Catch add with a sign extract.
      This is add_<optab><mode>_multp2.  */
@@ -6652,7 +6669,9 @@ static bool
 aarch64_extr_rtx_p (rtx x, rtx *res_op0, rtx *res_op1)
 {
   rtx op0, op1;
-  machine_mode mode = GET_MODE (x);
+  scalar_int_mode mode;
+  if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
+    return false;
   *res_op0 = NULL_RTX;
   *res_op1 = NULL_RTX;
@@ -6837,7 +6856,8 @@ aarch64_extend_bitfield_pattern_p (rtx x)
    mode MODE.  See the *andim_ashift<mode>_bfiz pattern.  */
 bool
-aarch64_mask_and_shift_for_ubfiz_p (machine_mode mode, rtx mask, rtx shft_amnt)
+aarch64_mask_and_shift_for_ubfiz_p (scalar_int_mode mode, rtx mask,
+				    rtx shft_amnt)
 {
   return CONST_INT_P (mask) && CONST_INT_P (shft_amnt)
	  && INTVAL (shft_amnt) < GET_MODE_BITSIZE (mode)
@@ -6929,8 +6949,8 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
	  if ((GET_CODE (op1) == ZERO_EXTEND
	       || GET_CODE (op1) == SIGN_EXTEND)
	      && CONST_INT_P (XEXP (op0, 1))
-	      && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
-		  >= INTVAL (XEXP (op0, 1))))
+	      && is_a <scalar_int_mode> (GET_MODE (XEXP (op1, 0)), &int_mode)
+	      && GET_MODE_BITSIZE (int_mode) >= INTVAL (XEXP (op0, 1)))
	    op1 = XEXP (op1, 0);
	  if (CONST_INT_P (op1))
@@ -6975,8 +6995,10 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
	     proportionally expensive to the number of instructions
	     required to build that constant.  This is true whether we
	     are compiling for SPEED or otherwise.  */
+	  if (!is_a <scalar_int_mode> (mode, &int_mode))
+	    int_mode = word_mode;
	  *cost = COSTS_N_INSNS (aarch64_internal_mov_immediate
-				 (NULL_RTX, x, false, mode));
+				 (NULL_RTX, x, false, int_mode));
	}
       return true;
@@ -6992,9 +7014,9 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
	  bool succeed = aarch64_reinterpret_float_as_int (x, &ival);
	  gcc_assert (succeed);
-	  machine_mode imode = (mode == HFmode
-				? SImode
-				: int_mode_for_mode (mode).require ());
+	  scalar_int_mode imode = (mode == HFmode
+				   ? SImode
+				   : int_mode_for_mode (mode).require ());
	  int ncost = aarch64_internal_mov_immediate
		(NULL_RTX, gen_int_mode (ival, imode), false, imode);
	  *cost += COSTS_N_INSNS (ncost);
@@ -7249,7 +7271,8 @@ cost_minus:
	}
       /* Look for SUB (extended register).  */
-      if (aarch64_rtx_arith_op_extract_p (op1, mode))
+      if (is_a <scalar_int_mode> (mode, &int_mode)
+	  && aarch64_rtx_arith_op_extract_p (op1, int_mode))
	{
	  if (speed)
	    *cost += extra_cost->alu.extend_arith;
@@ -7328,7 +7351,8 @@ cost_plus:
       *cost += rtx_cost (op1, mode, PLUS, 1, speed);
       /* Look for ADD (extended register).  */
-      if (aarch64_rtx_arith_op_extract_p (op0, mode))
+      if (is_a <scalar_int_mode> (mode, &int_mode)
+	  && aarch64_rtx_arith_op_extract_p (op0, int_mode))
	{
	  if (speed)
	    *cost += extra_cost->alu.extend_arith;
@@ -11696,12 +11720,11 @@ aarch64_simd_gen_const_vector_dup (machine_mode mode, HOST_WIDE_INT val)
 /* Check OP is a legal scalar immediate for the MOVI instruction.  */
 bool
-aarch64_simd_scalar_immediate_valid_for_move (rtx op, machine_mode mode)
+aarch64_simd_scalar_immediate_valid_for_move (rtx op, scalar_int_mode mode)
 {
   machine_mode vmode;
-  gcc_assert (!VECTOR_MODE_P (mode));
-  vmode = aarch64_preferred_simd_mode (as_a <scalar_mode> (mode));
+  vmode = aarch64_preferred_simd_mode (mode);
   rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
   return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
 }
@@ -13051,7 +13074,7 @@ aarch64_output_simd_mov_immediate (rtx const_vector,
 }
 char*
-aarch64_output_scalar_simd_mov_immediate (rtx immediate, machine_mode mode)
+aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
 {
   /* If a floating point number was passed and we desire to use it in an
@@ -13069,7 +13092,6 @@ aarch64_output_scalar_simd_mov_immediate (rtx immediate, machine_mode mode)
      a 128 bit vector mode.  */
   int width = GET_MODE_BITSIZE (mode) == 64 ? 128 : 64;
-  gcc_assert (!VECTOR_MODE_P (mode));
   vmode = aarch64_simd_container_mode (mode, width);
   rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
   return aarch64_output_simd_mov_immediate (v_op, vmode, width);