Commit 1044fa32 authored by Richard Sandiford, committed by Richard Sandiford

[AArch64] Use simd_immediate_info for SVE predicate constants

This patch makes predicate constants use the normal simd_immediate_info
machinery, rather than treating PFALSE and PTRUE as special cases.
This makes it easier to add other types of predicate constant later.
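
As a rough illustration of the behaviour (not part of the patch): for predicate
constants the new output path chooses between PFALSE for an all-false constant
and PTRUE for an all-true one, printing an explicit "vl<count>" operand when the
vector length is a compile-time constant and "all" otherwise.  The standalone
sketch below mirrors that choice outside of GCC; format_sve_pred_constant and
its parameters are hypothetical names invented for this example.

#include <cstdio>
#include <string>

/* Sketch of the selection made for SVE predicate constants: all-false
   becomes PFALSE, all-true becomes PTRUE with a "vl<count>" operand when
   the vector length is known at compile time, otherwise "all".  */
std::string
format_sve_pred_constant (bool all_true, char element_suffix,
			  int vector_bytes /* -1 if not a compile-time constant */,
			  int element_bytes)
{
  char buf[sizeof ("ptrue\t%0.N, vlNNNNN")];
  if (!all_true)
    std::snprintf (buf, sizeof (buf), "pfalse\t%%0.b");
  else if (vector_bytes > 0)
    /* Known vector length: emit an explicit element count.  */
    std::snprintf (buf, sizeof (buf), "ptrue\t%%0.%c, vl%d", element_suffix,
		   vector_bytes / element_bytes);
  else
    /* Length only known at run time: set every element.  */
    std::snprintf (buf, sizeof (buf), "ptrue\t%%0.%c, all", element_suffix);
  return buf;
}

int
main ()
{
  /* For a 256-bit (32-byte) SVE vector of 32-bit elements, an all-true
     predicate constant would print as "ptrue %0.s, vl8".  */
  std::printf ("%s\n", format_sve_pred_constant (true, 's', 32, 4).c_str ());
  std::printf ("%s\n", format_sve_pred_constant (false, 'b', 32, 1).c_str ());
  return 0;
}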

2019-08-13  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* config/aarch64/aarch64-protos.h (aarch64_output_ptrue): Delete.
	* config/aarch64/aarch64-sve.md (*aarch64_sve_mov<PRED_ALL:mode>):
	Use a single Dn alternative instead of separate Dz and Dm
	alternatives.  Use aarch64_output_sve_mov_immediate.
	* config/aarch64/aarch64.c (aarch64_sve_element_int_mode): New
	function.
	(aarch64_simd_valid_immediate): Fill in the simd_immediate_info
	for predicates too.
	(aarch64_output_sve_mov_immediate): Handle predicate modes.
	(aarch64_output_ptrue): Delete.

From-SVN: r274372
gcc/config/aarch64/aarch64-protos.h
@@ -462,7 +462,6 @@ char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
 char *aarch64_output_simd_mov_immediate (rtx, unsigned,
					  enum simd_immediate_check w = AARCH64_CHECK_MOV);
 char *aarch64_output_sve_mov_immediate (rtx);
-char *aarch64_output_ptrue (machine_mode, char);
 bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
 bool aarch64_regno_ok_for_base_p (int, bool);
 bool aarch64_regno_ok_for_index_p (int, bool);
gcc/config/aarch64/aarch64-sve.md
@@ -453,8 +453,8 @@
 )

 (define_insn "*aarch64_sve_mov<mode>"
-  [(set (match_operand:PRED_ALL 0 "nonimmediate_operand" "=Upa, m, Upa, Upa, Upa")
-	(match_operand:PRED_ALL 1 "general_operand" "Upa, Upa, m, Dz, Dm"))]
+  [(set (match_operand:PRED_ALL 0 "nonimmediate_operand" "=Upa, m, Upa, Upa")
+	(match_operand:PRED_ALL 1 "general_operand" "Upa, Upa, m, Dn"))]
   "TARGET_SVE
    && (register_operand (operands[0], <MODE>mode)
        || register_operand (operands[1], <MODE>mode))"
@@ -462,8 +462,7 @@
   mov\t%0.b, %1.b
   str\t%1, %0
   ldr\t%0, %1
-  pfalse\t%0.b
-  * return aarch64_output_ptrue (<MODE>mode, '<Vetype>');"
+  * return aarch64_output_sve_mov_immediate (operands[1]);"
 )

 ;; =========================================================================
gcc/config/aarch64/aarch64.c
@@ -1635,6 +1635,16 @@ aarch64_get_mask_mode (poly_uint64 nunits, poly_uint64 nbytes)
   return default_get_mask_mode (nunits, nbytes);
 }

+/* Return the integer element mode associated with SVE mode MODE.  */
+static scalar_int_mode
+aarch64_sve_element_int_mode (machine_mode mode)
+{
+  unsigned int elt_bits = vector_element_size (BITS_PER_SVE_VECTOR,
+					       GET_MODE_NUNITS (mode));
+  return int_mode_for_size (elt_bits, 0).require ();
+}
+
 /* Implement TARGET_PREFERRED_ELSE_VALUE.  For binary operations,
    prefer to use the first arithmetic operand as the else value if
    the else value doesn't matter, since that exactly matches the SVE
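
For reference (not part of the patch): the new helper recovers the element size
by dividing the bits in one 128-bit SVE granule by the number of predicate
elements per granule, and maps that size to an integer mode.  The snippet below
is a standalone worked example of that arithmetic; the GCC mode names appear
only in strings.

#include <cstdio>

int
main ()
{
  const int bits_per_sve_granule = 128;
  /* Elements per 128-bit granule for VNx16BI, VNx8BI, VNx4BI, VNx2BI.  */
  const int nunits[] = { 16, 8, 4, 2 };
  const char *int_mode[] = { "QImode", "HImode", "SImode", "DImode" };
  for (int i = 0; i < 4; ++i)
    std::printf ("%2d elements per granule -> %d-bit elements (%s)\n",
		 nunits[i], bits_per_sve_granule / nunits[i], int_mode[i]);
  return 0;
}
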
@@ -14700,8 +14710,18 @@ aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,

   /* Handle PFALSE and PTRUE.  */
   if (vec_flags & VEC_SVE_PRED)
-    return (op == CONST0_RTX (mode)
-	    || op == CONSTM1_RTX (mode));
+    {
+      if (op == CONST0_RTX (mode) || op == CONSTM1_RTX (mode))
+	{
+	  if (info)
+	    {
+	      scalar_int_mode int_mode = aarch64_sve_element_int_mode (mode);
+	      *info = simd_immediate_info (int_mode, op == CONSTM1_RTX (mode));
+	    }
+	  return true;
+	}
+      return false;
+    }

   scalar_float_mode elt_float_mode;
   if (n_elts == 1
@@ -16393,6 +16413,21 @@ aarch64_output_sve_mov_immediate (rtx const_vector)

   element_char = sizetochar (GET_MODE_BITSIZE (info.elt_mode));

+  machine_mode vec_mode = GET_MODE (const_vector);
+  if (aarch64_sve_pred_mode_p (vec_mode))
+    {
+      static char buf[sizeof ("ptrue\t%0.N, vlNNNNN")];
+      unsigned int total_bytes;
+      if (info.u.mov.value == const0_rtx)
+	snprintf (buf, sizeof (buf), "pfalse\t%%0.b");
+      else if (BYTES_PER_SVE_VECTOR.is_constant (&total_bytes))
+	snprintf (buf, sizeof (buf), "ptrue\t%%0.%c, vl%d", element_char,
+		  total_bytes / GET_MODE_SIZE (info.elt_mode));
+      else
+	snprintf (buf, sizeof (buf), "ptrue\t%%0.%c, all", element_char);
+      return buf;
+    }
+
   if (info.insn == simd_immediate_info::INDEX)
     {
       snprintf (templ, sizeof (templ), "index\t%%0.%c, #"
@@ -16425,21 +16460,6 @@ aarch64_output_sve_mov_immediate (rtx const_vector)
   return templ;
 }

-/* Return the asm format for a PTRUE instruction whose destination has
-   mode MODE.  SUFFIX is the element size suffix.  */
-char *
-aarch64_output_ptrue (machine_mode mode, char suffix)
-{
-  unsigned int nunits;
-  static char buf[sizeof ("ptrue\t%0.N, vlNNNNN")];
-
-  if (GET_MODE_NUNITS (mode).is_constant (&nunits))
-    snprintf (buf, sizeof (buf), "ptrue\t%%0.%c, vl%d", suffix, nunits);
-  else
-    snprintf (buf, sizeof (buf), "ptrue\t%%0.%c, all", suffix);
-  return buf;
-}
-
 /* Split operands into moves from op[1] + op[2] into op[0].  */

 void