Commit 16de3637 authored and committed by Richard Sandiford

[AArch64] Factor out ptrue predicate creation

This is the first step to canonicalising predicate constants so that
they can be reused between modes.

2019-06-18  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* config/aarch64/aarch64-protos.h (aarch64_ptrue_reg): Declare.
	* config/aarch64/aarch64.c (aarch64_ptrue_reg): New function.
	(aarch64_expand_sve_widened_duplicate, aarch64_expand_sve_mem_move)
	(aarch64_maybe_expand_sve_subreg_move, aarch64_evpc_rev_local)
	(aarch64_expand_sve_vec_cmp_int): Use it.
	(aarch64_expand_sve_vec_cmp_float): Likewise.
	* config/aarch64/aarch64-sve.md: Likewise throughout.

From-SVN: r272424
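
In brief (a condensed sketch assembled from the hunks below, not extra code in the patch): every expander and helper that previously built the all-true predicate inline as

    rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));

now calls the new helper in aarch64.c, which additionally asserts that the requested mode really is a predicate (vector-of-boolean) mode:

    /* Return an all-true predicate register of mode MODE.  */
    rtx
    aarch64_ptrue_reg (machine_mode mode)
    {
      gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
      return force_reg (mode, CONSTM1_RTX (mode));
    }

    /* Representative call site after the change.  */
    rtx ptrue = aarch64_ptrue_reg (pred_mode);

Funnelling all ptrue creation through a single function gives later patches one place to canonicalise the constant, so that the same predicate can be reused between modes.
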
gcc/config/aarch64/aarch64-protos.h
@@ -520,6 +520,7 @@ const char * aarch64_output_probe_sve_stack_clash (rtx, rtx, rtx, rtx);
void aarch64_err_no_fpadvsimd (machine_mode);
void aarch64_expand_epilogue (bool);
void aarch64_expand_mov_immediate (rtx, rtx, rtx (*) (rtx, rtx) = 0);
rtx aarch64_ptrue_reg (machine_mode);
void aarch64_emit_sve_pred_move (rtx, rtx, rtx);
void aarch64_expand_sve_mem_move (rtx, rtx, machine_mode);
bool aarch64_maybe_expand_sve_subreg_move (rtx, rtx);
gcc/config/aarch64/aarch64-sve.md
@@ -232,7 +232,7 @@
UNSPEC_LD1_GATHER))]
"TARGET_SVE"
{
operands[5] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[5] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -289,7 +289,7 @@
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
{
operands[5] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[5] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -629,7 +629,7 @@
{
if (MEM_P (operands[1]))
{
rtx ptrue = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
rtx ptrue = aarch64_ptrue_reg (<VPRED>mode);
emit_insn (gen_sve_ld1r<mode> (operands[0], ptrue, operands[1],
CONST0_RTX (<MODE>mode)));
DONE;
@@ -744,7 +744,7 @@
UNSPEC_LDN))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -773,7 +773,7 @@
UNSPEC_STN))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -932,7 +932,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -1019,7 +1019,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -1050,7 +1050,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -1080,7 +1080,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -1150,7 +1150,7 @@
(match_dup 3)))]
"TARGET_SVE"
{
operands[3] = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
operands[3] = aarch64_ptrue_reg (<MODE>mode);
}
)
@@ -1197,7 +1197,7 @@
(match_dup 2)))]
"TARGET_SVE"
{
operands[2] = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
operands[2] = aarch64_ptrue_reg (<MODE>mode);
}
)
@@ -1246,7 +1246,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -1738,7 +1738,7 @@
(pc)))]
""
{
rtx ptrue = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
rtx ptrue = aarch64_ptrue_reg (<MODE>mode);
rtx pred;
if (operands[2] == CONST0_RTX (<MODE>mode))
pred = operands[1];
@@ -1764,7 +1764,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -1793,7 +1793,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -1823,7 +1823,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2017,7 +2017,7 @@
UNSPEC_ADDV))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2039,7 +2039,7 @@
UNSPEC_FADDV))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2061,7 +2061,7 @@
MAXMINV))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2083,7 +2083,7 @@
FMAXMINV))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2104,7 +2104,7 @@
BITWISEV))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2126,7 +2126,7 @@
UNSPEC_FADDA))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2167,7 +2167,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2203,7 +2203,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2243,7 +2243,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2289,7 +2289,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2322,7 +2322,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2356,7 +2356,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2391,7 +2391,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2424,7 +2424,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2453,7 +2453,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2491,7 +2491,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2518,7 +2518,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2569,7 +2569,7 @@
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
@@ -2707,7 +2707,7 @@
? gen_aarch64_sve_zip2<mode>
: gen_aarch64_sve_zip1<mode>)
(temp, operands[1], operands[1]));
rtx ptrue = force_reg (<VWIDE_PRED>mode, CONSTM1_RTX (<VWIDE_PRED>mode));
rtx ptrue = aarch64_ptrue_reg (<VWIDE_PRED>mode);
emit_insn (gen_aarch64_sve_extend<mode><Vwide>2 (operands[0],
ptrue, temp));
DONE;
@@ -2733,7 +2733,7 @@
? gen_aarch64_sve_zip2vnx4si
: gen_aarch64_sve_zip1vnx4si)
(temp, operands[1], operands[1]));
rtx ptrue = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
rtx ptrue = aarch64_ptrue_reg (VNx2BImode);
emit_insn (gen_aarch64_sve_<FLOATUORS:optab>vnx4sivnx2df2 (operands[0],
ptrue, temp));
DONE;
@@ -2783,7 +2783,7 @@
(unspec:SVE_HSF [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
"TARGET_SVE"
{
operands[3] = force_reg (<VWIDE_PRED>mode, CONSTM1_RTX (<VWIDE_PRED>mode));
operands[3] = aarch64_ptrue_reg (<VWIDE_PRED>mode);
operands[4] = gen_reg_rtx (<MODE>mode);
operands[5] = gen_reg_rtx (<MODE>mode);
}
@@ -2805,7 +2805,7 @@
(unspec:VNx4SI [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
"TARGET_SVE"
{
operands[3] = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
operands[3] = aarch64_ptrue_reg (VNx2BImode);
operands[4] = gen_reg_rtx (VNx4SImode);
operands[5] = gen_reg_rtx (VNx4SImode);
}
@@ -3073,7 +3073,7 @@
(match_operand:SVE_I 2 "register_operand"))]
"TARGET_SVE"
{
rtx pred = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
rtx pred = aarch64_ptrue_reg (<VPRED>mode);
emit_insn (gen_aarch64_<su>abd<mode>_3 (operands[0], pred, operands[1],
operands[2]));
DONE;
gcc/config/aarch64/aarch64.c
@@ -2458,6 +2458,15 @@ aarch64_force_temporary (machine_mode mode, rtx x, rtx value)
}
}
/* Return an all-true predicate register of mode MODE. */
rtx
aarch64_ptrue_reg (machine_mode mode)
{
gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
return force_reg (mode, CONSTM1_RTX (mode));
}
/* Return true if we can move VALUE into a register using a single
CNT[BHWD] instruction. */
@@ -3187,7 +3196,7 @@ aarch64_expand_sve_widened_duplicate (rtx dest, scalar_int_mode src_mode,
machine_mode mode = GET_MODE (dest);
unsigned int elem_bytes = GET_MODE_UNIT_SIZE (mode);
machine_mode pred_mode = aarch64_sve_pred_mode (elem_bytes).require ();
rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
rtx ptrue = aarch64_ptrue_reg (pred_mode);
src = gen_rtx_UNSPEC (mode, gen_rtvec (2, ptrue, src), UNSPEC_LD1RQ);
emit_insn (gen_rtx_SET (dest, src));
return true;
@@ -3448,7 +3457,7 @@ void
aarch64_expand_sve_mem_move (rtx dest, rtx src, machine_mode pred_mode)
{
machine_mode mode = GET_MODE (dest);
rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
rtx ptrue = aarch64_ptrue_reg (pred_mode);
if (!register_operand (src, mode)
&& !register_operand (dest, mode))
{
@@ -3512,7 +3521,7 @@ aarch64_maybe_expand_sve_subreg_move (rtx dest, rtx src)
return false;
/* Generate *aarch64_sve_mov<mode>_subreg_be. */
rtx ptrue = force_reg (VNx16BImode, CONSTM1_RTX (VNx16BImode));
rtx ptrue = aarch64_ptrue_reg (VNx16BImode);
rtx unspec = gen_rtx_UNSPEC (GET_MODE (dest), gen_rtvec (2, ptrue, src),
UNSPEC_REV_SUBREG);
emit_insn (gen_rtx_SET (dest, unspec));
@@ -16753,7 +16762,7 @@ aarch64_evpc_rev_local (struct expand_vec_perm_d *d)
rtx src = gen_rtx_UNSPEC (d->vmode, gen_rtvec (1, d->op0), unspec);
if (d->vec_flags == VEC_SVE_DATA)
{
rtx pred = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
rtx pred = aarch64_ptrue_reg (pred_mode);
src = gen_rtx_UNSPEC (d->vmode, gen_rtvec (2, pred, src),
UNSPEC_MERGE_PTRUE);
}
@@ -17101,7 +17110,7 @@ aarch64_expand_sve_vec_cmp_int (rtx target, rtx_code code, rtx op0, rtx op1)
if (!aarch64_sve_cmp_operand_p (code, op1))
op1 = force_reg (data_mode, op1);
rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
rtx ptrue = aarch64_ptrue_reg (pred_mode);
rtx cond = gen_rtx_fmt_ee (code, pred_mode, op0, op1);
aarch64_emit_sve_ptrue_op_cc (target, ptrue, cond);
}
@@ -17160,7 +17169,7 @@ aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
machine_mode pred_mode = GET_MODE (target);
machine_mode data_mode = GET_MODE (op0);
rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
rtx ptrue = aarch64_ptrue_reg (pred_mode);
switch (code)
{
case UNORDERED: