Commit fb0cb7fa by Kyrylo Tkachov Committed by Kyrylo Tkachov

[AArch64] Add alternative 'extr' pattern, calculate rtx cost properly

	* config/aarch64/aarch64.md (*extr<mode>5_insn_alt): New pattern.
	(*extrsi5_insn_uxtw_alt): Likewise.
	* config/aarch64/aarch64.c (aarch64_extr_rtx_p): New function.
	(aarch64_rtx_costs, IOR case): Use above to properly cost extr
	operations.

From-SVN: r222639
parent 19261b99
2015-04-30 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
* config/aarch64/aarch64.md (*extr<mode>5_insn_alt): New pattern.
(*extrsi5_insn_uxtw_alt): Likewise.
* config/aarch64/aarch64.c (aarch64_extr_rtx_p): New function.
(aarch64_rtx_costs, IOR case): Use above to properly cost extr
operations.
2015-04-30 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
* config/aarch64/aarch64.c (aarch64_rtx_costs): Handle pattern for
fabd in ABS case.
......
...@@ -5445,6 +5445,51 @@ aarch64_frint_unspec_p (unsigned int u) ...@@ -5445,6 +5445,51 @@ aarch64_frint_unspec_p (unsigned int u)
} }
} }
/* Return true iff X is an rtx that will match an extr instruction
   i.e. as described in the *extr<mode>5_insn family of patterns.
   On success *RES_OP0 and *RES_OP1 receive the two shifted source
   operands; on failure both are set to NULL_RTX.  */

static bool
aarch64_extr_rtx_p (rtx x, rtx *res_op0, rtx *res_op1)
{
  *res_op0 = NULL_RTX;
  *res_op1 = NULL_RTX;

  if (GET_CODE (x) != IOR)
    return false;

  rtx lhs = XEXP (x, 0);
  rtx rhs = XEXP (x, 1);
  machine_mode mode = GET_MODE (x);

  /* Normalise locally so that LHS holds the ASHIFT and RHS the
     LSHIFTRT; the IOR may present them in either order.  */
  if (GET_CODE (lhs) == LSHIFTRT && GET_CODE (rhs) == ASHIFT)
    std::swap (lhs, rhs);

  if (GET_CODE (lhs) != ASHIFT || GET_CODE (rhs) != LSHIFTRT)
    return false;

  /* Both shift amounts must be compile-time constants.  */
  if (!CONST_INT_P (XEXP (lhs, 1)) || !CONST_INT_P (XEXP (rhs, 1)))
    return false;

  unsigned HOST_WIDE_INT shift_ashift = UINTVAL (XEXP (lhs, 1));
  unsigned HOST_WIDE_INT shift_lshiftrt = UINTVAL (XEXP (rhs, 1));

  /* The two amounts must partition the width of the mode, mirroring
     the condition on the *extr<mode>5_insn patterns.  */
  if (shift_ashift >= GET_MODE_BITSIZE (mode)
      || shift_ashift + shift_lshiftrt != GET_MODE_BITSIZE (mode))
    return false;

  *res_op0 = XEXP (lhs, 0);
  *res_op1 = XEXP (rhs, 0);
  return true;
}
/* Calculate the cost of calculating (if_then_else (OP0) (OP1) (OP2)),
   storing it in *COST.  Result is true if the total cost of the operation
   has now been calculated.  */
...@@ -5977,6 +6022,16 @@ cost_plus: ...@@ -5977,6 +6022,16 @@ cost_plus:
return true; return true;
} }
/* An IOR of opposing constant shifts whose amounts sum to the mode
   width matches the extr patterns: cost it as the two shifted source
   operands plus (when optimizing for speed) a single shift-class ALU
   operation, rather than as two shifts and an IOR.  */
if (aarch64_extr_rtx_p (x, &op0, &op1))
{
/* op0/op1 are the inner shifted operands returned by
   aarch64_extr_rtx_p, not the shifts themselves.  */
*cost += rtx_cost (op0, IOR, 0, speed)
+ rtx_cost (op1, IOR, 1, speed);
if (speed)
*cost += extra_cost->alu.shift;
return true;
}
/* Fall through. */
case XOR:
case AND:
......
...@@ -3597,6 +3597,21 @@ ...@@ -3597,6 +3597,21 @@
[(set_attr "type" "shift_imm")] [(set_attr "type" "shift_imm")]
) )
;; There are no canonicalisation rules for ashift and lshiftrt inside an ior
;; so we have to match both orderings.
;; This is the operand-swapped twin of *extr<mode>5_insn (lshiftrt listed
;; first inside the ior).  The condition requires the two shift amounts to
;; sum to the register width, which is what makes the ior equivalent to a
;; single EXTR extracting a <MODE>-wide window from %1:%2 at bit %4.
(define_insn "*extr<mode>5_insn_alt"
[(set (match_operand:GPI 0 "register_operand" "=r")
(ior:GPI (lshiftrt:GPI (match_operand:GPI 2 "register_operand" "r")
(match_operand 4 "const_int_operand" "n"))
(ashift:GPI (match_operand:GPI 1 "register_operand" "r")
(match_operand 3 "const_int_operand" "n"))))]
"UINTVAL (operands[3]) < GET_MODE_BITSIZE (<MODE>mode)
&& (UINTVAL (operands[3]) + UINTVAL (operands[4])
== GET_MODE_BITSIZE (<MODE>mode))"
"extr\\t%<w>0, %<w>1, %<w>2, %4"
[(set_attr "type" "shift_imm")]
)
;; zero_extend version of the above
(define_insn "*extrsi5_insn_uxtw"
[(set (match_operand:DI 0 "register_operand" "=r")
...@@ -3611,6 +3626,19 @@ ...@@ -3611,6 +3626,19 @@
[(set_attr "type" "shift_imm")] [(set_attr "type" "shift_imm")]
) )
;; zero_extend version of *extr<mode>5_insn_alt: same swapped
;; lshiftrt/ashift ordering, restricted to SImode with the 32-bit
;; result implicitly zero-extended to DImode.
;; Formatting fix: break the condition line BEFORE the && operator,
;; matching *extr<mode>5_insn_alt and GNU coding conventions, instead
;; of leaving a trailing && at end of line.
(define_insn "*extrsi5_insn_uxtw_alt"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(zero_extend:DI
	 (ior:SI (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
			      (match_operand 4 "const_int_operand" "n"))
		 (ashift:SI (match_operand:SI 1 "register_operand" "r")
			    (match_operand 3 "const_int_operand" "n")))))]
  "UINTVAL (operands[3]) < 32
   && (UINTVAL (operands[3]) + UINTVAL (operands[4]) == 32)"
  "extr\\t%w0, %w1, %w2, %4"
  [(set_attr "type" "shift_imm")]
)
(define_insn "*ror<mode>3_insn"
[(set (match_operand:GPI 0 "register_operand" "=r")
(rotate:GPI (match_operand:GPI 1 "register_operand" "r")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment