Commit 8c83f71d by Kyrylo Tkachov Committed by Kyrylo Tkachov

[AArch64] Handle AND+ASHIFT form of UBFIZ correctly in costs

	* config/aarch64/aarch64.c (aarch64_mask_and_shift_for_ubfiz_p):
	New function.
	(aarch64_rtx_costs): Use it.  Rewrite CONST_INT_P (op1) case to handle
	mask+shift version.
	* config/aarch64/aarch64-protos.h (aarch64_mask_and_shift_for_ubfiz_p):
	New prototype.
	* config/aarch64/aarch64.md (*andim_ashift<mode>_bfiz): Replace
	matching condition with aarch64_mask_and_shift_for_ubfiz_p.

From-SVN: r237440
parent 124bed29
2016-06-14  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
* config/aarch64/aarch64.c (aarch64_mask_and_shift_for_ubfiz_p):
New function.
(aarch64_rtx_costs): Use it. Rewrite CONST_INT_P (op1) case to handle
mask+shift version.
* config/aarch64/aarch64-protos.h (aarch64_mask_and_shift_for_ubfiz_p):
New prototype.
* config/aarch64/aarch64.md (*andim_ashift<mode>_bfiz): Replace
matching condition with aarch64_mask_and_shift_for_ubfiz_p.
2016-06-14  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/71522
......
...@@ -318,6 +318,7 @@ bool aarch64_is_noplt_call_p (rtx); ...@@ -318,6 +318,7 @@ bool aarch64_is_noplt_call_p (rtx);
bool aarch64_label_mentioned_p (rtx); bool aarch64_label_mentioned_p (rtx);
void aarch64_declare_function_name (FILE *, const char*, tree); void aarch64_declare_function_name (FILE *, const char*, tree);
bool aarch64_legitimate_pic_operand_p (rtx); bool aarch64_legitimate_pic_operand_p (rtx);
bool aarch64_mask_and_shift_for_ubfiz_p (machine_mode, rtx, rtx);
bool aarch64_modes_tieable_p (machine_mode mode1, bool aarch64_modes_tieable_p (machine_mode mode1,
machine_mode mode2); machine_mode mode2);
bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx); bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
......
...@@ -6050,6 +6050,19 @@ aarch64_extend_bitfield_pattern_p (rtx x) ...@@ -6050,6 +6050,19 @@ aarch64_extend_bitfield_pattern_p (rtx x)
return op; return op;
} }
/* Return true if the mask and a shift amount from an RTX of the form
   (x << SHFT_AMNT) & MASK are valid to combine into a UBFIZ instruction of
   mode MODE.  See the *andim_ashift<mode>_bfiz pattern.

   MASK must be a power of two minus one after shifting it right by
   SHFT_AMNT (i.e. it selects a contiguous run of bits starting at bit
   SHFT_AMNT), and must not overlap the SHFT_AMNT low bits that the left
   shift has already zeroed.  */

bool
aarch64_mask_and_shift_for_ubfiz_p (machine_mode mode, rtx mask, rtx shft_amnt)
{
  /* Use UINTVAL and HOST_WIDE_INT_1U throughout: the original
     "(1 << INTVAL (shft_amnt)) - 1" shifted an int, which is undefined
     behavior (and computes the wrong mask) for DImode shift amounts of
     31 or more, and ">> INTVAL" would sign-extend a negative mask.  */
  return CONST_INT_P (mask) && CONST_INT_P (shft_amnt)
	 && INTVAL (shft_amnt) < GET_MODE_BITSIZE (mode)
	 && exact_log2 ((UINTVAL (mask) >> UINTVAL (shft_amnt)) + 1) >= 0
	 && (UINTVAL (mask)
	     & ((HOST_WIDE_INT_1U << UINTVAL (shft_amnt)) - 1)) == 0;
}
/* Calculate the cost of calculating X, storing it in *COST. Result /* Calculate the cost of calculating X, storing it in *COST. Result
is true if the total cost of the operation has now been calculated. */ is true if the total cost of the operation has now been calculated. */
static bool static bool
...@@ -6624,17 +6637,31 @@ cost_plus: ...@@ -6624,17 +6637,31 @@ cost_plus:
if (GET_MODE_CLASS (mode) == MODE_INT) if (GET_MODE_CLASS (mode) == MODE_INT)
{ {
/* We possibly get the immediate for free, this is not if (CONST_INT_P (op1))
modelled. */
if (CONST_INT_P (op1)
&& aarch64_bitmask_imm (INTVAL (op1), mode))
{ {
*cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed); /* We have a mask + shift version of a UBFIZ
i.e. the *andim_ashift<mode>_bfiz pattern. */
if (GET_CODE (op0) == ASHIFT
&& aarch64_mask_and_shift_for_ubfiz_p (mode, op1,
XEXP (op0, 1)))
{
*cost += rtx_cost (XEXP (op0, 0), mode,
(enum rtx_code) code, 0, speed);
if (speed)
*cost += extra_cost->alu.bfx;
if (speed) return true;
*cost += extra_cost->alu.logical; }
else if (aarch64_bitmask_imm (INTVAL (op1), mode))
{
/* We possibly get the immediate for free, this is not
modelled. */
*cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
if (speed)
*cost += extra_cost->alu.logical;
return true; return true;
}
} }
else else
{ {
......
...@@ -4380,9 +4380,7 @@ ...@@ -4380,9 +4380,7 @@
(and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r") (and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
(match_operand 2 "const_int_operand" "n")) (match_operand 2 "const_int_operand" "n"))
(match_operand 3 "const_int_operand" "n")))] (match_operand 3 "const_int_operand" "n")))]
"(INTVAL (operands[2]) < (<GPI:sizen>)) "aarch64_mask_and_shift_for_ubfiz_p (<MODE>mode, operands[3], operands[2])"
&& exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0
&& (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0"
"ubfiz\\t%<w>0, %<w>1, %2, %P3" "ubfiz\\t%<w>0, %<w>1, %2, %P3"
[(set_attr "type" "bfm")] [(set_attr "type" "bfm")]
) )
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment