Commit 9dff8d0b authored by Kyrylo Tkachov, committed by Kyrylo Tkachov

[ARM] Initialise cost to COSTS_N_INSNS (1) and increment in arm rtx costs

	* config/arm/arm.c (arm_new_rtx_costs): Initialise cost to
	COSTS_N_INSNS (1) and increment it appropriately throughout the
	function.

From-SVN: r225537
parent 3d1e4f66
2015-07-08  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

	* config/arm/arm.c (arm_new_rtx_costs): Initialise cost to
	COSTS_N_INSNS (1) and increment it appropriately throughout the
	function.

2015-07-08  Richard Biener  <rguenther@suse.de>

	* fold-const.c (fold_widened_comparison): Fix inverted comparison.
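The change is mechanical: arm_new_rtx_costs now sets *cost to COSTS_N_INSNS (1) once on entry, and each case that previously assigned a complete cost either drops its own initialisation or only adds the extra instructions it needs on top of that baseline. Below is a minimal, self-contained C sketch of the pattern, not the GCC sources themselves: COSTS_N_INSNS follows the definition in GCC's rtl.h, while the operation codes and the extra_alu_cost parameter are hypothetical stand-ins for the real rtx codes and per-core cost tables.

/* Minimal sketch of the costing pattern this patch introduces, assuming a
   simplified stand-in for arm_new_rtx_costs.  COSTS_N_INSNS follows the
   definition in GCC's rtl.h; the operation codes and the extra_alu_cost
   parameter are illustrative only.  */

#include <stdbool.h>

#define COSTS_N_INSNS(N) ((N) * 4)	/* One instruction = 4 cost units.  */

enum sketch_op { OP_SIMPLE_ALU, OP_DOUBLE_WORD };

static bool
sketch_rtx_costs (enum sketch_op op, bool speed_p, int extra_alu_cost,
		  int *cost)
{
  /* After the patch: the common single-instruction baseline is set once.  */
  *cost = COSTS_N_INSNS (1);

  switch (op)
    {
    case OP_SIMPLE_ALU:
      /* Before: *cost = COSTS_N_INSNS (1); followed by the extra cost.
	 Now only the per-core extra is added on top of the baseline.  */
      if (speed_p)
	*cost += extra_alu_cost;
      return false;

    case OP_DOUBLE_WORD:
      /* Before: *cost = COSTS_N_INSNS (2);
	 Now: one more instruction on top of the baseline.  */
      *cost += COSTS_N_INSNS (1);
      return false;
    }
  return false;
}

With the baseline hoisted, every per-case adjustment in the diff below reads as a delta over one instruction rather than a fresh assignment.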
@@ -9355,7 +9355,6 @@ arm_unspec_cost (rtx x, enum rtx_code /* outer_code */, bool speed_p, int *cost)
 case UNSPEC_VRINTR:
 case UNSPEC_VRINTX:
 case UNSPEC_VRINTA:
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[GET_MODE (x) == DFmode].roundint;
@@ -9418,6 +9417,8 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 machine_mode mode = GET_MODE (x);
+*cost = COSTS_N_INSNS (1);
 if (TARGET_THUMB1)
 {
 if (speed_p)
@@ -9519,8 +9520,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 bool is_ldm = load_multiple_operation (x, SImode);
 bool is_stm = store_multiple_operation (x, SImode);
-*cost = COSTS_N_INSNS (1);
 if (is_ldm || is_stm)
 {
 if (speed_p)
@@ -9547,10 +9546,10 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case UDIV:
 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
 && (mode == SFmode || !TARGET_VFP_SINGLE))
-*cost = COSTS_N_INSNS (speed_p
-? extra_cost->fp[mode != SFmode].div : 1);
+*cost += COSTS_N_INSNS (speed_p
+? extra_cost->fp[mode != SFmode].div : 0);
 else if (mode == SImode && TARGET_IDIV)
-*cost = COSTS_N_INSNS (speed_p ? extra_cost->mult[0].idiv : 1);
+*cost += COSTS_N_INSNS (speed_p ? extra_cost->mult[0].idiv : 0);
 else
 *cost = LIBCALL_COST (2);
 return false; /* All arguments must be in registers. */
@@ -9563,7 +9562,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case ROTATE:
 if (mode == SImode && REG_P (XEXP (x, 1)))
 {
-*cost = (COSTS_N_INSNS (2)
+*cost += (COSTS_N_INSNS (1)
 + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
 if (speed_p)
 *cost += extra_cost->alu.shift_reg;
@@ -9576,7 +9575,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case ASHIFTRT:
 if (mode == DImode && CONST_INT_P (XEXP (x, 1)))
 {
-*cost = (COSTS_N_INSNS (3)
+*cost += (COSTS_N_INSNS (2)
 + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
 if (speed_p)
 *cost += 2 * extra_cost->alu.shift;
@@ -9584,8 +9583,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 }
 else if (mode == SImode)
 {
-*cost = (COSTS_N_INSNS (1)
-+ rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
+*cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
 /* Slightly disparage register shifts at -Os, but not by much. */
 if (!CONST_INT_P (XEXP (x, 1)))
 *cost += (speed_p ? extra_cost->alu.shift_reg : 1
@@ -9597,8 +9595,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 if (code == ASHIFT)
 {
-*cost = (COSTS_N_INSNS (1)
-+ rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
+*cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
 /* Slightly disparage register shifts at -Os, but not by
 much. */
 if (!CONST_INT_P (XEXP (x, 1)))
@@ -9610,14 +9607,13 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (arm_arch_thumb2 && CONST_INT_P (XEXP (x, 1)))
 {
 /* Can use SBFX/UBFX. */
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.bfx;
 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
 }
 else
 {
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
 if (speed_p)
 {
@@ -9634,7 +9630,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 }
 else /* Rotates. */
 {
-*cost = COSTS_N_INSNS (3 + !CONST_INT_P (XEXP (x, 1)));
+*cost = COSTS_N_INSNS (2 + !CONST_INT_P (XEXP (x, 1)));
 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
 if (speed_p)
 {
@@ -9658,7 +9654,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 if (mode == SImode)
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.rev;
@@ -9671,7 +9666,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 and thumb_legacy_rev for the form of RTL used then. */
 if (TARGET_THUMB)
 {
-*cost = COSTS_N_INSNS (10);
+*cost += COSTS_N_INSNS (9);
 if (speed_p)
 {
@@ -9681,7 +9676,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 }
 else
 {
-*cost = COSTS_N_INSNS (5);
+*cost += COSTS_N_INSNS (4);
 if (speed_p)
 {
@@ -9698,7 +9693,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
 && (mode == SFmode || !TARGET_VFP_SINGLE))
 {
-*cost = COSTS_N_INSNS (1);
 if (GET_CODE (XEXP (x, 0)) == MULT
 || GET_CODE (XEXP (x, 1)) == MULT)
 {
@@ -9743,8 +9737,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 rtx shift_op;
 rtx non_shift_op;
-*cost = COSTS_N_INSNS (1);
 shift_op = shifter_op_p (XEXP (x, 0), &shift_by_reg);
 if (shift_op == NULL)
 {
@@ -9812,7 +9804,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 HANDLE_NARROW_SHIFT_ARITH (MINUS, 1)
 /* Slightly disparage, as we might need to widen the result. */
-*cost = 1 + COSTS_N_INSNS (1);
+*cost += 1;
 if (speed_p)
 *cost += extra_cost->alu.arith;
@@ -9827,7 +9819,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (mode == DImode)
 {
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
 {
@@ -9882,7 +9874,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
 && (mode == SFmode || !TARGET_VFP_SINGLE))
 {
-*cost = COSTS_N_INSNS (1);
 if (GET_CODE (XEXP (x, 0)) == MULT)
 {
 rtx mul_op0, mul_op1, add_op;
@@ -9939,7 +9930,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 /* Slightly penalize a narrow operation as the result may
 need widening. */
-*cost = 1 + COSTS_N_INSNS (1);
+*cost += 1;
 if (speed_p)
 *cost += extra_cost->alu.arith;
@@ -9950,7 +9941,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 rtx shift_op, shift_reg;
-*cost = COSTS_N_INSNS (1);
 if (TARGET_INT_SIMD
 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
@@ -9985,8 +9975,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 rtx mul_op = XEXP (x, 0);
-*cost = COSTS_N_INSNS (1);
 if (TARGET_DSP_MULTIPLY
 && ((GET_CODE (XEXP (mul_op, 0)) == SIGN_EXTEND
 && (GET_CODE (XEXP (mul_op, 1)) == SIGN_EXTEND
@@ -10046,7 +10034,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == SIGN_EXTEND
 && GET_CODE (XEXP (XEXP (x, 0), 1)) == SIGN_EXTEND)))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->mult[1].extend_add;
 *cost += (rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), mode,
@@ -10057,7 +10044,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 return true;
 }
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
@@ -10085,7 +10072,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case IOR:
 if (mode == SImode && arm_arch6 && aarch_rev16_p (x))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.rev;
@@ -10099,8 +10085,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 rtx op0 = XEXP (x, 0);
 rtx shift_op, shift_reg;
-*cost = COSTS_N_INSNS (1);
 if (subcode == NOT
 && (code == AND
 || (code == IOR && TARGET_THUMB2)))
@@ -10149,7 +10133,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 rtx op0 = XEXP (x, 0);
 enum rtx_code subcode = GET_CODE (op0);
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (subcode == NOT
 && (code == AND
@@ -10193,8 +10177,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 rtx op0 = XEXP (x, 0);
-*cost = COSTS_N_INSNS (1);
 if (GET_CODE (op0) == NEG)
 op0 = XEXP (op0, 0);
@@ -10213,7 +10195,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (mode == SImode)
 {
-*cost = COSTS_N_INSNS (1);
 if (TARGET_DSP_MULTIPLY
 && ((GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
 && (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
@@ -10249,7 +10230,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
 && GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->mult[1].extend;
 *cost += (rtx_cost (XEXP (XEXP (x, 0), 0), VOIDmode,
@@ -10271,7 +10251,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
 && (mode == SFmode || !TARGET_VFP_SINGLE))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[mode != SFmode].neg;
@@ -10287,7 +10266,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 if (GET_CODE (XEXP (x, 0)) == ABS)
 {
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 /* Assume the non-flag-changing variant. */
 if (speed_p)
 *cost += (extra_cost->alu.log_shift
@@ -10299,7 +10278,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMPARE
 || GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMM_COMPARE)
 {
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 /* No extra cost for MOV imm and MVN imm. */
 /* If the comparison op is using the flags, there's no further
 cost, otherwise we need to add the cost of the comparison. */
@@ -10318,7 +10297,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 }
 return true;
 }
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.arith;
 return false;
@@ -10328,7 +10307,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 && GET_MODE_SIZE (mode) < 4)
 {
 /* Slightly disparage, as we might need an extend operation. */
-*cost = 1 + COSTS_N_INSNS (1);
+*cost += 1;
 if (speed_p)
 *cost += extra_cost->alu.arith;
 return false;
@@ -10336,7 +10315,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (mode == DImode)
 {
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (speed_p)
 *cost += 2 * extra_cost->alu.arith;
 return false;
@@ -10352,7 +10331,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 rtx shift_op;
 rtx shift_reg = NULL;
-*cost = COSTS_N_INSNS (1);
 shift_op = shifter_op_p (XEXP (x, 0), &shift_reg);
 if (shift_op)
@@ -10375,7 +10353,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 }
 if (mode == DImode)
 {
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 return false;
 }
@@ -10388,7 +10366,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
 {
-*cost = COSTS_N_INSNS (4);
+*cost += COSTS_N_INSNS (3);
 return true;
 }
 int op1cost = rtx_cost (XEXP (x, 1), mode, SET, 1, speed_p);
@@ -10431,7 +10409,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (op0mode) == MODE_FLOAT
 && (op0mode == SFmode || !TARGET_VFP_SINGLE))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[op0mode != SFmode].compare;
@@ -10452,7 +10429,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 /* DImode compares normally take two insns. */
 if (op0mode == DImode)
 {
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (speed_p)
 *cost += 2 * extra_cost->alu.arith;
 return false;
@@ -10490,7 +10467,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 shift_op = shifter_op_p (XEXP (x, 0), &shift_reg);
 if (shift_op != NULL)
 {
-*cost = COSTS_N_INSNS (1);
 if (shift_reg != NULL)
 {
 *cost += rtx_cost (shift_reg, op0mode, ASHIFT,
@@ -10505,7 +10481,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 return true;
 }
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.arith;
 if (CONST_INT_P (XEXP (x, 1))
@@ -10549,7 +10524,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 && XEXP (x, 1) == const0_rtx)
 {
 /* Thumb also needs an IT insn. */
-*cost = COSTS_N_INSNS (TARGET_THUMB ? 3 : 2);
+*cost += COSTS_N_INSNS (TARGET_THUMB ? 2 : 1);
 return true;
 }
 if (XEXP (x, 1) == const0_rtx)
@@ -10558,7 +10533,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 case LT:
 /* LSR Rd, Rn, #31. */
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.shift;
 break;
@@ -10570,13 +10544,13 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case NE:
 /* SUBS T1, Rn, #1
 SBC Rd, Rn, T1. */
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 break;
 case LE:
 /* RSBS T1, Rn, Rn, LSR #31
 ADC Rd, Rn, T1. */
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.arith_shift;
 break;
@@ -10584,7 +10558,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case GT:
 /* RSB Rd, Rn, Rn, ASR #1
 LSR Rd, Rd, #31. */
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (speed_p)
 *cost += (extra_cost->alu.arith_shift
 + extra_cost->alu.shift);
@@ -10593,7 +10567,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case GE:
 /* ASR Rd, Rn, #31
 ADD Rd, Rn, #1. */
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.shift;
 break;
@@ -10609,7 +10583,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 }
 else
 {
-*cost = COSTS_N_INSNS (TARGET_THUMB ? 4 : 3);
+*cost += COSTS_N_INSNS (TARGET_THUMB ? 3 : 2);
 if (CONST_INT_P (XEXP (x, 1))
 && const_ok_for_op (INTVAL (XEXP (x, 1)), COMPARE))
 {
@@ -10637,7 +10611,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
 && (mode == SFmode || !TARGET_VFP_SINGLE))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[mode != SFmode].neg;
@@ -10651,7 +10624,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (mode == SImode)
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.log_shift + extra_cost->alu.arith_shift;
 return false;
@@ -10687,7 +10659,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (GET_MODE (XEXP (x, 0)) != SImode && arm_arch6)
 {
 /* We have SXTB/SXTH. */
-*cost = COSTS_N_INSNS (1);
 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
 if (speed_p)
 *cost += extra_cost->alu.extend;
@@ -10695,7 +10666,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 else if (GET_MODE (XEXP (x, 0)) != SImode)
 {
 /* Needs two shifts. */
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
 if (speed_p)
 *cost += 2 * extra_cost->alu.shift;
@@ -10733,14 +10704,12 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 optimizing for speed it should never be slower to use
 AND, and we don't really model 16-bit vs 32-bit insns
 here. */
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.logical;
 }
 else if (GET_MODE (XEXP (x, 0)) != SImode && arm_arch6)
 {
 /* We have UXTB/UXTH. */
-*cost = COSTS_N_INSNS (1);
 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
 if (speed_p)
 *cost += extra_cost->alu.extend;
@@ -10756,8 +10725,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (speed_p)
 *cost += 2 * extra_cost->alu.shift;
 }
-else /* GET_MODE (XEXP (x, 0)) == SImode. */
-*cost = COSTS_N_INSNS (1);
 /* Widening beyond 32-bits requires one more insn. */
 if (mode == DImode)
@@ -10815,12 +10782,12 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (speed_p)
 {
 if (arm_arch_thumb2 && !flag_pic)
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 else
-*cost = COSTS_N_INSNS (1) + extra_cost->ldst.load;
+*cost += extra_cost->ldst.load;
 }
 else
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (flag_pic)
 {
@@ -10842,7 +10809,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 if (vfp3_const_double_rtx (x))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[mode == DFmode].fpconst;
 return true;
@@ -10850,14 +10816,13 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 if (speed_p)
 {
-*cost = COSTS_N_INSNS (1);
 if (mode == DFmode)
 *cost += extra_cost->ldst.loadd;
 else
 *cost += extra_cost->ldst.loadf;
 }
 else
-*cost = COSTS_N_INSNS (2 + (mode == DFmode));
+*cost += COSTS_N_INSNS (1 + (mode == DFmode));
 return true;
 }
@@ -10877,7 +10842,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case HIGH:
 case LO_SUM:
-*cost = COSTS_N_INSNS (1);
 /* When optimizing for size, we prefer constant pool entries to
 MOVW/MOVT pairs, so bump the cost of these slightly. */
 if (!speed_p)
@@ -10885,7 +10849,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 return true;
 case CLZ:
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.clz;
 return false;
@@ -10893,7 +10856,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case SMIN:
 if (XEXP (x, 1) == const0_rtx)
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.log_shift;
 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
@@ -10903,7 +10865,7 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case SMAX:
 case UMIN:
 case UMAX:
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 return false;
 case TRUNCATE:
@@ -10917,7 +10879,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))
 == ZERO_EXTEND))))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->mult[1].extend;
 *cost += (rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), VOIDmode,
@@ -10948,14 +10909,13 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 && CONST_INT_P (XEXP (x, 1))
 && CONST_INT_P (XEXP (x, 2)))
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->alu.bfx;
 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
 return true;
 }
 /* Without UBFX/SBFX, need to resort to shift operations. */
-*cost = COSTS_N_INSNS (2);
+*cost += COSTS_N_INSNS (1);
 if (speed_p)
 *cost += 2 * extra_cost->alu.shift;
 *cost += rtx_cost (XEXP (x, 0), mode, ASHIFT, 0, speed_p);
@@ -10964,7 +10924,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case FLOAT_EXTEND:
 if (TARGET_HARD_FLOAT)
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[mode == DFmode].widen;
 if (!TARGET_FPU_ARMV8
@@ -10986,7 +10945,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 case FLOAT_TRUNCATE:
 if (TARGET_HARD_FLOAT)
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[mode == DFmode].narrow;
 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
@@ -11003,7 +10961,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 rtx op1 = XEXP (x, 1);
 rtx op2 = XEXP (x, 2);
-*cost = COSTS_N_INSNS (1);
 /* vfms or vfnma. */
 if (GET_CODE (op0) == NEG)
@@ -11032,7 +10989,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 if (GET_MODE_CLASS (mode) == MODE_INT)
 {
-*cost = COSTS_N_INSNS (1);
 mode = GET_MODE (XEXP (x, 0));
 if (speed_p)
 *cost += extra_cost->fp[mode == DFmode].toint;
@@ -11049,7 +11005,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 else if (GET_MODE_CLASS (mode) == MODE_FLOAT
 && TARGET_FPU_ARMV8)
 {
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[mode == DFmode].roundint;
 return false;
@@ -11065,7 +11020,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 {
 /* ??? Increase the cost to deal with transferring from CORE
 -> FP registers? */
-*cost = COSTS_N_INSNS (1);
 if (speed_p)
 *cost += extra_cost->fp[mode == DFmode].fromint;
 return false;
@@ -11074,7 +11028,6 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
 return false;
 case CALL:
-*cost = COSTS_N_INSNS (1);
 return true;
 case ASM_OPERANDS: