Commit d4b5c77d by Kyrylo Tkachov Committed by Kyrylo Tkachov

[ARM][2/4] Replace casts of 1 to HOST_WIDE_INT by HOST_WIDE_INT_1 and HOST_WIDE_INT_1U

	* config/arm/arm.md (andsi3): Replace cast of 1 to HOST_WIDE_INT
	with HOST_WIDE_INT_1.
	(insv): Likewise.
	* config/arm/arm.c (optimal_immediate_sequence): Replace cast of
	1 to unsigned HOST_WIDE_INT with HOST_WIDE_INT_1U.
	(arm_canonicalize_comparison): Likewise.
	(thumb1_rtx_costs): Replace cast of 1 to HOST_WIDE_INT with
	HOST_WIDE_INT_1.
	(thumb1_size_rtx_costs): Likewise.
	(vfp3_const_double_index): Replace cast of 1 to unsigned
	HOST_WIDE_INT with HOST_WIDE_INT_1U.
	(get_jump_table_size): Replace cast of 1 to HOST_WIDE_INT with
	HOST_WIDE_INT_1.
	(arm_asan_shadow_offset): Replace cast of 1 to unsigned
	HOST_WIDE_INT with HOST_WIDE_INT_1U.
	* config/arm/neon.md (vec_set<mode>): Replace cast of 1 to
	HOST_WIDE_INT with HOST_WIDE_INT_1.

From-SVN: r236638
parent abd3a68c
2016-05-24 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
* config/arm/arm.md (andsi3): Replace cast of 1 to HOST_WIDE_INT
with HOST_WIDE_INT_1.
(insv): Likewise.
* config/arm/arm.c (optimal_immediate_sequence): Replace cast of
1 to unsigned HOST_WIDE_INT with HOST_WIDE_INT_1U.
(arm_canonicalize_comparison): Likewise.
(thumb1_rtx_costs): Replace cast of 1 to HOST_WIDE_INT with
HOST_WIDE_INT_1.
(thumb1_size_rtx_costs): Likewise.
(vfp3_const_double_index): Replace cast of 1 to unsigned
HOST_WIDE_INT with HOST_WIDE_INT_1U.
(get_jump_table_size): Replace cast of 1 to HOST_WIDE_INT with
HOST_WIDE_INT_1.
(arm_asan_shadow_offset): Replace cast of 1 to unsigned
HOST_WIDE_INT with HOST_WIDE_INT_1U.
* config/arm/neon.md (vec_set<mode>): Replace cast of 1 to
HOST_WIDE_INT with HOST_WIDE_INT_1.
2016-05-24 Marek Polacek <polacek@redhat.com>
* tree-cfg.h (should_remove_lhs_p): New predicate.
......
......@@ -4113,7 +4113,7 @@ optimal_immediate_sequence (enum rtx_code code, unsigned HOST_WIDE_INT val,
yield a shorter sequence, we may as well use zero. */
insns1 = optimal_immediate_sequence_1 (code, val, return_sequence, best_start);
if (best_start != 0
&& ((((unsigned HOST_WIDE_INT) 1) << best_start) < val))
&& ((HOST_WIDE_INT_1U << best_start) < val))
{
insns2 = optimal_immediate_sequence_1 (code, val, &tmp_sequence, 0);
if (insns2 <= insns1)
......@@ -4944,7 +4944,7 @@ arm_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
if (mode == VOIDmode)
mode = GET_MODE (*op1);
maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
maxval = (HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1)) - 1;
/* For DImode, we have GE/LT/GEU/LTU comparisons. In ARM mode
we can also use cmp/cmpeq for GTU/LEU. GT/LE must be either
......@@ -8320,8 +8320,8 @@ thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
int i;
/* This duplicates the tests in the andsi3 expander. */
for (i = 9; i <= 31; i++)
if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (x)
|| (((HOST_WIDE_INT) 1) << i) - 1 == ~INTVAL (x))
if ((HOST_WIDE_INT_1 << i) - 1 == INTVAL (x)
|| (HOST_WIDE_INT_1 << i) - 1 == ~INTVAL (x))
return COSTS_N_INSNS (2);
}
else if (outer == ASHIFT || outer == ASHIFTRT
......@@ -9082,8 +9082,8 @@ thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
int i;
/* This duplicates the tests in the andsi3 expander. */
for (i = 9; i <= 31; i++)
if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (x)
|| (((HOST_WIDE_INT) 1) << i) - 1 == ~INTVAL (x))
if ((HOST_WIDE_INT_1 << i) - 1 == INTVAL (x)
|| (HOST_WIDE_INT_1 << i) - 1 == ~INTVAL (x))
return COSTS_N_INSNS (2);
}
else if (outer == ASHIFT || outer == ASHIFTRT
......@@ -12258,7 +12258,7 @@ vfp3_const_double_index (rtx x)
/* We can permit four significant bits of mantissa only, plus a high bit
which is always 1. */
mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
mask = (HOST_WIDE_INT_1U << (point_pos - 5)) - 1;
if ((mantissa & mask) != 0)
return -1;
......@@ -16364,7 +16364,7 @@ get_jump_table_size (rtx_jump_table_data *insn)
{
case 1:
/* Round up size of TBB table to a halfword boundary. */
size = (size + 1) & ~(HOST_WIDE_INT)1;
size = (size + 1) & ~HOST_WIDE_INT_1;
break;
case 2:
/* No padding necessary for TBH. */
......@@ -29859,7 +29859,7 @@ aarch_macro_fusion_pair_p (rtx_insn* prev, rtx_insn* curr)
static unsigned HOST_WIDE_INT
arm_asan_shadow_offset (void)
{
return (unsigned HOST_WIDE_INT) 1 << 29;
return HOST_WIDE_INT_1U << 29;
}
......
......@@ -2140,13 +2140,13 @@
for (i = 9; i <= 31; i++)
{
if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
if ((HOST_WIDE_INT_1 << i) - 1 == INTVAL (operands[2]))
{
emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
const0_rtx));
DONE;
}
else if ((((HOST_WIDE_INT) 1) << i) - 1
else if ((HOST_WIDE_INT_1 << i) - 1
== ~INTVAL (operands[2]))
{
rtx shift = GEN_INT (i);
......@@ -2445,7 +2445,7 @@
{
int start_bit = INTVAL (operands[2]);
int width = INTVAL (operands[1]);
HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
HOST_WIDE_INT mask = (HOST_WIDE_INT_1 << width) - 1;
rtx target, subtarget;
if (arm_arch_thumb2)
......
......@@ -406,7 +406,7 @@
(match_operand:SI 2 "immediate_operand" "")]
"TARGET_NEON"
{
HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
HOST_WIDE_INT elem = HOST_WIDE_INT_1 << INTVAL (operands[2]);
emit_insn (gen_vec_set<mode>_internal (operands[0], operands[1],
GEN_INT (elem), operands[0]));
DONE;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment