Commit ed052e94 by Alan Lawrence Committed by Alan Lawrence

Relax check against commuting XOR and ASHIFTRT in combine.c

gcc/:
	* combine.c (simplify_shift_const_1): Allow commuting (ashiftrt (xor))
	when result_mode == shift_mode.

gcc/testsuite/:
	* gcc.dg/combine_ashiftrt_1.c: New test.
	* gcc.dg/combine_ashiftrt_2.c: Likewise.
	* gcc.target/aarch64/singleton_intrinsics_1.c: Remove scan-assembler
	workarounds for cmge.
	* gcc.target/aarch64/simd/int_comparisons_1.c: Likewise; also check for
	absence of mvn.

From-SVN: r215531
parent 43b1b9ed
2014-09-23 Alan Lawrence <alan.lawrence@arm.com>
* combine.c (simplify_shift_const_1): Allow commuting (ashiftrt (xor))
when result_mode == shift_mode.
2014-09-23 Kostya Serebryany <kcc@google.com>
Update to match the changed asan API.
......@@ -10255,8 +10255,10 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
if (CONST_INT_P (XEXP (varop, 1))
/* We can't do this if we have (ashiftrt (xor)) and the
constant has its sign bit set in shift_mode. */
constant has its sign bit set in shift_mode with shift_mode
wider than result_mode. */
&& !(code == ASHIFTRT && GET_CODE (varop) == XOR
&& result_mode != shift_mode
&& 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
shift_mode))
&& (new_rtx = simplify_const_binary_operation
......@@ -10273,10 +10275,12 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
/* If we can't do that, try to simplify the shift in each arm of the
logical expression, make a new logical expression, and apply
the inverse distributive law. This also can't be done
for some (ashiftrt (xor)). */
the inverse distributive law. This also can't be done for
(ashiftrt (xor)) where we've widened the shift and the constant
changes the sign bit. */
if (CONST_INT_P (XEXP (varop, 1))
&& !(code == ASHIFTRT && GET_CODE (varop) == XOR
&& result_mode != shift_mode
&& 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
shift_mode)))
{
......
2014-09-23 Alan Lawrence <alan.lawrence@arm.com>
* gcc.dg/combine_ashiftrt_1.c: New test.
* gcc.dg/combine_ashiftrt_2.c: Likewise.
* gcc.target/aarch64/singleton_intrinsics_1.c: Remove scan-assembler
workarounds for cmge.
* gcc.target/aarch64/simd/int_comparisons_1.c: Likewise; also check for
absence of mvn.
2014-09-23 Paolo Carlini <paolo.carlini@oracle.com>
PR c++/61857
......
/* { dg-do compile {target sparc64*-*-* aarch64*-*-* x86_64-*-* powerpc64*-*-*} } */
/* { dg-options "-O2 -fdump-rtl-combine-all" } */
/* Local definition of int64_t so the test needs no system headers.  */
typedef long long int int64_t;
/* Returns -1 if a >= 0, else 0.  The expression must stay written
   exactly as (~a) >> 63 so that it expands to the RTL
   (ashiftrt (not a) 63): the dg-final scan below verifies that the
   combine pass attempts to rewrite this to (neg (ge a 0)).
   NOTE(review): right-shifting a negative signed value is
   implementation-defined in ISO C; GCC defines it as an arithmetic
   shift, which this test relies on.  */
int64_t
foo (int64_t a)
{
return (~a) >> 63;
}
/* The combine phase will try to combine not & ashiftrt, and
combine_simplify_rtx should transform (ashiftrt (not x) 63)
to (not (ashiftrt x 63)) and then to (neg (ge x 0)). We look for
the *attempt* to match this RTL pattern, regardless of whether an
actual insn may be found on the platform. */
/* { dg-final { scan-rtl-dump "\\(neg:DI \\(ge:DI" "combine" } } */
/* { dg-final { cleanup-rtl-dump "combine" } } */
/* { dg-do compile {target arm*-*-* i?86-*-* powerpc-*-* sparc-*-*} } */
/* { dg-options "-O2 -fdump-rtl-combine-all" } */
/* Local 32-bit type — long is presumably 32 bits on the 32-bit
   targets this test is restricted to (arm, i?86, powerpc, sparc);
   confirm if the target list is ever extended.  */
typedef long int32_t;
/* Returns -1 if a >= 0, else 0.  Keep the expression written exactly
   as (~a) >> 31 so it expands to the RTL (ashiftrt (not a) 31);
   the dg-final scan checks that combine attempts to rewrite this to
   (neg (ge a 0)).  Right-shifting a negative signed value is
   implementation-defined in ISO C; GCC guarantees an arithmetic
   shift, which this test relies on.  */
int32_t
foo (int32_t a)
{
return (~a) >> 31;
}
/* The combine phase will try to combine not & ashiftrt, and
combine_simplify_rtx should transform (ashiftrt (not x) 31)
to (not (ashiftrt x 31)) and then to (neg (ge x 0)). We look for
the *attempt* to match this RTL pattern, regardless of whether an
actual insn may be found on the platform. */
/* { dg-final { scan-rtl-dump "\\(neg:SI \\(ge:SI" "combine" } } */
/* { dg-final { cleanup-rtl-dump "combine" } } */
......@@ -30,18 +30,16 @@
/* Comparisons against immediate zero, on the 8 signed integer types only. */
/* { dg-final { scan-assembler-times "\[ \t\]cmge\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
/* For int64_t and int64x1_t, combine_simplify_rtx failure of
https://gcc.gnu.org/ml/gcc/2014-06/msg00253.html
prevents generation of cmge....#0, instead producing mvn + sshr. */
/* { #dg-final { scan-assembler-times "\[ \t\]cmge\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
/* { dg-final { scan-assembler-times "\[ \t\]cmge\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
/* { dg-final { scan-assembler-times "\[ \t\]cmgt\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
/* { dg-final { scan-assembler-times "\[ \t\]cmgt\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
/* { dg-final { scan-assembler-times "\[ \t\]cmle\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
/* { dg-final { scan-assembler-times "\[ \t\]cmle\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
/* { dg-final { scan-assembler-times "\[ \t\]cmlt\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
/* For int64_t and int64x1_t, cmlt ... #0 and sshr ... #63 are equivalent,
so allow either. cmgez issue above results in extra 2 * sshr....63. */
/* { dg-final { scan-assembler-times "\[ \t\](?:cmlt|sshr)\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?(?:0|63)" 4 } } */
so allow either. */
/* { dg-final { scan-assembler-times "\[ \t\](?:cmlt|sshr)\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?(?:0|63)" 2 } } */
// All should have been compiled into single insns without inverting result:
/* { dg-final { scan-assembler-not "\[ \t\]not\[ \t\]" } } */
/* { dg-final { scan-assembler-not "\[ \t\]mvn\[ \t\]" } } */
......@@ -57,8 +57,7 @@ test_vcle_s64 (int64x1_t a, int64x1_t b)
return vcle_s64 (a, b);
}
/* Idiom recognition will cause this testcase not to generate
the expected cmge instruction, so do not check for it. */
/* { dg-final { scan-assembler-times "\\tcmge\\td\[0-9\]+, d\[0-9\]+, #?0" 1 } } */
uint64x1_t
test_vcgez_s64 (int64x1_t a)
......@@ -236,8 +235,8 @@ test_vrshl_u64 (uint64x1_t a, int64x1_t b)
return vrshl_u64 (a, b);
}
/* { dg-final { scan-assembler-times "\\tsshr\\td\[0-9\]+" 3 } } */
/* Idiom recognition compiles vcltz and vcgez to sshr rather than cmlt/cmge. */
/* For int64x1_t, sshr...#63 is output instead of the equivalent cmlt...#0. */
/* { dg-final { scan-assembler-times "\\tsshr\\td\[0-9\]+" 2 } } */
int64x1_t
test_vshr_n_s64 (int64x1_t a)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment