Commit 46e778c4 authored by James Greenhalgh, committed by James Greenhalgh

[AArch64] [3/4 Fix vtbx1] Implement bsl intrinsics using builtins

gcc/
	* config/aarch64/aarch64-builtins.c
	(aarch64_types_bsl_p_qualifiers): New.
	(aarch64_types_bsl_s_qualifiers): Likewise.
	(aarch64_types_bsl_u_qualifiers): Likewise.
	(TYPES_BSL_P): Likewise.
	(TYPES_BSL_S): Likewise.
	(TYPES_BSL_U): Likewise.
	(BUILTIN_VALLDIF): Likewise.
	(BUILTIN_VDQQH): Likewise.
	* config/aarch64/aarch64-simd-builtins.def (simd_bsl): New.
	* config/aarch64/aarch64-simd.md
	(aarch64_simd_bsl<mode>_internal): Handle more modes.
	(aarch64_simd_bsl<mode>): Likewise.
	* config/aarch64/arm_neon.h
	(vbsl<q>_<fpsu><8,16,32,64>): Implement using builtins.
	* config/aarch64/iterators.md (VALLDIF): New.
	(Vbtype): Handle more modes.

From-SVN: r205385
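The net effect in arm_neon.h is that each vbsl variant becomes a one-line wrapper around the new builtin. A minimal sketch of one variant, assuming the usual <name><mode>_<qualifiers> builtin naming; the exact "_suss" suffix here is an assumption derived from the BSL_S qualifier list, not quoted from the patch:

/* Sketch only: shape of a vbsl intrinsic after this change, in the
   context of arm_neon.h (which defines the vector types used here).  */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
{
  /* Builtin name assumed: "simd_bsl" + mode "v2sf" + qualifier suffix.  */
  return __builtin_aarch64_simd_bslv2sf_suss (__a, __b, __c);
}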
gcc/config/aarch64/aarch64-builtins.c
@@ -181,6 +181,22 @@ aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 #define TYPES_LOAD1 (aarch64_types_load1_qualifiers)
 #define TYPES_LOADSTRUCT (aarch64_types_load1_qualifiers)
 
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_poly, qualifier_unsigned,
+      qualifier_poly, qualifier_poly };
+#define TYPES_BSL_P (aarch64_types_bsl_p_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_s_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_none, qualifier_unsigned,
+      qualifier_none, qualifier_none };
+#define TYPES_BSL_S (aarch64_types_bsl_s_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_u_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_unsigned, qualifier_unsigned,
+      qualifier_unsigned, qualifier_unsigned };
+#define TYPES_BSL_U (aarch64_types_bsl_u_qualifiers)
+
 /* The first argument (return type) of a store should be void type,
    which we represent with qualifier_void.  Their first operand will be
    a DImode pointer to the location to store to, so we must use
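In these qualifier arrays the first entry describes the return type and the remaining entries the argument types, so TYPES_BSL_S yields signatures such as float32x2_t (uint32x2_t, float32x2_t, float32x2_t): the selector is always an unsigned vector, while the two data operands and the result share a type. A minimal scalar model of the operation the bsl builtins expose (illustrative only; the real builtins act on whole vector registers):

#include <stdint.h>

/* Bitwise select: each result bit comes from b where the corresponding
   mask bit is set, and from c where it is clear.  */
static inline uint32_t
bsl1 (uint32_t mask, uint32_t b, uint32_t c)
{
  return (mask & b) | (~mask & c);
}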
@@ -255,6 +271,9 @@ aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 #define BUILTIN_VALLDI(T, N, MAP) \
   VAR11 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \
 	 v4si, v2di, v2sf, v4sf, v2df, di)
+#define BUILTIN_VALLDIF(T, N, MAP) \
+  VAR12 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \
+	 v4si, v2di, v2sf, v4sf, v2df, di, df)
 #define BUILTIN_VB(T, N, MAP) \
   VAR2 (T, N, MAP, v8qi, v16qi)
 #define BUILTIN_VD(T, N, MAP) \
@@ -279,6 +298,8 @@ aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
 #define BUILTIN_VDQV(T, N, MAP) \
   VAR5 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v4si)
+#define BUILTIN_VDQQH(T, N, MAP) \
+  VAR4 (T, N, MAP, v8qi, v16qi, v4hi, v8hi)
 #define BUILTIN_VDQ_BHSI(T, N, MAP) \
   VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
 #define BUILTIN_VDQ_I(T, N, MAP) \
gcc/config/aarch64/aarch64-simd-builtins.def
@@ -362,3 +362,8 @@
 
   /* Implemented by fma<mode>4.  */
   BUILTIN_VDQF (TERNOP, fma, 4)
+
+  /* Implemented by aarch64_simd_bsl<mode>.  */
+  BUILTIN_VDQQH (BSL_P, simd_bsl, 0)
+  BUILTIN_VSDQ_I_DI (BSL_U, simd_bsl, 0)
+  BUILTIN_VALLDIF (BSL_S, simd_bsl, 0)
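These three lines register one simd_bsl builtin per mode in VDQQH (poly), VSDQ_I_DI (unsigned integer) and VALLDIF (signed and floating-point), which the arm_neon.h intrinsics then call. A typical user-level use of the resulting intrinsics, shown with public Neon APIs that predate this patch (usage sketch, not part of the change):

#include <arm_neon.h>

/* Per-lane maximum via compare-then-select: vcgtq_f32 sets a mask lane
   to all-ones where a > b, so vbslq_f32 picks a there and b elsewhere.  */
static inline float32x4_t
max_lanes (float32x4_t a, float32x4_t b)
{
  uint32x4_t mask = vcgtq_f32 (a, b);
  return vbslq_f32 (mask, a, b);
}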
gcc/config/aarch64/aarch64-simd.md
@@ -1662,15 +1662,15 @@
 ;; bif op0, op1, mask
 
 (define_insn "aarch64_simd_bsl<mode>_internal"
-  [(set (match_operand:VALL 0 "register_operand"		"=w,w,w")
-	(ior:VALL
-	   (and:VALL
+  [(set (match_operand:VALLDIF 0 "register_operand"		"=w,w,w")
+	(ior:VALLDIF
+	   (and:VALLDIF
 	     (match_operand:<V_cmp_result> 1 "register_operand"	" 0,w,w")
-	     (match_operand:VALL 2 "register_operand"		" w,w,0"))
-	   (and:VALL
+	     (match_operand:VALLDIF 2 "register_operand"	" w,w,0"))
+	   (and:VALLDIF
 	     (not:<V_cmp_result>
 		(match_dup:<V_cmp_result> 1))
-	     (match_operand:VALL 3 "register_operand"		" w,0,w"))
+	     (match_operand:VALLDIF 3 "register_operand"	" w,0,w"))
 	))]
   "TARGET_SIMD"
   "@
@@ -1681,10 +1681,10 @@
 )
 
 (define_expand "aarch64_simd_bsl<mode>"
-  [(match_operand:VALL 0 "register_operand")
+  [(match_operand:VALLDIF 0 "register_operand")
    (match_operand:<V_cmp_result> 1 "register_operand")
-   (match_operand:VALL 2 "register_operand")
-   (match_operand:VALL 3 "register_operand")]
+   (match_operand:VALLDIF 2 "register_operand")
+   (match_operand:VALLDIF 3 "register_operand")]
   "TARGET_SIMD"
 {
   /* We can't alias operands together if they have different modes.  */
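The aliasing comment matters because operand 1 always carries the integer mode <V_cmp_result>, which coincides with the data mode only for the integer variants; for the floating-point and DF modes added here, the selector and the data operands have different modes, so operand 1 can never be tied to the others. A small caller that makes the mismatch visible (usage sketch, not from the patch):

#include <arm_neon.h>

/* The selector is a 64-bit-lane unsigned vector (V2DI) while the data
   and result are double-precision lanes (V2DF); the bsl pattern keeps
   the two modes distinct all the way through RTL.  */
static inline float64x2_t
blend_f64 (uint64x2_t sel, float64x2_t a, float64x2_t b)
{
  return vbslq_f64 (sel, a, b);
}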
gcc/config/aarch64/iterators.md
@@ -107,6 +107,10 @@
 ;; All vector modes and DI.
 (define_mode_iterator VALLDI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF DI])
 
+;; All vector modes and DI and DF.
+(define_mode_iterator VALLDIF [V8QI V16QI V4HI V8HI V2SI V4SI
+			       V2DI V2SF V4SF V2DF DI DF])
+
 ;; Vector modes for Integer reduction across lanes.
 (define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
@@ -363,7 +367,8 @@
 			(V4HI "8b")  (V8HI "16b")
 			(V2SI "8b")  (V4SI "16b")
 			(V2DI "16b") (V2SF "8b")
-			(V4SF "16b") (V2DF "16b")])
+			(V4SF "16b") (V2DF "16b")
+			(DI   "8b")  (DF   "8b")])
 
 ;; Define element mode for each vector mode.
 (define_mode_attr VEL [(V8QI "QI") (V16QI "QI")