Commit 0ddec79f authored by James Greenhalgh, committed by James Greenhalgh

[AArch64] Map standard pattern names to NEON intrinsics directly.

gcc/
	* config/aarch64/aarch64-builtins.c
	(CF): Remove.
	(CF0, CF1, CF2, CF3, CF4, CF10): New.
	(VAR<1-12>): Add MAP parameter.
	(BUILTIN_*): Likewise.
	* config/aarch64/aarch64-simd-builtins.def: Set MAP parameter.
	* config/aarch64/aarch64-simd.md (aarch64_sshl_n<mode>): Remove.
	(aarch64_ushl_n<mode>): Likewise.
	(aarch64_sshr_n<mode>): Likewise.
	(aarch64_ushr_n<mode>): Likewise.
	(aarch64_<maxmin><mode>): Likewise.
	(aarch64_sqrt<mode>): Likewise.
	* config/aarch64/arm_neon.h (vshl<q>_n_*): Use new builtin names.
	(vshr<q>_n_*): Likewise.

From-SVN: r198137
Parent: 0050faf8
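
In effect, NEON intrinsics whose semantics match a standard named pattern (ashl, ashr, lshr, the smax/smin/umax/umin family, sqrt) now have their builtins generated directly from those patterns instead of going through one-line aarch64_* wrapper expanders. A minimal sketch of the (unchanged) user-visible behaviour; the function name below is illustrative, not from the patch:

#include <arm_neon.h>

/* After this patch vshl_n_s8 expands through the standard ashl<mode>3
   pattern (__builtin_aarch64_ashlv8qi) rather than the removed
   aarch64_sshl_n<mode> wrapper; the emitted SHL instruction is the
   same either way.  */
int8x8_t
shift_left_by_three (int8x8_t a)
{
  return vshl_n_s8 (a, 3);
}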
gcc/config/aarch64/aarch64-simd.md
@@ -2897,28 +2897,6 @@
    (set_attr "simd_mode" "<MODE>")]
 )
 
-;; vshl_n
-
-(define_expand "aarch64_sshl_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-  (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-  (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
-  emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
-  DONE;
-})
-
-(define_expand "aarch64_ushl_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-  (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-  (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
-  emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
-  DONE;
-})
-
 ;; vshll_n
 
 (define_insn "aarch64_<sur>shll_n<mode>"
@@ -2963,28 +2941,6 @@
    (set_attr "simd_mode" "<MODE>")]
 )
 
-;; vshr_n
-
-(define_expand "aarch64_sshr_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-  (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-  (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
-  emit_insn (gen_ashr<mode>3 (operands[0], operands[1], operands[2]));
-  DONE;
-})
-
-(define_expand "aarch64_ushr_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-  (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-  (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
-  emit_insn (gen_lshr<mode>3 (operands[0], operands[1], operands[2]));
-  DONE;
-})
-
 ;; vrshr_n
 
 (define_insn "aarch64_<sur>shr_n<mode>"
@@ -3141,19 +3097,6 @@
    (set_attr "simd_mode" "DI")]
 )
 
-;; v(max|min)
-
-(define_expand "aarch64_<maxmin><mode>"
- [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
-       (MAXMIN:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")
-			(match_operand:VDQ_BHSI 2 "register_operand" "w")))]
- "TARGET_SIMD"
-{
-  emit_insn (gen_<maxmin><mode>3 (operands[0], operands[1], operands[2]));
-  DONE;
-})
-
 (define_insn "aarch64_<fmaxmin><mode>"
   [(set (match_operand:VDQF 0 "register_operand" "=w")
 	(unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
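
The removed aarch64_<maxmin><mode> expander iterated MAXMIN over the integer max/min codes (smax, smin, umax, umin) and simply called the corresponding standard <maxmin><mode>3 pattern, so the builtins can now be generated from those patterns directly. A sketch of intrinsics that go through this family (illustrative function names):

#include <arm_neon.h>

int8x8_t
elem_max (int8x8_t a, int8x8_t b)
{
  return vmax_s8 (a, b);   /* via the standard smax<mode>3 pattern -> SMAX */
}

uint16x4_t
elem_min (uint16x4_t a, uint16x4_t b)
{
  return vmin_u16 (a, b);  /* via the standard umin<mode>3 pattern -> UMIN */
}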
@@ -3176,16 +3119,6 @@
    (set_attr "simd_mode" "<MODE>")]
 )
 
-(define_expand "aarch64_sqrt<mode>"
-  [(match_operand:VDQF 0 "register_operand" "=w")
-   (match_operand:VDQF 1 "register_operand" "w")]
-  "TARGET_SIMD"
-{
-  emit_insn (gen_sqrt<mode>2 (operands[0], operands[1]));
-  DONE;
-})
-
 ;; Patterns for vector struct loads and stores.
 
 (define_insn "vec_load_lanesoi<mode>"
gcc/config/aarch64/arm_neon.h
@@ -23404,109 +23404,109 @@ vrsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vshl_n_s8 (int8x8_t __a, const int __b)
 {
-  return (int8x8_t) __builtin_aarch64_sshl_nv8qi (__a, __b);
+  return (int8x8_t) __builtin_aarch64_ashlv8qi (__a, __b);
 }
 
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vshl_n_s16 (int16x4_t __a, const int __b)
 {
-  return (int16x4_t) __builtin_aarch64_sshl_nv4hi (__a, __b);
+  return (int16x4_t) __builtin_aarch64_ashlv4hi (__a, __b);
 }
 
 __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
 vshl_n_s32 (int32x2_t __a, const int __b)
 {
-  return (int32x2_t) __builtin_aarch64_sshl_nv2si (__a, __b);
+  return (int32x2_t) __builtin_aarch64_ashlv2si (__a, __b);
 }
 
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vshl_n_s64 (int64x1_t __a, const int __b)
 {
-  return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
+  return (int64x1_t) __builtin_aarch64_ashldi (__a, __b);
 }
 
 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
 vshl_n_u8 (uint8x8_t __a, const int __b)
 {
-  return (uint8x8_t) __builtin_aarch64_ushl_nv8qi ((int8x8_t) __a, __b);
+  return (uint8x8_t) __builtin_aarch64_ashlv8qi ((int8x8_t) __a, __b);
 }
 
 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
 vshl_n_u16 (uint16x4_t __a, const int __b)
 {
-  return (uint16x4_t) __builtin_aarch64_ushl_nv4hi ((int16x4_t) __a, __b);
+  return (uint16x4_t) __builtin_aarch64_ashlv4hi ((int16x4_t) __a, __b);
 }
 
 __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
 vshl_n_u32 (uint32x2_t __a, const int __b)
 {
-  return (uint32x2_t) __builtin_aarch64_ushl_nv2si ((int32x2_t) __a, __b);
+  return (uint32x2_t) __builtin_aarch64_ashlv2si ((int32x2_t) __a, __b);
 }
 
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vshl_n_u64 (uint64x1_t __a, const int __b)
 {
-  return (uint64x1_t) __builtin_aarch64_ushl_ndi ((int64x1_t) __a, __b);
+  return (uint64x1_t) __builtin_aarch64_ashldi ((int64x1_t) __a, __b);
 }
 
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vshlq_n_s8 (int8x16_t __a, const int __b)
 {
-  return (int8x16_t) __builtin_aarch64_sshl_nv16qi (__a, __b);
+  return (int8x16_t) __builtin_aarch64_ashlv16qi (__a, __b);
 }
 
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vshlq_n_s16 (int16x8_t __a, const int __b)
 {
-  return (int16x8_t) __builtin_aarch64_sshl_nv8hi (__a, __b);
+  return (int16x8_t) __builtin_aarch64_ashlv8hi (__a, __b);
 }
 
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vshlq_n_s32 (int32x4_t __a, const int __b)
 {
-  return (int32x4_t) __builtin_aarch64_sshl_nv4si (__a, __b);
+  return (int32x4_t) __builtin_aarch64_ashlv4si (__a, __b);
 }
 
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vshlq_n_s64 (int64x2_t __a, const int __b)
 {
-  return (int64x2_t) __builtin_aarch64_sshl_nv2di (__a, __b);
+  return (int64x2_t) __builtin_aarch64_ashlv2di (__a, __b);
 }
 
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vshlq_n_u8 (uint8x16_t __a, const int __b)
 {
-  return (uint8x16_t) __builtin_aarch64_ushl_nv16qi ((int8x16_t) __a, __b);
+  return (uint8x16_t) __builtin_aarch64_ashlv16qi ((int8x16_t) __a, __b);
 }
 
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vshlq_n_u16 (uint16x8_t __a, const int __b)
 {
-  return (uint16x8_t) __builtin_aarch64_ushl_nv8hi ((int16x8_t) __a, __b);
+  return (uint16x8_t) __builtin_aarch64_ashlv8hi ((int16x8_t) __a, __b);
 }
 
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vshlq_n_u32 (uint32x4_t __a, const int __b)
 {
-  return (uint32x4_t) __builtin_aarch64_ushl_nv4si ((int32x4_t) __a, __b);
+  return (uint32x4_t) __builtin_aarch64_ashlv4si ((int32x4_t) __a, __b);
 }
 
 __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
 vshlq_n_u64 (uint64x2_t __a, const int __b)
 {
-  return (uint64x2_t) __builtin_aarch64_ushl_nv2di ((int64x2_t) __a, __b);
+  return (uint64x2_t) __builtin_aarch64_ashlv2di ((int64x2_t) __a, __b);
 }
 
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vshld_n_s64 (int64x1_t __a, const int __b)
 {
-  return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
+  return (int64x1_t) __builtin_aarch64_ashldi (__a, __b);
 }
 
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vshld_n_u64 (uint64x1_t __a, const int __b)
 {
-  return (uint64x1_t) __builtin_aarch64_ushl_ndi (__a, __b);
+  return (uint64x1_t) __builtin_aarch64_ashldi (__a, __b);
 }
 
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
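
Note the shape shared by the unsigned wrappers above: the builtins are declared on signed vector types, so each unsigned intrinsic casts its argument in and its result back out, while the underlying pattern stays the same. A stand-alone sketch of that shape (hypothetical function mirroring vshl_n_u8's body):

#include <arm_neon.h>

static inline uint8x8_t
shl3_u8_like (uint8x8_t a)
{
  /* Same __builtin_aarch64_ashlv8qi as the signed variant; only the
     casts differ.  */
  return (uint8x8_t) __builtin_aarch64_ashlv8qi ((int8x8_t) a, 3);
}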
@@ -23694,109 +23694,109 @@ vshll_n_u32 (uint32x2_t __a, const int __b)
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vshr_n_s8 (int8x8_t __a, const int __b)
 {
-  return (int8x8_t) __builtin_aarch64_sshr_nv8qi (__a, __b);
+  return (int8x8_t) __builtin_aarch64_ashrv8qi (__a, __b);
 }
 
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vshr_n_s16 (int16x4_t __a, const int __b)
 {
-  return (int16x4_t) __builtin_aarch64_sshr_nv4hi (__a, __b);
+  return (int16x4_t) __builtin_aarch64_ashrv4hi (__a, __b);
 }
 
 __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
 vshr_n_s32 (int32x2_t __a, const int __b)
 {
-  return (int32x2_t) __builtin_aarch64_sshr_nv2si (__a, __b);
+  return (int32x2_t) __builtin_aarch64_ashrv2si (__a, __b);
 }
 
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vshr_n_s64 (int64x1_t __a, const int __b)
 {
-  return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
+  return (int64x1_t) __builtin_aarch64_ashrdi (__a, __b);
 }
 
 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
 vshr_n_u8 (uint8x8_t __a, const int __b)
 {
-  return (uint8x8_t) __builtin_aarch64_ushr_nv8qi ((int8x8_t) __a, __b);
+  return (uint8x8_t) __builtin_aarch64_lshrv8qi ((int8x8_t) __a, __b);
 }
 
 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
 vshr_n_u16 (uint16x4_t __a, const int __b)
 {
-  return (uint16x4_t) __builtin_aarch64_ushr_nv4hi ((int16x4_t) __a, __b);
+  return (uint16x4_t) __builtin_aarch64_lshrv4hi ((int16x4_t) __a, __b);
 }
 
 __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
 vshr_n_u32 (uint32x2_t __a, const int __b)
 {
-  return (uint32x2_t) __builtin_aarch64_ushr_nv2si ((int32x2_t) __a, __b);
+  return (uint32x2_t) __builtin_aarch64_lshrv2si ((int32x2_t) __a, __b);
 }
 
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vshr_n_u64 (uint64x1_t __a, const int __b)
 {
-  return (uint64x1_t) __builtin_aarch64_ushr_ndi ((int64x1_t) __a, __b);
+  return (uint64x1_t) __builtin_aarch64_lshrdi ((int64x1_t) __a, __b);
 }
 
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vshrq_n_s8 (int8x16_t __a, const int __b)
 {
-  return (int8x16_t) __builtin_aarch64_sshr_nv16qi (__a, __b);
+  return (int8x16_t) __builtin_aarch64_ashrv16qi (__a, __b);
 }
 
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vshrq_n_s16 (int16x8_t __a, const int __b)
 {
-  return (int16x8_t) __builtin_aarch64_sshr_nv8hi (__a, __b);
+  return (int16x8_t) __builtin_aarch64_ashrv8hi (__a, __b);
 }
 
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vshrq_n_s32 (int32x4_t __a, const int __b)
 {
-  return (int32x4_t) __builtin_aarch64_sshr_nv4si (__a, __b);
+  return (int32x4_t) __builtin_aarch64_ashrv4si (__a, __b);
 }
 
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vshrq_n_s64 (int64x2_t __a, const int __b)
 {
-  return (int64x2_t) __builtin_aarch64_sshr_nv2di (__a, __b);
+  return (int64x2_t) __builtin_aarch64_ashrv2di (__a, __b);
 }
 
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vshrq_n_u8 (uint8x16_t __a, const int __b)
 {
-  return (uint8x16_t) __builtin_aarch64_ushr_nv16qi ((int8x16_t) __a, __b);
+  return (uint8x16_t) __builtin_aarch64_lshrv16qi ((int8x16_t) __a, __b);
 }
 
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vshrq_n_u16 (uint16x8_t __a, const int __b)
 {
-  return (uint16x8_t) __builtin_aarch64_ushr_nv8hi ((int16x8_t) __a, __b);
+  return (uint16x8_t) __builtin_aarch64_lshrv8hi ((int16x8_t) __a, __b);
 }
 
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vshrq_n_u32 (uint32x4_t __a, const int __b)
 {
-  return (uint32x4_t) __builtin_aarch64_ushr_nv4si ((int32x4_t) __a, __b);
+  return (uint32x4_t) __builtin_aarch64_lshrv4si ((int32x4_t) __a, __b);
 }
 
 __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
 vshrq_n_u64 (uint64x2_t __a, const int __b)
 {
-  return (uint64x2_t) __builtin_aarch64_ushr_nv2di ((int64x2_t) __a, __b);
+  return (uint64x2_t) __builtin_aarch64_lshrv2di ((int64x2_t) __a, __b);
 }
 
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vshrd_n_s64 (int64x1_t __a, const int __b)
 {
-  return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
+  return (int64x1_t) __builtin_aarch64_ashrdi (__a, __b);
 }
 
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vshrd_n_u64 (uint64x1_t __a, const int __b)
 {
-  return (uint64x1_t) __builtin_aarch64_ushr_ndi (__a, __b);
+  return (uint64x1_t) __builtin_aarch64_lshrdi (__a, __b);
 }
 
 /* vsli */