Commit 6383ff9f authored by Tamar Christina, committed by Tamar Christina

2016-11-28 Tamar Christina <tamar.christina@arm.com>

	* config/aarch64/aarch64-simd-builtins.def
	(BSL_P): Added di and v2di modes.
	* config/aarch64/arm_neon.h
	(vsriq_n_p64, vsri_n_p64): Added poly type.
	(vextq_p64, vext_p64): Likewise.
	(vceq_p64, vbslq_p64, vbsl_p64): Likewise.

From-SVN: r242916
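
For context, a minimal usage sketch of two of the poly64 intrinsics this patch enables, vceq_p64 and vbsl_p64. The function and variable names are illustrative only, and depending on the toolchain the poly64 types may require building for a target with the crypto extension (e.g. -march=armv8-a+crypto); this is not part of the commit itself.

#include <arm_neon.h>

/* Illustrative only -- not part of this commit.
   Returns Y where X == Y, otherwise FALLBACK.  */
poly64x1_t
select_on_equal (poly64x1_t x, poly64x1_t y, poly64x1_t fallback)
{
  /* vceq_p64 (added below) compares the single 64-bit lane and yields an
     all-ones or all-zeros mask.  */
  uint64x1_t mask = vceq_p64 (x, y);

  /* vbsl_p64 (also added below) selects bits from Y where the mask is set
     and from FALLBACK elsewhere.  */
  return vbsl_p64 (mask, y, fallback);
}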
gcc/config/aarch64/aarch64-simd-builtins.def
@@ -442,6 +442,7 @@
/* Implemented by aarch64_simd_bsl<mode>. */
BUILTIN_VDQQH (BSL_P, simd_bsl, 0)
VAR2 (BSL_P, simd_bsl,0, di, v2di)
BUILTIN_VSDQ_I_DI (BSL_U, simd_bsl, 0)
BUILTIN_VALLDIF (BSL_S, simd_bsl, 0)
gcc/config/aarch64/arm_neon.h
@@ -10609,6 +10609,19 @@ vrsqrteq_u32 (uint32x4_t a)
result; \
})
#define vsri_n_p64(a, b, c) \
__extension__ \
({ \
poly64x1_t b_ = (b); \
poly64x1_t a_ = (a); \
poly64x1_t result; \
__asm__ ("sri %d0,%d2,%3" \
: "=w"(result) \
: "0"(a_), "w"(b_), "i"(c) \
: /* No clobbers. */); \
result; \
})
#define vsriq_n_p8(a, b, c) \
__extension__ \
({ \
@@ -10635,6 +10648,19 @@ vrsqrteq_u32 (uint32x4_t a)
result; \
})
#define vsriq_n_p64(a, b, c) \
__extension__ \
({ \
poly64x2_t b_ = (b); \
poly64x2_t a_ = (a); \
poly64x2_t result; \
__asm__ ("sri %0.2d,%2.2d,%3" \
: "=w"(result) \
: "0"(a_), "w"(b_), "i"(c) \
: /* No clobbers. */); \
result; \
})
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_p8 (poly8x8_t a, poly8x8_t b)
@@ -11774,6 +11800,13 @@ vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
{
return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c);
}
__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
{
return (poly64x1_t)
{__builtin_aarch64_simd_bsldi_pupp (__a[0], __b[0], __c[0])};
}
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -11882,6 +11915,13 @@ vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c);
}
__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
{
return __builtin_aarch64_simd_bslv2di_pupp (__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
@@ -12413,6 +12453,13 @@ vceq_p8 (poly8x8_t __a, poly8x8_t __b)
return (uint8x8_t) (__a == __b);
}
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_p64 (poly64x1_t __a, poly64x1_t __b)
{
return (uint64x1_t) (__a == __b);
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s8 (int8x8_t __a, int8x8_t __b)
@@ -16152,6 +16199,15 @@ vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
#endif
}
__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_p64 (poly64x1_t __a, poly64x1_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
/* The only possible index to the assembler instruction returns element 0. */
return __a;
}
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
@@ -16320,6 +16376,18 @@ vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
#endif
}
__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_p64 (poly64x2_t __a, poly64x2_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
#else
return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1});
#endif
}
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
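
A short, hedged sketch of how the q-form intrinsics added above (vextq_p64 and vsriq_n_p64) might be combined; the function name and the shift constant are illustrative assumptions, not part of the patch.

#include <arm_neon.h>

/* Illustrative only -- not part of this commit.
   vextq_p64 (a, b, 1) concatenates lanes a[1] and b[0]; vsriq_n_p64 then
   shifts each lane of B right by 8 and inserts the result into the rotated
   value, preserving its top 8 bits per lane.  */
poly64x2_t
demo_q_forms (poly64x2_t a, poly64x2_t b)
{
  poly64x2_t rotated = vextq_p64 (a, b, 1);
  return vsriq_n_p64 (rotated, b, 8);
}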