Commit bc138f7b by Alan Lawrence

[AArch64] Simplify vreinterpret for float64x1_t using casts.

	* config/aarch64/aarch64-builtins.c (aarch64_types_unop_su_qualifiers,
	TYPES_REINTERP_SU, aarch64_types_unop_sp_qualifiers, TYPES_REINTERP_SP,
	aarch64_types_unop_us_qualifiers, TYPES_REINTERP_US,
	aarch64_types_unop_ps_qualifiers, TYPES_REINTERP_PS, BUILTIN_VD):
	Delete.
	(aarch64_fold_builtin): Remove all reinterpret cases.
	* config/aarch64/aarch64-protos.h (aarch64_simd_reinterpret): Delete.
	* config/aarch64/aarch64-simd-builtins.def (reinterpret*): Delete.
	* config/aarch64/aarch64-simd.md (aarch64_reinterpretv8qi<mode>,
	aarch64_reinterpretv4hi<mode>, aarch64_reinterpretv2si<mode>,
	aarch64_reinterpretv2sf<mode>, aarch64_reinterpretdi<mode>,
	aarch64_reinterpretv1df<mode>, aarch64_reinterpretv16qi<mode>,
	aarch64_reinterpretv8hi<mode>, aarch64_reinterpretv4si<mode>,
	aarch64_reinterpretv4sf<mode>, aarch64_reinterpretv2di<mode>,
	aarch64_reinterpretv2df<mode>): Delete.
	* config/aarch64/aarch64.c (aarch64_simd_reinterpret): Delete.
	* config/aarch64/arm_neon.h (vreinterpret_p8_f64,
	vreinterpret_p16_f64, vreinterpret_f32_f64, vreinterpret_f64_f32,
	vreinterpret_f64_p8, vreinterpret_f64_p16, vreinterpret_f64_s8,
	vreinterpret_f64_s16, vreinterpret_f64_s32, vreinterpret_f64_u8,
	vreinterpret_f64_u16, vreinterpret_f64_u32, vreinterpret_s64_f64,
	vreinterpret_u64_f64, vreinterpret_s8_f64, vreinterpret_s16_f64,
	vreinterpret_s32_f64, vreinterpret_u8_f64, vreinterpret_u16_f64,
	vreinterpret_u32_f64): Use cast.
	* config/aarch64/iterators.md (VD_RE): Delete.

From-SVN: r215180
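In effect, each vreinterpret variant listed above becomes a plain GNU C vector cast instead of a call to a target builtin. For example, the new definition of vreinterpret_f64_s8 in arm_neon.h (taken from the hunk further down) is:

__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_s8 (int8x8_t __a)
{
  return (float64x1_t) __a;  /* GNU vector cast: same 64 bits, new type.  */
}

GCC defines a cast between vector types of equal size as a reinterpretation of the bit pattern, so the cast carries exactly the semantics the deleted builtins implemented and typically expands to nothing more than a register move.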
gcc/ChangeLog:

2014-09-11  Alan Lawrence  <alan.lawrence@arm.com>

	* config/aarch64/aarch64-builtins.c (aarch64_types_unop_su_qualifiers,
	TYPES_REINTERP_SU, aarch64_types_unop_sp_qualifiers, TYPES_REINTERP_SP,
	aarch64_types_unop_us_qualifiers, TYPES_REINTERP_US,
	aarch64_types_unop_ps_qualifiers, TYPES_REINTERP_PS, BUILTIN_VD):
	Delete.
	(aarch64_fold_builtin): Remove all reinterpret cases.
	* config/aarch64/aarch64-protos.h (aarch64_simd_reinterpret): Delete.
	* config/aarch64/aarch64-simd-builtins.def (reinterpret*): Delete.
	* config/aarch64/aarch64-simd.md (aarch64_reinterpretv8qi<mode>,
	aarch64_reinterpretv4hi<mode>, aarch64_reinterpretv2si<mode>,
	aarch64_reinterpretv2sf<mode>, aarch64_reinterpretdi<mode>,
	aarch64_reinterpretv1df<mode>, aarch64_reinterpretv16qi<mode>,
	aarch64_reinterpretv8hi<mode>, aarch64_reinterpretv4si<mode>,
	aarch64_reinterpretv4sf<mode>, aarch64_reinterpretv2di<mode>,
	aarch64_reinterpretv2df<mode>): Delete.
	* config/aarch64/aarch64.c (aarch64_simd_reinterpret): Delete.
	* config/aarch64/arm_neon.h (vreinterpret_p8_f64,
	vreinterpret_p16_f64, vreinterpret_f32_f64, vreinterpret_f64_f32,
	vreinterpret_f64_p8, vreinterpret_f64_p16, vreinterpret_f64_s8,
	vreinterpret_f64_s16, vreinterpret_f64_s32, vreinterpret_f64_u8,
	vreinterpret_f64_u16, vreinterpret_f64_u32, vreinterpret_s64_f64,
	vreinterpret_u64_f64, vreinterpret_s8_f64, vreinterpret_s16_f64,
	vreinterpret_s32_f64, vreinterpret_u8_f64, vreinterpret_u16_f64,
	vreinterpret_u32_f64): Use cast.
	* config/aarch64/iterators.md (VD_RE): Delete.

2014-09-11  Alan Lawrence  <alan.lawrence@arm.com>

	* config/aarch64/arm_neon.h (aarch64_vset_lane_any): New (*2).
	(vset_lane_f32, vset_lane_f64, vset_lane_p8, vset_lane_p16,
	vset_lane_s8, vset_lane_s16, vset_lane_s32, vset_lane_s64,
config/aarch64/aarch64-builtins.c:

@@ -122,23 +122,6 @@ aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_unsigned, qualifier_unsigned };
 #define TYPES_UNOPU (aarch64_types_unopu_qualifiers)
 #define TYPES_CREATE (aarch64_types_unop_qualifiers)
-#define TYPES_REINTERP_SS (aarch64_types_unop_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_su_qualifiers[SIMD_MAX_BUILTIN_ARGS]
-  = { qualifier_none, qualifier_unsigned };
-#define TYPES_REINTERP_SU (aarch64_types_unop_su_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_sp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
-  = { qualifier_none, qualifier_poly };
-#define TYPES_REINTERP_SP (aarch64_types_unop_sp_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_us_qualifiers[SIMD_MAX_BUILTIN_ARGS]
-  = { qualifier_unsigned, qualifier_none };
-#define TYPES_REINTERP_US (aarch64_types_unop_us_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_unop_ps_qualifiers[SIMD_MAX_BUILTIN_ARGS]
-  = { qualifier_poly, qualifier_none };
-#define TYPES_REINTERP_PS (aarch64_types_unop_ps_qualifiers)
 static enum aarch64_type_qualifiers
 aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_none, qualifier_none, qualifier_maybe_immediate };

@@ -319,8 +302,6 @@ aarch64_types_storestruct_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 	     v4si, v2di, v2sf, v4sf, v2df, di, df)
 #define BUILTIN_VB(T, N, MAP) \
   VAR2 (T, N, MAP, v8qi, v16qi)
-#define BUILTIN_VD(T, N, MAP) \
-  VAR4 (T, N, MAP, v8qi, v4hi, v2si, v2sf)
 #define BUILTIN_VD1(T, N, MAP) \
   VAR5 (T, N, MAP, v8qi, v4hi, v2si, v2sf, v1df)
 #define BUILTIN_VDC(T, N, MAP) \

@@ -1280,24 +1261,6 @@ aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
       BUILTIN_VALLDI (UNOP, abs, 2)
 	return fold_build1 (ABS_EXPR, type, args[0]);
 	break;
-      VAR1 (REINTERP_SS, reinterpretdi, 0, v1df)
-      VAR1 (REINTERP_SS, reinterpretv8qi, 0, v1df)
-      VAR1 (REINTERP_SS, reinterpretv4hi, 0, v1df)
-      VAR1 (REINTERP_SS, reinterpretv2si, 0, v1df)
-      VAR1 (REINTERP_SS, reinterpretv2sf, 0, v1df)
-      BUILTIN_VD (REINTERP_SS, reinterpretv1df, 0)
-      BUILTIN_VD (REINTERP_SU, reinterpretv1df, 0)
-      VAR1 (REINTERP_US, reinterpretdi, 0, v1df)
-      VAR1 (REINTERP_US, reinterpretv8qi, 0, v1df)
-      VAR1 (REINTERP_US, reinterpretv4hi, 0, v1df)
-      VAR1 (REINTERP_US, reinterpretv2si, 0, v1df)
-      VAR1 (REINTERP_US, reinterpretv2sf, 0, v1df)
-      BUILTIN_VD (REINTERP_SP, reinterpretv1df, 0)
-      VAR1 (REINTERP_PS, reinterpretdi, 0, v1df)
-      VAR1 (REINTERP_PS, reinterpretv8qi, 0, v1df)
-      VAR1 (REINTERP_PS, reinterpretv4hi, 0, v1df)
-      VAR1 (REINTERP_PS, reinterpretv2sf, 0, v1df)
-	return fold_build1 (VIEW_CONVERT_EXPR, type, args[0]);
       VAR1 (UNOP, floatv2si, 2, v2sf)
       VAR1 (UNOP, floatv4si, 2, v4sf)
       VAR1 (UNOP, floatv2di, 2, v2df)

@@ -1489,7 +1452,6 @@ aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
 #undef BUILTIN_V2F
 #undef BUILTIN_VALL
 #undef BUILTIN_VB
-#undef BUILTIN_VD
 #undef BUILTIN_VD1
 #undef BUILTIN_VDC
 #undef BUILTIN_VDIC
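The aarch64_fold_builtin cases deleted above folded every reinterpret builtin into a VIEW_CONVERT_EXPR, a bit-preserving change of view. A cast between equal-sized vector types already gives the middle end an equivalent bit-preserving conversion, so once the intrinsics are casts there is nothing left to fold. A minimal standalone check of the bit-for-bit behaviour (a hypothetical test, not part of this patch):

#include <arm_neon.h>
#include <assert.h>
#include <string.h>

int
main (void)
{
  float64x1_t f = vdup_n_f64 (1.5);
  int8x8_t cast = vreinterpret_s8_f64 (f);  /* a plain vector cast after this patch */
  int8x8_t copy;
  memcpy (&copy, &f, sizeof copy);          /* reference: raw bit copy */
  assert (memcmp (&cast, &copy, sizeof copy) == 0);
  return 0;
}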
config/aarch64/aarch64-protos.h:

@@ -269,9 +269,6 @@ rtx aarch64_simd_expand_builtin (int, tree, rtx);
 void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
 
-/* Emit code for reinterprets.  */
-void aarch64_simd_reinterpret (rtx, rtx);
-
 void aarch64_split_128bit_move (rtx, rtx);
 bool aarch64_split_128bit_move_p (rtx, rtx);
config/aarch64/aarch64-simd-builtins.def:

@@ -49,29 +49,6 @@
   BUILTIN_VALL (GETLANE, be_checked_get_lane, 0)
 
-  VAR1 (REINTERP_SS, reinterpretdi, 0, v1df)
-  VAR1 (REINTERP_SS, reinterpretv8qi, 0, v1df)
-  VAR1 (REINTERP_SS, reinterpretv4hi, 0, v1df)
-  VAR1 (REINTERP_SS, reinterpretv2si, 0, v1df)
-  VAR1 (REINTERP_SS, reinterpretv2sf, 0, v1df)
-  BUILTIN_VD (REINTERP_SS, reinterpretv1df, 0)
-  BUILTIN_VD (REINTERP_SU, reinterpretv1df, 0)
-  VAR1 (REINTERP_US, reinterpretdi, 0, v1df)
-  VAR1 (REINTERP_US, reinterpretv8qi, 0, v1df)
-  VAR1 (REINTERP_US, reinterpretv4hi, 0, v1df)
-  VAR1 (REINTERP_US, reinterpretv2si, 0, v1df)
-  VAR1 (REINTERP_US, reinterpretv2sf, 0, v1df)
-  BUILTIN_VD (REINTERP_SP, reinterpretv1df, 0)
-  VAR1 (REINTERP_PS, reinterpretdi, 0, v1df)
-  VAR1 (REINTERP_PS, reinterpretv8qi, 0, v1df)
-  VAR1 (REINTERP_PS, reinterpretv4hi, 0, v1df)
-  VAR1 (REINTERP_PS, reinterpretv2si, 0, v1df)
-  VAR1 (REINTERP_PS, reinterpretv2sf, 0, v1df)
-
   /* Implemented by aarch64_<sur>q<r>shl<mode>.  */
   BUILTIN_VSDQ_I (BINOP, sqshl, 0)
   BUILTIN_VSDQ_I (BINOP_UUS, uqshl, 0)
config/aarch64/aarch64-simd.md:

@@ -2350,114 +2350,6 @@
   DONE;
 })
 
-(define_expand "aarch64_reinterpretv8qi<mode>"
-  [(match_operand:V8QI 0 "register_operand" "")
-   (match_operand:VD_RE 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv4hi<mode>"
-  [(match_operand:V4HI 0 "register_operand" "")
-   (match_operand:VD_RE 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv2si<mode>"
-  [(match_operand:V2SI 0 "register_operand" "")
-   (match_operand:VD_RE 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv2sf<mode>"
-  [(match_operand:V2SF 0 "register_operand" "")
-   (match_operand:VD_RE 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretdi<mode>"
-  [(match_operand:DI 0 "register_operand" "")
-   (match_operand:VD_RE 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv1df<mode>"
-  [(match_operand:V1DF 0 "register_operand" "")
-   (match_operand:VD_RE 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv16qi<mode>"
-  [(match_operand:V16QI 0 "register_operand" "")
-   (match_operand:VQ 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv8hi<mode>"
-  [(match_operand:V8HI 0 "register_operand" "")
-   (match_operand:VQ 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv4si<mode>"
-  [(match_operand:V4SI 0 "register_operand" "")
-   (match_operand:VQ 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv4sf<mode>"
-  [(match_operand:V4SF 0 "register_operand" "")
-   (match_operand:VQ 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv2di<mode>"
-  [(match_operand:V2DI 0 "register_operand" "")
-   (match_operand:VQ 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
-(define_expand "aarch64_reinterpretv2df<mode>"
-  [(match_operand:V2DF 0 "register_operand" "")
-   (match_operand:VQ 1 "register_operand" "")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_reinterpret (operands[0], operands[1]);
-  DONE;
-})
-
 ;; In this insn, operand 1 should be low, and operand 2 the high part of the
 ;; dest vector.
config/aarch64/aarch64.c:

@@ -7941,14 +7941,6 @@ aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
     error ("constant out of range");
 }
 
-/* Emit code to reinterpret one AdvSIMD type as another,
-   without altering bits.  */
-void
-aarch64_simd_reinterpret (rtx dest, rtx src)
-{
-  emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
-}
-
 /* Emit code to place a AdvSIMD pair result in memory locations (with equal
    registers).  */
 void
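For reference, the helper deleted here was a single statement; an annotated copy follows (the comment is editorial, not from the original source):

void
aarch64_simd_reinterpret (rtx dest, rtx src)
{
  /* gen_lowpart re-views SRC in DEST's machine mode without touching the
     underlying bits, so every reinterpret expanded to a plain move.  */
  emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
}

With the intrinsics written as casts, that move falls out of ordinary expansion, so the helper, its prototype, and the twelve expanders that called it can all be removed.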
config/aarch64/arm_neon.h:

@@ -2830,7 +2830,7 @@ vgetq_lane_u64 (uint64x2_t __a, const int __b)
 __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
 vreinterpret_p8_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv8qiv1df_ps (__a);
+  return (poly8x8_t) __a;
 }
 
 __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))

@@ -2962,7 +2962,7 @@ vreinterpretq_p8_p16 (poly16x8_t __a)
 __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
 vreinterpret_p16_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv4hiv1df_ps (__a);
+  return (poly16x4_t) __a;
 }
 
 __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))

@@ -3094,7 +3094,7 @@ vreinterpretq_p16_p8 (poly8x16_t __a)
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
 vreinterpret_f32_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv2sfv1df (__a);
+  return (float32x2_t) __a;
 }
 
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))

@@ -3226,37 +3226,37 @@ vreinterpretq_f32_p16 (poly16x8_t __a)
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_f32 (float32x2_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv2sf (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_p8 (poly8x8_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv8qi_sp (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_p16 (poly16x4_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv4hi_sp (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_s8 (int8x8_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv8qi (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_s16 (int16x4_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv4hi (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_s32 (int32x2_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv2si (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))

@@ -3268,19 +3268,19 @@ vreinterpret_f64_s64 (int64x1_t __a)
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_u8 (uint8x8_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv8qi_su (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_u16 (uint16x4_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv4hi_su (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))
 vreinterpret_f64_u32 (uint32x2_t __a)
 {
-  return __builtin_aarch64_reinterpretv1dfv2si_su (__a);
+  return (float64x1_t) __a;
 }
 
 __extension__ static __inline float64x1_t __attribute__((__always_inline__))

@@ -3358,7 +3358,7 @@ vreinterpretq_f64_u64 (uint64x2_t __a)
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vreinterpret_s64_f64 (float64x1_t __a)
 {
-  return (int64x1_t) {__builtin_aarch64_reinterpretdiv1df (__a)};
+  return (int64x1_t) __a;
 }
 
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))

@@ -3490,7 +3490,7 @@ vreinterpretq_s64_p16 (poly16x8_t __a)
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vreinterpret_u64_f64 (float64x1_t __a)
 {
-  return (uint64x1_t) {__builtin_aarch64_reinterpretdiv1df_us (__a)};
+  return (uint64x1_t) __a;
 }
 
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))

@@ -3622,7 +3622,7 @@ vreinterpretq_u64_p16 (poly16x8_t __a)
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vreinterpret_s8_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv8qiv1df (__a);
+  return (int8x8_t) __a;
 }
 
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))

@@ -3754,7 +3754,7 @@ vreinterpretq_s8_p16 (poly16x8_t __a)
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vreinterpret_s16_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv4hiv1df (__a);
+  return (int16x4_t) __a;
 }
 
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))

@@ -3886,7 +3886,7 @@ vreinterpretq_s16_p16 (poly16x8_t __a)
 __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
 vreinterpret_s32_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv2siv1df (__a);
+  return (int32x2_t) __a;
 }
 
 __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))

@@ -4018,7 +4018,7 @@ vreinterpretq_s32_p16 (poly16x8_t __a)
 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
 vreinterpret_u8_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv8qiv1df_us (__a);
+  return (uint8x8_t) __a;
 }
 
 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))

@@ -4150,7 +4150,7 @@ vreinterpretq_u8_p16 (poly16x8_t __a)
 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
 vreinterpret_u16_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv4hiv1df_us (__a);
+  return (uint16x4_t) __a;
 }
 
 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))

@@ -4282,7 +4282,7 @@ vreinterpretq_u16_p16 (poly16x8_t __a)
 __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
 vreinterpret_u32_f64 (float64x1_t __a)
 {
-  return __builtin_aarch64_reinterpretv2siv1df_us (__a);
+  return (uint32x2_t) __a;
 }
 
 __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
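As a usage-level sanity check (a hypothetical example, not part of the patch), reinterpreting in both directions round-trips the bits exactly:

#include <arm_neon.h>

/* u holds the raw bits of f; the result holds the same bits as f again,
   since vreinterpret never alters the bit pattern.  */
float64x1_t
round_trip (float64x1_t f)
{
  uint64x1_t u = vreinterpret_u64_f64 (f);  /* cast-based after this patch */
  return vreinterpret_f64_u64 (u);
}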
config/aarch64/iterators.md:

@@ -144,9 +144,6 @@
 ;; Double vector modes for combines.
 (define_mode_iterator VDIC [V8QI V4HI V2SI])
 
-;; Double vector modes, inc. V1DF and the DI "vector" mode, for VREINTERPRET.
-(define_mode_iterator VD_RE [V8QI V4HI V2SI DI V1DF V2SF])
-
 ;; Double vector modes inc V1DF
 (define_mode_iterator VD1 [V8QI V4HI V2SI V2SF V1DF])