Commit 9697e620 by James Greenhalgh Committed by James Greenhalgh

[AArch64] Make vabs<q>_f<32, 64> a tree/gimple intrinsic.

gcc/
	* config/aarch64/aarch64-builtins.c
	(aarch64_fold_builtin): New.
	* config/aarch64/aarch64-protos.h (aarch64_fold_builtin): New.
	* config/aarch64/aarch64.c (TARGET_FOLD_BUILTIN): Define.
	* config/aarch64/aarch64-simd-builtins.def (abs): New.
	* config/aarch64/arm_neon.h
	(vabs<q>_<f32, 64>): Implement using __builtin_aarch64_fabs.

From-SVN: r198305
parent 0ac198d3
2013-04-25 James Greenhalgh <james.greenhalgh@arm.com>
* config/aarch64/aarch64-builtins.c
(aarch64_fold_builtin): New.
* config/aarch64/aarch64-protos.h (aarch64_fold_builtin): New.
* config/aarch64/aarch64.c (TARGET_FOLD_BUILTIN): Define.
* config/aarch64/aarch64-simd-builtins.def (abs): New.
* config/aarch64/arm_neon.h
(vabs<q>_<f32, 64>): Implement using __builtin_aarch64_fabs.
2013-04-25 James Greenhalgh <james.greenhalgh@arm.com>
Tejas Belagod <tejas.belagod@arm.com>
* config/aarch64/aarch64-builtins.c
......
......@@ -1260,6 +1260,25 @@ aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
/* Expand to a "case AARCH64_SIMD_BUILTIN_<name><mode>:" label for each
   mode a builtin is registered under; used below so one switch arm
   matches every vector-mode variant of a builtin.  */
#define VAR1(T, N, MAP, A) \
case AARCH64_SIMD_BUILTIN_##N##A:

/* Implement TARGET_FOLD_BUILTIN for AArch64: fold calls to AArch64
   SIMD builtins into equivalent GENERIC trees where possible, so the
   mid-end can optimize them like ordinary operations.
   FNDECL is the builtin's declaration and ARGS its argument trees;
   N_ARGS and IGNORE are unused by the cases handled here.
   Returns the folded tree, or NULL_TREE if this builtin is not
   handled and should be expanded normally.  */
tree
aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
bool ignore ATTRIBUTE_UNUSED)
{
int fcode = DECL_FUNCTION_CODE (fndecl);
/* Result type of the builtin call, i.e. the function's return type.  */
tree type = TREE_TYPE (TREE_TYPE (fndecl));
switch (fcode)
{
/* Every vector-float mode of the "abs" builtin folds to ABS_EXPR.  */
BUILTIN_VDQF (UNOP, abs, 2)
return fold_build1 (ABS_EXPR, type, args[0]);
break;
default:
break;
}
return NULL_TREE;
}
bool
aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
......
......@@ -179,6 +179,7 @@ rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
bool aarch64_simd_mem_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
rtx aarch64_tls_get_addr (void);
tree aarch64_fold_builtin (tree, int, tree *, bool);
unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
......
......@@ -282,3 +282,5 @@
BUILTIN_VDQF (UNOP, frecpe, 0)
BUILTIN_VDQF (BINOP, frecps, 0)
BUILTIN_VDQF (UNOP, abs, 2)
......@@ -7874,6 +7874,9 @@ aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN aarch64_fold_builtin
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG aarch64_function_arg
......
......@@ -4468,17 +4468,6 @@ vabds_f32 (float32_t a, float32_t b)
return result;
}
/* vabs_f32: per-lane absolute value of a 64-bit vector of two floats,
   emitted directly as the FABS instruction on the 2S arrangement.
   NOTE(review): this hand-written asm form is the one this patch
   removes in favour of a __builtin_aarch64_absv2sf implementation,
   which the compiler can fold and optimize (opaque asm cannot be).  */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vabs_f32 (float32x2_t a)
{
float32x2_t result;
__asm__ ("fabs %0.2s,%1.2s"
: "=w"(result)
: "w"(a)
: /* No clobbers */);
return result;
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vabs_s8 (int8x8_t a)
{
......@@ -4512,28 +4501,6 @@ vabs_s32 (int32x2_t a)
return result;
}
/* vabsq_f32: per-lane absolute value of a 128-bit vector of four
   floats, emitted directly as FABS on the 4S arrangement.
   NOTE(review): removed by this patch in favour of the
   __builtin_aarch64_absv4sf implementation the mid-end can fold.  */
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vabsq_f32 (float32x4_t a)
{
float32x4_t result;
__asm__ ("fabs %0.4s,%1.4s"
: "=w"(result)
: "w"(a)
: /* No clobbers */);
return result;
}
/* vabsq_f64: per-lane absolute value of a 128-bit vector of two
   doubles, emitted directly as FABS on the 2D arrangement.
   NOTE(review): removed by this patch in favour of the
   __builtin_aarch64_absv2df implementation the mid-end can fold.  */
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vabsq_f64 (float64x2_t a)
{
float64x2_t result;
__asm__ ("fabs %0.2d,%1.2d"
: "=w"(result)
: "w"(a)
: /* No clobbers */);
return result;
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vabsq_s8 (int8x16_t a)
{
......@@ -19717,6 +19684,26 @@ vtbx4_p8 (poly8x8_t r, poly8x8x4_t tab, uint8x8_t idx)
/* Start of optimal implementations in approved order. */
/* vabs */
/* vabs_f32: per-lane absolute value of a float32x2_t.  Implemented via
   the "abs" builtin so aarch64_fold_builtin can fold the call to an
   ABS_EXPR tree, exposing it to generic optimization.  */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vabs_f32 (float32x2_t __a)
{
return __builtin_aarch64_absv2sf (__a);
}
/* vabsq_f32: per-lane absolute value of a float32x4_t.  Uses the "abs"
   builtin (V4SF mode) so the call folds to ABS_EXPR at tree level.  */
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vabsq_f32 (float32x4_t __a)
{
return __builtin_aarch64_absv4sf (__a);
}
/* vabsq_f64: per-lane absolute value of a float64x2_t.  Uses the "abs"
   builtin (V2DF mode) so the call folds to ABS_EXPR at tree level.  */
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vabsq_f64 (float64x2_t __a)
{
return __builtin_aarch64_absv2df (__a);
}
/* vadd */
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment