Commit 8d28afb4 by Kyrylo Tkachov (committed by Kyrylo Tkachov)

neon.ml (opcode): Add Vrintn, Vrinta, Vrintp, Vrintm, Vrintz to type.

gcc/ChangeLog

2012-12-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

	* config/arm/neon.ml (opcode): Add Vrintn, Vrinta, Vrintp, Vrintm,
	Vrintz to type.
	(type features): Add Requires_arch type constructor.
	(ops): Define Vrintn, Vrinta, Vrintp, Vrintm, Vrintz features.
	* config/arm/neon-docgen.ml (intrinsic_groups): Define Vrintn,
	Vrinta, Vrintp, Vrintm, Vrintz, Vrintx.
	* config/arm/neon-testgen.ml (effective_target): Define check for
	Requires_arch 8.
	* config/arm/neon-gen.ml (print_feature_test_start): Handle
	Requires_arch.
	(print_feature_test_end): Likewise.
	Add 2012 to Copyright notice.
	* doc/arm-neon-intrinsics.texi: Regenerate.
	* config/arm/arm_neon.h: Regenerate.

gcc/testsuite/ChangeLog

2012-12-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

	* gcc.target/arm/neon/vrndaf32.c: New test.
	* gcc.target/arm/neon/vrndqaf32.c: Likewise.
	* gcc.target/arm/neon/vrndf32.c: Likewise.
	* gcc.target/arm/neon/vrndqf32.c: Likewise.
	* gcc.target/arm/neon/vrndmf32.c: Likewise.
	* gcc.target/arm/neon/vrndqmf32.c: Likewise.
	* gcc.target/arm/neon/vrndnf32.c: Likewise.
	* gcc.target/arm/neon/vrndqnf32.c: Likewise.
	* gcc.target/arm/neon/vrndpf32.c: Likewise.
	* gcc.target/arm/neon/vrndqpf32.c: Likewise.

From-SVN: r194353
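
For orientation, a minimal usage sketch (not part of the commit) of two of the new intrinsics; the build flags are an assumption, something like -march=armv8-a -mfpu=neon-fp-armv8 together with a suitable -mfloat-abi setting:

#include "arm_neon.h"

/* Round the same vector under two of the new modes.  For an input lane of
   2.5, vrndn_f32 (to nearest, ties to even) gives 2.0 while vrnda_f32
   (to nearest, ties away from zero) gives 3.0.  */
float32x2_t
round_both_ways (float32x2_t x)
{
  float32x2_t n = vrndn_f32 (x);
  float32x2_t a = vrnda_f32 (x);
  return vadd_f32 (n, a);
}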
gcc/config/arm/arm_neon.h
/* ARM NEON intrinsics include file. This file is generated automatically
using neon-gen.ml. Please do not edit manually.
Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
Copyright (C) 2006, 2007, 2009, 2012 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -1382,6 +1382,86 @@ vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrndn_f32 (float32x2_t __a)
{
return (float32x2_t)__builtin_neon_vrintnv2sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrndqn_f32 (float32x4_t __a)
{
return (float32x4_t)__builtin_neon_vrintnv4sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrnda_f32 (float32x2_t __a)
{
return (float32x2_t)__builtin_neon_vrintav2sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrndqa_f32 (float32x4_t __a)
{
return (float32x4_t)__builtin_neon_vrintav4sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrndp_f32 (float32x2_t __a)
{
return (float32x2_t)__builtin_neon_vrintpv2sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrndqp_f32 (float32x4_t __a)
{
return (float32x4_t)__builtin_neon_vrintpv4sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrndm_f32 (float32x2_t __a)
{
return (float32x2_t)__builtin_neon_vrintmv2sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrndqm_f32 (float32x4_t __a)
{
return (float32x4_t)__builtin_neon_vrintmv4sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrnd_f32 (float32x2_t __a)
{
return (float32x2_t)__builtin_neon_vrintzv2sf (__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrndq_f32 (float32x4_t __a)
{
return (float32x4_t)__builtin_neon_vrintzv4sf (__a);
}
#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsub_s8 (int8x8_t __a, int8x8_t __b)
{
gcc/config/arm/neon-docgen.ml
@@ -105,6 +105,11 @@ let intrinsic_groups =
"Multiply-subtract", single_opcode Vmls;
"Fused-multiply-accumulate", single_opcode Vfma;
"Fused-multiply-subtract", single_opcode Vfms;
"Round to integral (to nearest, ties to even)", single_opcode Vrintn;
"Round to integral (to nearest, ties away from zero)", single_opcode Vrinta;
"Round to integral (towards +Inf)", single_opcode Vrintp;
"Round to integral (towards -Inf)", single_opcode Vrintm;
"Round to integral (towards 0)", single_opcode Vrintz;
"Subtraction", single_opcode Vsub;
"Comparison (equal-to)", single_opcode Vceq;
"Comparison (greater-than-or-equal-to)", single_opcode Vcge;
gcc/config/arm/neon-gen.ml
@@ -290,17 +290,21 @@ let print_feature_test_start features =
try
match List.find (fun feature ->
match feature with Requires_feature _ -> true
| Requires_arch _ -> true
| _ -> false)
features with
Requires_feature feature ->
Format.printf "#ifdef __ARM_FEATURE_%s@\n" feature
| Requires_arch arch ->
Format.printf "#if __ARM_ARCH >= %d@\n" arch
| _ -> assert false
with Not_found -> assert true
let print_feature_test_end features =
let feature =
List.exists (function Requires_feature x -> true
| _ -> false) features in
| Requires_arch x -> true
| _ -> false) features in
if feature then Format.printf "#endif@\n"
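
For reference, the guards these two functions emit around a generated intrinsic look as follows (a sketch assembled from the cases above; the existing Requires_feature branch is shown for comparison):

/* Requires_arch 8 (cf. the vrnd* definitions in arm_neon.h above):  */
#if __ARM_ARCH >= 8
/* ... intrinsic definition ... */
#endif

/* Requires_feature "FMA":  */
#ifdef __ARM_FEATURE_FMA
/* ... intrinsic definition ... */
#endif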
@@ -437,7 +441,7 @@ let _ =
"/* ARM NEON intrinsics include file. This file is generated automatically";
" using neon-gen.ml. Please do not edit manually.";
"";
" Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.";
" Copyright (C) 2006, 2007, 2009, 2012 Free Software Foundation, Inc.";
" Contributed by CodeSourcery.";
"";
" This file is part of GCC.";
gcc/config/arm/neon-testgen.ml
@@ -162,9 +162,11 @@ let effective_target features =
try
match List.find (fun feature ->
match feature with Requires_feature _ -> true
| Requires_arch _ -> true
| _ -> false)
features with
Requires_feature "FMA" -> "arm_neonv2"
| Requires_arch 8 -> "arm_v8_neon"
| _ -> assert false
with Not_found -> "arm_neon"
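
The effective-target name chosen here shows up in the generated test headers; for a Requires_arch 8 intrinsic the preamble matches the vrnd*f32.c tests added below:

/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */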
gcc/config/arm/neon.ml
@@ -152,6 +152,11 @@ type opcode =
| Vqdmulh_n
| Vqdmulh_lane
(* Unary ops. *)
| Vrintn
| Vrinta
| Vrintp
| Vrintm
| Vrintz
| Vabs
| Vneg
| Vcls
@@ -279,6 +284,7 @@ type features =
| Fixed_core_reg
(* Mark that the intrinsic requires __ARM_FEATURE_string to be defined. *)
| Requires_feature of string
| Requires_arch of int
exception MixedMode of elts * elts
@@ -812,6 +818,27 @@ let ops =
Vfms, [Requires_feature "FMA"], All (3, Dreg), "vfms", elts_same_io, [F32];
Vfms, [Requires_feature "FMA"], All (3, Qreg), "vfmsQ", elts_same_io, [F32];
(* Round to integral. *)
Vrintn, [Builtin_name "vrintn"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
"vrndn", elts_same_1, [F32];
Vrintn, [Builtin_name "vrintn"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
"vrndqn", elts_same_1, [F32];
Vrinta, [Builtin_name "vrinta"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
"vrnda", elts_same_1, [F32];
Vrinta, [Builtin_name "vrinta"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
"vrndqa", elts_same_1, [F32];
Vrintp, [Builtin_name "vrintp"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
"vrndp", elts_same_1, [F32];
Vrintp, [Builtin_name "vrintp"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
"vrndqp", elts_same_1, [F32];
Vrintm, [Builtin_name "vrintm"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
"vrndm", elts_same_1, [F32];
Vrintm, [Builtin_name "vrintm"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
"vrndqm", elts_same_1, [F32];
Vrintz, [Builtin_name "vrintz"; Requires_arch 8], Use_operands [| Dreg; Dreg |],
"vrnd", elts_same_1, [F32];
Vrintz, [Builtin_name "vrintz"; Requires_arch 8], Use_operands [| Qreg; Qreg |],
"vrndq", elts_same_1, [F32];
(* Subtraction. *)
Vsub, [], All (3, Dreg), "vsub", sign_invar_2, F32 :: su_8_32;
Vsub, [No_op], All (3, Dreg), "vsub", sign_invar_2, [S64; U64];
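
To illustrate the mapping, the first Dreg entry in the round-to-integral block above (Vrintn) corresponds to this wrapper in the regenerated arm_neon.h earlier in this diff: Builtin_name "vrintn" selects __builtin_neon_vrintnv2sf, "vrndn" names the intrinsic, the Dreg operands give the float32x2_t signature, and Requires_arch 8 produces the surrounding guard.

#if __ARM_ARCH >= 8
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrndn_f32 (float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrintnv2sf (__a);
}
#endif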
gcc/doc/arm-neon-intrinsics.texi
@@ -1004,6 +1004,86 @@
@subsubsection Round to integral (to nearest, ties to even)
@itemize @bullet
@item float32x2_t vrndn_f32 (float32x2_t)
@*@emph{Form of expected instruction(s):} @code{vrintn.f32 @var{d0}, @var{d0}}
@end itemize
@itemize @bullet
@item float32x4_t vrndqn_f32 (float32x4_t)
@*@emph{Form of expected instruction(s):} @code{vrintn.f32 @var{q0}, @var{q0}}
@end itemize
@subsubsection Round to integral (to nearest, ties away from zero)
@itemize @bullet
@item float32x2_t vrnda_f32 (float32x2_t)
@*@emph{Form of expected instruction(s):} @code{vrinta.f32 @var{d0}, @var{d0}}
@end itemize
@itemize @bullet
@item float32x4_t vrndqa_f32 (float32x4_t)
@*@emph{Form of expected instruction(s):} @code{vrinta.f32 @var{q0}, @var{q0}}
@end itemize
@subsubsection Round to integral (towards +Inf)
@itemize @bullet
@item float32x2_t vrndp_f32 (float32x2_t)
@*@emph{Form of expected instruction(s):} @code{vrintp.f32 @var{d0}, @var{d0}}
@end itemize
@itemize @bullet
@item float32x4_t vrndqp_f32 (float32x4_t)
@*@emph{Form of expected instruction(s):} @code{vrintp.f32 @var{q0}, @var{q0}}
@end itemize
@subsubsection Round to integral (towards -Inf)
@itemize @bullet
@item float32x2_t vrndm_f32 (float32x2_t)
@*@emph{Form of expected instruction(s):} @code{vrintm.f32 @var{d0}, @var{d0}}
@end itemize
@itemize @bullet
@item float32x4_t vrndqm_f32 (float32x4_t)
@*@emph{Form of expected instruction(s):} @code{vrintm.f32 @var{q0}, @var{q0}}
@end itemize
@subsubsection Round to integral (towards 0)
@itemize @bullet
@item float32x2_t vrnd_f32 (float32x2_t)
@*@emph{Form of expected instruction(s):} @code{vrintz.f32 @var{d0}, @var{d0}}
@end itemize
@itemize @bullet
@item float32x4_t vrndq_f32 (float32x4_t)
@*@emph{Form of expected instruction(s):} @code{vrintz.f32 @var{q0}, @var{q0}}
@end itemize
@subsubsection Subtraction
@itemize @bullet
@@ -7218,12 +7298,6 @@
@subsubsection Transpose elements
@itemize @bullet
@item uint32x2x2_t vtrn_u32 (uint32x2_t, uint32x2_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item uint16x4x2_t vtrn_u16 (uint16x4_t, uint16x4_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
@end itemize
@@ -7236,38 +7310,44 @@
@itemize @bullet
@item int32x2x2_t vtrn_s32 (int32x2_t, int32x2_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
@item int16x4x2_t vtrn_s16 (int16x4_t, int16x4_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item int16x4x2_t vtrn_s16 (int16x4_t, int16x4_t)
@item int8x8x2_t vtrn_s8 (int8x8_t, int8x8_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item poly16x4x2_t vtrn_p16 (poly16x4_t, poly16x4_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item int8x8x2_t vtrn_s8 (int8x8_t, int8x8_t)
@item poly8x8x2_t vtrn_p8 (poly8x8_t, poly8x8_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item float32x2x2_t vtrn_f32 (float32x2_t, float32x2_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item poly16x4x2_t vtrn_p16 (poly16x4_t, poly16x4_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
@item uint32x2x2_t vtrn_u32 (uint32x2_t, uint32x2_t)
@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item poly8x8x2_t vtrn_p8 (poly8x8_t, poly8x8_t)
@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
@item int32x2x2_t vtrn_s32 (int32x2_t, int32x2_t)
@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
@end itemize
@@ -7330,12 +7410,6 @@
@subsubsection Zip elements
@itemize @bullet
@item uint32x2x2_t vzip_u32 (uint32x2_t, uint32x2_t)
@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item uint16x4x2_t vzip_u16 (uint16x4_t, uint16x4_t)
@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
@end itemize
@@ -7348,38 +7422,44 @@
@itemize @bullet
@item int32x2x2_t vzip_s32 (int32x2_t, int32x2_t)
@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
@item int16x4x2_t vzip_s16 (int16x4_t, int16x4_t)
@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item int16x4x2_t vzip_s16 (int16x4_t, int16x4_t)
@item int8x8x2_t vzip_s8 (int8x8_t, int8x8_t)
@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item poly16x4x2_t vzip_p16 (poly16x4_t, poly16x4_t)
@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item int8x8x2_t vzip_s8 (int8x8_t, int8x8_t)
@item poly8x8x2_t vzip_p8 (poly8x8_t, poly8x8_t)
@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item float32x2x2_t vzip_f32 (float32x2_t, float32x2_t)
@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item poly16x4x2_t vzip_p16 (poly16x4_t, poly16x4_t)
@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
@item uint32x2x2_t vzip_u32 (uint32x2_t, uint32x2_t)
@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
@end itemize
@itemize @bullet
@item poly8x8x2_t vzip_p8 (poly8x8_t, poly8x8_t)
@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
@item int32x2x2_t vzip_s32 (int32x2_t, int32x2_t)
@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
@end itemize
@@ -7939,13 +8019,13 @@
@itemize @bullet
@item uint64x2_t vld1q_dup_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
@itemize @bullet
@item int64x2_t vld1q_dup_s64 (const int64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
/* Test the `vrndaf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndaf32 (void)
{
float32x2_t out_float32x2_t;
float32x2_t arg0_float32x2_t;
out_float32x2_t = vrnda_f32 (arg0_float32x2_t);
}
/* { dg-final { scan-assembler "vrinta\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndf32 (void)
{
float32x2_t out_float32x2_t;
float32x2_t arg0_float32x2_t;
out_float32x2_t = vrnd_f32 (arg0_float32x2_t);
}
/* { dg-final { scan-assembler "vrintz\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndmf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndmf32 (void)
{
float32x2_t out_float32x2_t;
float32x2_t arg0_float32x2_t;
out_float32x2_t = vrndm_f32 (arg0_float32x2_t);
}
/* { dg-final { scan-assembler "vrintm\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndnf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndnf32 (void)
{
float32x2_t out_float32x2_t;
float32x2_t arg0_float32x2_t;
out_float32x2_t = vrndn_f32 (arg0_float32x2_t);
}
/* { dg-final { scan-assembler "vrintn\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndpf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndpf32 (void)
{
float32x2_t out_float32x2_t;
float32x2_t arg0_float32x2_t;
out_float32x2_t = vrndp_f32 (arg0_float32x2_t);
}
/* { dg-final { scan-assembler "vrintp\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndqaf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndqaf32 (void)
{
float32x4_t out_float32x4_t;
float32x4_t arg0_float32x4_t;
out_float32x4_t = vrndqa_f32 (arg0_float32x4_t);
}
/* { dg-final { scan-assembler "vrinta\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndqf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndqf32 (void)
{
float32x4_t out_float32x4_t;
float32x4_t arg0_float32x4_t;
out_float32x4_t = vrndq_f32 (arg0_float32x4_t);
}
/* { dg-final { scan-assembler "vrintz\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndqmf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndqmf32 (void)
{
float32x4_t out_float32x4_t;
float32x4_t arg0_float32x4_t;
out_float32x4_t = vrndqm_f32 (arg0_float32x4_t);
}
/* { dg-final { scan-assembler "vrintm\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndqnf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndqnf32 (void)
{
float32x4_t out_float32x4_t;
float32x4_t arg0_float32x4_t;
out_float32x4_t = vrndqn_f32 (arg0_float32x4_t);
}
/* { dg-final { scan-assembler "vrintn\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
/* Test the `vrndqpf32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_v8_neon } */
#include "arm_neon.h"
void test_vrndqpf32 (void)
{
float32x4_t out_float32x4_t;
float32x4_t arg0_float32x4_t;
out_float32x4_t = vrndqp_f32 (arg0_float32x4_t);
}
/* { dg-final { scan-assembler "vrintp\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */