Commit 3eff57aa by Srinath Parvathaneni, committed by Kyrylo Tkachov

[ARM][GCC][6x]: MVE ACLE vaddq intrinsics using arithmetic plus operator.

This patch supports the following MVE ACLE vaddq intrinsics. The RTL patterns for these intrinsics are added using the arithmetic "plus" operator.

vaddq_s8, vaddq_s16, vaddq_s32, vaddq_u8, vaddq_u16, vaddq_u32, vaddq_f16, vaddq_f32.

Please refer to the M-profile Vector Extension (MVE) intrinsics [1] for more details.
[1]  https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
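
As a quick illustration (a minimal sketch mirroring the new tests added at the end of this patch, and assuming a toolchain targeting Armv8.1-M with MVE floating point, e.g. -march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2), both the type-specific and the polymorphic forms are expected to compile down to single vadd instructions:

#include "arm_mve.h"

int32x4_t
add_s32 (int32x4_t a, int32x4_t b)
{
  return vaddq_s32 (a, b);   /* explicit variant: expected to emit vadd.i32  */
}

float32x4_t
add_f32 (float32x4_t a, float32x4_t b)
{
  return vaddq (a, b);       /* polymorphic variant resolves to vaddq_f32: vadd.f32  */
}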

gcc/ChangeLog:

2020-03-20  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
            Andre Vieira  <andre.simoesdiasvieira@arm.com>
            Mihail Ionescu  <mihail.ionescu@arm.com>

	* config/arm/arm_mve.h (vaddq_s8): Define macro.
	(vaddq_s16): Likewise.
	(vaddq_s32): Likewise.
	(vaddq_u8): Likewise.
	(vaddq_u16): Likewise.
	(vaddq_u32): Likewise.
	(vaddq_f16): Likewise.
	(vaddq_f32): Likewise.
	(__arm_vaddq_s8): Define intrinsic.
	(__arm_vaddq_s16): Likewise.
	(__arm_vaddq_s32): Likewise.
	(__arm_vaddq_u8): Likewise.
	(__arm_vaddq_u16): Likewise.
	(__arm_vaddq_u32): Likewise.
	(__arm_vaddq_f16): Likewise.
	(__arm_vaddq_f32): Likewise.
	(vaddq): Define polymorphic variant.
	* config/arm/iterators.md (VNIM): Define mode iterator for modes common
	to Neon, IWMMXT and MVE.
	(VNINOTM): Likewise.
	* config/arm/mve.md (mve_vaddq<mode>): Define RTL pattern.
	(mve_vaddq_f<mode>): Define RTL pattern.
	* config/arm/neon.md (add<mode>3): Rename to addv4hf3 RTL pattern.
	(addv8hf3_neon): Define RTL pattern.
	* config/arm/vec-common.md (add<mode>3): Modify standard add RTL pattern
	to support MVE.
	(addv8hf3): Define standard RTL pattern for MVE and Neon.
	(add<mode>3): Modify existing standard add RTL pattern for Neon and IWMMXT.

gcc/testsuite/ChangeLog:

2020-03-20  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
            Andre Vieira  <andre.simoesdiasvieira@arm.com>
            Mihail Ionescu  <mihail.ionescu@arm.com>

	* gcc.target/arm/mve/intrinsics/vaddq_f16.c: New test.
	* gcc.target/arm/mve/intrinsics/vaddq_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u8.c: Likewise.

gcc/config/arm/arm_mve.h
@@ -1898,6 +1898,14 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
#define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
#endif
__extension__ extern __inline void
@@ -12341,6 +12349,48 @@ __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset,
__builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
}
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a + __b;
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a + __b;
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a + __b;
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a + __b;
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a + __b;
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a + __b;
}
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
__extension__ extern __inline void
@@ -14707,6 +14757,20 @@ __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offs
__builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf (__base, __offset, __value, __p);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
{
return __a + __b;
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
{
return __a + __b;
}
#endif
enum {
@@ -15186,6 +15250,8 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(p0, float16x8_t), __ARM_mve_coerce(p1, float16x8_t)), \
int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(p0, float32x4_t), __ARM_mve_coerce(p1, float32x4_t)), \
int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
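
The vaddq polymorphic macro above dispatches on the operand types via the header's __ARM_mve_type_* selectors and __ARM_mve_coerce helpers. A minimal sketch of the same idea in plain C11 _Generic (illustrative only; my_vaddq and its association list are not the actual header machinery):

#include "arm_mve.h"

/* Illustrative only: simplified type-based dispatch in the spirit of the
   polymorphic vaddq macro; the real header uses its own type-id helpers.  */
#define my_vaddq(a, b)                      \
  _Generic ((a),                            \
            int8x16_t:  __arm_vaddq_s8,     \
            int16x8_t:  __arm_vaddq_s16,    \
            int32x4_t:  __arm_vaddq_s32,    \
            uint8x16_t: __arm_vaddq_u8,     \
            uint16x8_t: __arm_vaddq_u16,    \
            uint32x4_t: __arm_vaddq_u32) ((a), (b))

uint32x4_t
add_u32 (uint32x4_t a, uint32x4_t b)
{
  return my_vaddq (a, b);   /* selects __arm_vaddq_u32  */
}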

gcc/config/arm/iterators.md
@@ -66,6 +66,14 @@
;; Integer and float modes supported by Neon and IWMMXT.
(define_mode_iterator VALL [V2DI V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
;; Integer and float modes supported by Neon, IWMMXT and MVE, used by
;; arithmetic expand patterns.
(define_mode_iterator VNIM [V16QI V8HI V4SI V4SF])
;; Integer and float modes supported by Neon and IWMMXT but not MVE, used by
;; arithmetic expand patterns.
(define_mode_iterator VNINOTM [V2SI V4HI V8QI V2SF V2DI])
;; Integer and float modes supported by Neon, IWMMXT and MVE.
(define_mode_iterator VNIM1 [V16QI V8HI V4SI V4SF V2DI])

gcc/config/arm/mve.md
@@ -9643,3 +9643,31 @@
return "";
}
[(set_attr "length" "4")])
;;
;; [vaddq_s, vaddq_u])
;;
(define_insn "mve_vaddq<mode>"
[
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
(plus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
(match_operand:MVE_2 2 "s_register_operand" "w")))
]
"TARGET_HAVE_MVE"
"vadd.i%#<V_sz_elem> %q0, %q1, %q2"
[(set_attr "type" "mve_move")
])
;;
;; [vaddq_f])
;;
(define_insn "mve_vaddq_f<mode>"
[
(set (match_operand:MVE_0 0 "s_register_operand" "=w")
(plus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
(match_operand:MVE_0 2 "s_register_operand" "w")))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
"vadd.f%#<V_sz_elem> %q0, %q1, %q2"
[(set_attr "type" "mve_move")
])

gcc/config/arm/neon.md
@@ -519,18 +519,30 @@
;; As with SFmode, full support for HFmode vector arithmetic is only available
;; when flag-unsafe-math-optimizations is enabled.
(define_insn "add<mode>3" ;; Add pattern with modes V8HF and V4HF is split into separate patterns to add
;; support for standard pattern addv8hf3 in MVE. Following pattern is called
;; from "addv8hf3" standard pattern inside vec-common.md file.
(define_insn "addv8hf3_neon"
[(set [(set
(match_operand:VH 0 "s_register_operand" "=w") (match_operand:V8HF 0 "s_register_operand" "=w")
(plus:VH (plus:V8HF
(match_operand:VH 1 "s_register_operand" "w") (match_operand:V8HF 1 "s_register_operand" "w")
(match_operand:VH 2 "s_register_operand" "w")))] (match_operand:V8HF 2 "s_register_operand" "w")))]
"TARGET_NEON_FP16INST && flag_unsafe_math_optimizations" "TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
"vadd.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2" "vadd.f16\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set (attr "type") [(set_attr "type" "neon_fp_addsub_s_q")]
(if_then_else (match_test "<Is_float_mode>") )
(const_string "neon_fp_addsub_s<q>")
(const_string "neon_add<q>")))] (define_insn "addv4hf3"
[(set
(match_operand:V4HF 0 "s_register_operand" "=w")
(plus:V4HF
(match_operand:V4HF 1 "s_register_operand" "w")
(match_operand:V4HF 2 "s_register_operand" "w")))]
"TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
"vadd.f16\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_fp_addsub_s_q")]
) )
(define_insn "add<mode>3_fp16" (define_insn "add<mode>3_fp16"
......
...@@ -77,19 +77,51 @@ ...@@ -77,19 +77,51 @@
} }
}) })
;; Vector arithmetic. Expanders are blank, then unnamed insns implement
;; patterns separately for Neon, IWMMXT and MVE.
(define_expand "add<mode>3"
 [(set (match_operand:VNIM 0 "s_register_operand")
       (plus:VNIM (match_operand:VNIM 1 "s_register_operand")
                  (match_operand:VNIM 2 "s_register_operand")))]
"(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
|| flag_unsafe_math_optimizations))
|| (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))
|| (TARGET_HAVE_MVE && VALID_MVE_SI_MODE(<MODE>mode))
|| (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(<MODE>mode))"
{
})
;; Vector arithmetic. Expanders are blank, then unnamed insns implement
;; patterns separately for Neon and MVE.
(define_expand "addv8hf3"
[(set (match_operand:V8HF 0 "s_register_operand")
(plus:V8HF (match_operand:V8HF 1 "s_register_operand")
(match_operand:V8HF 2 "s_register_operand")))]
"(TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(V8HFmode))
|| (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)"
{
if (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
emit_insn (gen_addv8hf3_neon (operands[0], operands[1], operands[2]));
})
;; Vector arithmetic. Expanders are blank, then unnamed insns implement
;; patterns separately for Neon and IWMMXT.
(define_expand "add<mode>3"
[(set (match_operand:VNINOTM 0 "s_register_operand")
(plus:VNINOTM (match_operand:VNINOTM 1 "s_register_operand")
(match_operand:VNINOTM 2 "s_register_operand")))]
"(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode) "(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
|| flag_unsafe_math_optimizations)) || flag_unsafe_math_optimizations))
|| (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))" || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
{ {
}) })
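
To see the effect of the iterator split in the expanders above, here is an illustrative sketch using GCC's generic vector extensions (the typedef and function names are mine, not part of the patch): modes in VNIM such as V4SI can now be expanded for Neon, IWMMXT or MVE, while the 64-bit modes in VNINOTM such as V2SI stay Neon/IWMMXT only, since MVE has no 64-bit vectors.

/* Illustrative only: generic vector additions that the add<mode>3 expanders
   above are expected to handle when suitable vector hardware is enabled.  */
typedef int v4si __attribute__ ((vector_size (16)));   /* V4SImode: VNIM     */
typedef int v2si __attribute__ ((vector_size (8)));    /* V2SImode: VNINOTM  */

v4si add_v4si (v4si a, v4si b) { return a + b; }  /* addv4si3 expander */
v2si add_v2si (v2si a, v2si b) { return a + b; }  /* addv2si3 expander */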
;; Vector arithmetic. Expanders are blank, then unnamed insns implement
;; patterns separately for IWMMXT and Neon.
(define_expand "sub<mode>3" (define_expand "sub<mode>3"
[(set (match_operand:VALL 0 "s_register_operand") [(set (match_operand:VALL 0 "s_register_operand")
(minus:VALL (match_operand:VALL 1 "s_register_operand") (minus:VALL (match_operand:VALL 1 "s_register_operand")

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
float16x8_t
foo (float16x8_t a, float16x8_t b)
{
return vaddq_f16 (a, b);
}
/* { dg-final { scan-assembler "vadd.f16" } } */
float16x8_t
foo1 (float16x8_t a, float16x8_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.f16" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
float32x4_t
foo (float32x4_t a, float32x4_t b)
{
return vaddq_f32 (a, b);
}
/* { dg-final { scan-assembler "vadd.f32" } } */
float32x4_t
foo1 (float32x4_t a, float32x4_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.f32" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
int16x8_t
foo (int16x8_t a, int16x8_t b)
{
return vaddq_s16 (a, b);
}
/* { dg-final { scan-assembler "vadd.i16" } } */
int16x8_t
foo1 (int16x8_t a, int16x8_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.i16" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
int32x4_t
foo (int32x4_t a, int32x4_t b)
{
return vaddq_s32 (a, b);
}
/* { dg-final { scan-assembler "vadd.i32" } } */
int32x4_t
foo1 (int32x4_t a, int32x4_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.i32" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
int8x16_t
foo (int8x16_t a, int8x16_t b)
{
return vaddq_s8 (a, b);
}
/* { dg-final { scan-assembler "vadd.i8" } } */
int8x16_t
foo1 (int8x16_t a, int8x16_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.i8" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
uint16x8_t
foo (uint16x8_t a, uint16x8_t b)
{
return vaddq_u16 (a, b);
}
/* { dg-final { scan-assembler "vadd.i16" } } */
uint16x8_t
foo1 (uint16x8_t a, uint16x8_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.i16" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
uint32x4_t
foo (uint32x4_t a, uint32x4_t b)
{
return vaddq_u32 (a, b);
}
/* { dg-final { scan-assembler "vadd.i32" } } */
uint32x4_t
foo1 (uint32x4_t a, uint32x4_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.i32" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c (new file)
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
uint8x16_t
foo (uint8x16_t a, uint8x16_t b)
{
return vaddq_u8 (a, b);
}
/* { dg-final { scan-assembler "vadd.i8" } } */
uint8x16_t
foo1 (uint8x16_t a, uint8x16_t b)
{
return vaddq (a, b);
}
/* { dg-final { scan-assembler "vadd.i8" } } */