Commit 436016f4 by Dennis Zhang

arm: ACLE I8MM multiply-accumulate

This patch adds ACLE intrinsics for the AdvSIMD 8-bit integer matrix
multiply-accumulate (I8MM) instructions: vmmlaq_s32, vmmlaq_u32, and
vusmmlaq_s32.
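
Each intrinsic treats its q-register operands as small matrices: the two
8-bit sources as row-major 2x8 matrices and the 32-bit accumulator as a
row-major 2x2 matrix, computing r += a * transpose(b). A scalar model of
the signed variant (an illustration written for this description, not
code from the patch; the name smmla_ref is made up):

    #include <stdint.h>

    /* Scalar model of vmmlaq_s32: r holds a row-major 2x2 int32 matrix,
       a and b hold row-major 2x8 int8 matrices.  */
    static void
    smmla_ref (int32_t r[4], const int8_t a[16], const int8_t b[16])
    {
      for (int i = 0; i < 2; i++)      /* row of a */
        for (int j = 0; j < 2; j++)    /* row of b */
          {
            int32_t sum = 0;
            for (int k = 0; k < 8; k++)
              sum += (int32_t) a[8 * i + k] * (int32_t) b[8 * j + k];
            r[2 * i + j] += sum;
          }
    }

vmmlaq_u32 is the same computation on unsigned elements, and
vusmmlaq_s32 takes an unsigned first source and a signed second source.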

gcc/ChangeLog:

2020-02-21  Dennis Zhang  <dennis.zhang@arm.com>

	* config/arm/arm_neon.h (vmmlaq_s32, vmmlaq_u32, vusmmlaq_s32): New.
	* config/arm/arm_neon_builtins.def (smmla, ummla, usmmla): New.
	* config/arm/iterators.md (MATMUL): New iterator.
	(sup): Add UNSPEC_MATMUL_S, UNSPEC_MATMUL_U, and UNSPEC_MATMUL_US.
	(mmla_sfx): New attribute.
	* config/arm/neon.md (neon_<sup>mmlav16qi): New.
	* config/arm/unspecs.md (UNSPEC_MATMUL_S, UNSPEC_MATMUL_U): New.
	(UNSPEC_MATMUL_US): New.

gcc/testsuite/ChangeLog:

2020-02-21  Dennis Zhang  <dennis.zhang@arm.com>

	* gcc.target/arm/simd/vmmla_1.c: New test.

gcc/config/arm/arm_neon.h
@@ -18791,6 +18791,34 @@ vsudotq_lane_s32 (int32x4_t __r, int8x16_t __a,
#pragma GCC pop_options
#endif
/* AdvSIMD 8-bit Integer Matrix Multiply (I8MM) intrinsics.  */

#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+i8mm")

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmmlaq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b)
{
  return __builtin_neon_smmlav16qi (__r, __a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmmlaq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_neon_ummlav16qi_uuuu (__r, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vusmmlaq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b)
{
  return __builtin_neon_usmmlav16qi_ssus (__r, __a, __b);
}

#pragma GCC pop_options
#ifdef __cplusplus
}
#endif

gcc/config/arm/arm_neon_builtins.def
@@ -377,3 +377,7 @@ VAR2 (MAC_LANE_PAIR, vcmlaq_lane0, v4sf, v8hf)
VAR2 (MAC_LANE_PAIR, vcmlaq_lane90, v4sf, v8hf)
VAR2 (MAC_LANE_PAIR, vcmlaq_lane180, v4sf, v8hf)
VAR2 (MAC_LANE_PAIR, vcmlaq_lane270, v4sf, v8hf)
VAR1 (TERNOP, smmla, v16qi)
VAR1 (UTERNOP, ummla, v16qi)
VAR1 (USTERNOP, usmmla, v16qi)
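
(A reading of the macros, as a gloss rather than anything stated in the
patch: each VAR1 line registers one builtin for the V16QI mode, with
TERNOP yielding the all-signed __builtin_neon_smmlav16qi and
UTERNOP/USTERNOP yielding the variants whose names in arm_neon.h above
carry the _uuuu and _ssus suffixes, one signedness letter for the return
value followed by one per argument.)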

gcc/config/arm/iterators.md
@@ -487,6 +487,8 @@
(define_int_iterator VCADD [UNSPEC_VCADD90 UNSPEC_VCADD270])
(define_int_iterator VCMLA [UNSPEC_VCMLA UNSPEC_VCMLA90 UNSPEC_VCMLA180 UNSPEC_VCMLA270])
(define_int_iterator MATMUL [UNSPEC_MATMUL_S UNSPEC_MATMUL_U UNSPEC_MATMUL_US])
;;----------------------------------------------------------------------------
;; Mode attributes
;;----------------------------------------------------------------------------
@@ -942,6 +944,7 @@
(UNSPEC_DOT_S "s") (UNSPEC_DOT_U "u")
(UNSPEC_DOT_US "us") (UNSPEC_DOT_SU "su")
(UNSPEC_SSAT16 "s") (UNSPEC_USAT16 "u")
(UNSPEC_MATMUL_S "s") (UNSPEC_MATMUL_U "u") (UNSPEC_MATMUL_US "us")
])
(define_int_attr vfml_half
@@ -1110,6 +1113,9 @@
(UNSPEC_SMUADX "smuadx") (UNSPEC_SSAT16 "ssat16")
(UNSPEC_USAT16 "usat16")])
(define_int_attr mmla_sfx [(UNSPEC_MATMUL_S "s8") (UNSPEC_MATMUL_U "u8")
(UNSPEC_MATMUL_US "s8")])
;; Both kinds of return insn.
(define_code_iterator RETURNS [return simple_return])
(define_code_attr return_str [(return "") (simple_return "simple_")])
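
Taken together, these additions let one pattern cover all three
instructions: the MATMUL iterator enumerates the unspecs, <sup> supplies
the s/u/us infix for both the pattern name and the mnemonic, and
<mmla_sfx> supplies the data-type suffix. Note that the mixed-sign form
also maps to "s8", matching the vusmmla.s8 spelling the new test scans
for.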

gcc/config/arm/neon.md
@@ -6585,3 +6585,14 @@ if (BYTES_BIG_ENDIAN)
"vabd.<V_if_elem> %<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_fp_abd_s<q>")]
)
(define_insn "neon_<sup>mmlav16qi"
  [(set (match_operand:V4SI 0 "register_operand" "=w")
        (plus:V4SI
         (unspec:V4SI [(match_operand:V16QI 2 "register_operand" "w")
                       (match_operand:V16QI 3 "register_operand" "w")]
                      MATMUL)
         (match_operand:V4SI 1 "register_operand" "0")))]
  "TARGET_I8MM"
  "v<sup>mmla.<mmla_sfx>\t%q0, %q2, %q3"
  [(set_attr "type" "neon_mla_s_q")]
)
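
The accumulator (operand 1) is constrained to "0", tying it to the
destination register because the instruction accumulates destructively;
the multiply itself is kept opaque behind the MATMUL unspec, so only the
unspec and the printed mnemonic distinguish the three insns this pattern
generates.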

gcc/config/arm/unspecs.md
@@ -503,4 +503,7 @@
UNSPEC_VCMLA90
UNSPEC_VCMLA180
UNSPEC_VCMLA270
UNSPEC_MATMUL_S
UNSPEC_MATMUL_U
UNSPEC_MATMUL_US
])

gcc/testsuite/gcc.target/arm/simd/vmmla_1.c (new file)
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_2a_i8mm_ok } */
/* { dg-options "-save-temps -O2" } */
/* { dg-additional-options "-march=armv8.2-a+i8mm" } */

#include "arm_neon.h"

int32x4_t
test_vmmlaq_s32 (int32x4_t r, int8x16_t a, int8x16_t b)
{
  return vmmlaq_s32 (r, a, b);
}

uint32x4_t
test_vmmlaq_u32 (uint32x4_t r, uint8x16_t a, uint8x16_t b)
{
  return vmmlaq_u32 (r, a, b);
}

int32x4_t
test_vusmmlaq_s32 (int32x4_t r, uint8x16_t a, int8x16_t b)
{
  return vusmmlaq_s32 (r, a, b);
}

/* { dg-final { scan-assembler-times {\tvsmmla.s8\tq[0-9]+, q[0-9]+, q[0-9]+} 1 } } */
/* { dg-final { scan-assembler-times {\tvummla.u8\tq[0-9]+, q[0-9]+, q[0-9]+} 1 } } */
/* { dg-final { scan-assembler-times {\tvusmmla.s8\tq[0-9]+, q[0-9]+, q[0-9]+} 1 } } */
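
Outside the DejaGnu harness, a quick manual check is possible; the
cross-compiler triple and float-ABI flag below are assumptions for
illustration, not part of the patch:

    arm-none-eabi-gcc -march=armv8.2-a+i8mm -mfloat-abi=hard -O2 -S vmmla_1.c
    grep mmla vmmla_1.s    # expect one vsmmla.s8, one vummla.u8, one vusmmla.s8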