Commit 0b839322 by Wilco Dijkstra

[AArch64] Use intrinsics for widening multiplies (PR91598)

Inline assembler instructions don't have latency info and the scheduler does
not attempt to schedule them at all - it does not even honor latencies of
asm source operands.  As a result, SIMD intrinsics which are implemented using
inline assembler perform very poorly, particularly on in-order cores.
Add new patterns and intrinsics for widening multiplies, which results in a
63% speedup for the example in the PR, thus fixing the reported regression.
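
As a concrete illustration (the wrapper function names here are hypothetical; vmull_lane_s16 and vmlal_lane_s16 are among the arm_neon.h intrinsics converted by this patch), this is the kind of code that previously went through inline assembler and now expands through the new patterns, so the scheduler can model its latencies:

#include <arm_neon.h>

/* Widening multiply: multiply each 16-bit lane of a by b[2], producing
   32-bit results (a single smull instruction).  */
int32x4_t
widen_mul (int16x4_t a, int16x4_t b)
{
  return vmull_lane_s16 (a, b, 2);
}

/* Widening multiply-accumulate: acc += a * b[1], widening to 32 bits
   (a single smlal instruction).  */
int32x4_t
widen_mla (int32x4_t acc, int16x4_t a, int16x4_t b)
{
  return vmlal_lane_s16 (acc, a, b, 1);
}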

    gcc/
	PR target/91598
	* config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
	* config/aarch64/aarch64-simd.md
	(aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
	(aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
	* config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
	* config/aarch64/arm_neon.h:
	(vmlal_lane_s16): Expand using intrinsics rather than inline asm.
	(vmlal_lane_u16): Likewise.
	(vmlal_lane_s32): Likewise.
	(vmlal_lane_u32): Likewise.
	(vmlal_laneq_s16): Likewise.
	(vmlal_laneq_u16): Likewise.
	(vmlal_laneq_s32): Likewise.
	(vmlal_laneq_u32): Likewise.
	(vmull_lane_s16): Likewise.
	(vmull_lane_u16): Likewise.
	(vmull_lane_s32): Likewise.
	(vmull_lane_u32): Likewise.
	(vmull_laneq_s16): Likewise.
	(vmull_laneq_u16): Likewise.
	(vmull_laneq_s32): Likewise.
	(vmull_laneq_u32): Likewise.
	* config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
	(Qlane): Likewise.
gcc/ChangeLog

2020-03-06  Wilco Dijkstra  <wdijkstr@arm.com>

	PR target/91598
	* config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
	* config/aarch64/aarch64-simd.md
	(aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
	(aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
	* config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
	* config/aarch64/arm_neon.h:
	(vmlal_lane_s16): Expand using intrinsics rather than inline asm.
	(vmlal_lane_u16): Likewise.
	(vmlal_lane_s32): Likewise.
	(vmlal_lane_u32): Likewise.
	(vmlal_laneq_s16): Likewise.
	(vmlal_laneq_u16): Likewise.
	(vmlal_laneq_s32): Likewise.
	(vmlal_laneq_u32): Likewise.
	(vmull_lane_s16): Likewise.
	(vmull_lane_u16): Likewise.
	(vmull_lane_s32): Likewise.
	(vmull_lane_u32): Likewise.
	(vmull_laneq_s16): Likewise.
	(vmull_laneq_u16): Likewise.
	(vmull_laneq_s32): Likewise.
	(vmull_laneq_u32): Likewise.
	* config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
	(Qlane): Likewise.

2020-03-06  Wilco Dijkstra  <wdijkstr@arm.com>

	* aarch64/aarch64-simd.md (aarch64_mla_elt<mode>): Correct lane syntax.
	(aarch64_mla_elt_<vswap_width_name><mode>): Likewise.
	(aarch64_mls_elt<mode>): Likewise.
gcc/config/aarch64/aarch64-builtins.c
@@ -175,6 +175,11 @@ aarch64_types_ternopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
      qualifier_unsigned, qualifier_unsigned };
#define TYPES_TERNOPU (aarch64_types_ternopu_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_ternopu_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned,
      qualifier_unsigned, qualifier_lane_index };
#define TYPES_TERNOPU_LANE (aarch64_types_ternopu_lane_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_ternopu_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned,
      qualifier_unsigned, qualifier_immediate };
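
The new TERNOPU_LANE shape describes an unsigned result, two unsigned vector inputs, and a constant lane index (range-checked via qualifier_lane_index). A small sketch using vmull_lane_u16, one of the intrinsics this patch moves off inline asm (the wrapper name below is hypothetical):

#include <arm_neon.h>

/* Unsigned widening multiply by lane 3 of b; the lane argument must be a
   constant in range for the four 16-bit lanes of b.  */
uint32x4_t
umull_lane_example (uint16x4_t a, uint16x4_t b)
{
  return vmull_lane_u16 (a, b, 3);
}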
gcc/config/aarch64/aarch64-simd-builtins.def
@@ -191,6 +191,15 @@
BUILTIN_VQW (BINOP, vec_widen_smult_hi_, 10)
BUILTIN_VQW (BINOPU, vec_widen_umult_hi_, 10)
BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_lane_, 0)
BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_lane_, 0)
BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_laneq_, 0)
BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_laneq_, 0)
BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_lane_, 0)
BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_lane_, 0)
BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_laneq_, 0)
BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_laneq_, 0)
BUILTIN_VSD_HSI (BINOP, sqdmull, 0)
BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0)
BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0)
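
The *_laneq_ entries cover the variants that take the lane from a 128-bit Q-register vector. A minimal sketch using one of the unsigned laneq intrinsics listed in the ChangeLog above (the wrapper name is hypothetical):

#include <arm_neon.h>

/* Widen 32-bit lanes to 64 bits and accumulate, selecting the multiplier
   from lane 3 of a 128-bit vector.  */
uint64x2_t
umlal_laneq_example (uint64x2_t acc, uint32x2_t a, uint32x4_t b)
{
  return vmlal_laneq_u32 (acc, a, b, 3);
}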
gcc/config/aarch64/aarch64-simd.md
@@ -1892,6 +1892,46 @@
}
)
;; vmull_lane_s16 intrinsics
(define_insn "aarch64_vec_<su>mult_lane<Qlane>"
  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
        (mult:<VWIDE>
          (ANY_EXTEND:<VWIDE>
            (match_operand:<VCOND> 1 "register_operand" "w"))
          (ANY_EXTEND:<VWIDE>
            (vec_duplicate:<VCOND>
              (vec_select:<VEL>
                (match_operand:VDQHS 2 "register_operand" "<vwx>")
                (parallel [(match_operand:SI 3 "immediate_operand" "i")]))))))]
  "TARGET_SIMD"
  {
    operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
    return "<su>mull\\t%0.<Vwtype>, %1.<Vcondtype>, %2.<Vetype>[%3]";
  }
  [(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
)

;; vmlal_lane_s16 intrinsics
(define_insn "aarch64_vec_<su>mlal_lane<Qlane>"
  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
        (plus:<VWIDE>
          (mult:<VWIDE>
            (ANY_EXTEND:<VWIDE>
              (match_operand:<VCOND> 2 "register_operand" "w"))
            (ANY_EXTEND:<VWIDE>
              (vec_duplicate:<VCOND>
                (vec_select:<VEL>
                  (match_operand:VDQHS 3 "register_operand" "<vwx>")
                  (parallel [(match_operand:SI 4 "immediate_operand" "i")])))))
          (match_operand:<VWIDE> 1 "register_operand" "0")))]
  "TARGET_SIMD"
  {
    operands[4] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[4]));
    return "<su>mlal\\t%0.<Vwtype>, %2.<Vcondtype>, %3.<Vetype>[%4]";
  }
  [(set_attr "type" "neon_mla_<Vetype>_scalar_long")]
)

;; FP vector operations.
;; AArch64 AdvSIMD supports single-precision (32-bit) and
;; double-precision (64-bit) floating-point data types and arithmetic as
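
The point of expressing these as real instruction patterns is that the neon_mul_*/neon_mla_* type attributes feed the per-core pipeline descriptions, so the scheduler finally has latencies to work with. A sketch (hypothetical function; n is assumed to be a multiple of 4) of the kind of reduction loop that benefits, particularly on in-order cores, since the loads and the dependent smlal chain can now be placed sensibly instead of each intrinsic being an opaque asm block:

#include <arm_neon.h>

/* Accumulate a[i] * b[0] into four 32-bit lanes, 4 elements per iteration.  */
int32x4_t
dot_lane (const int16_t *a, int16x4_t b, int n)
{
  int32x4_t acc = vdupq_n_s32 (0);
  for (int i = 0; i < n; i += 4)
    acc = vmlal_lane_s16 (acc, vld1_s16 (a + i), b, 0);
  return acc;
}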
gcc/config/aarch64/iterators.md
@@ -986,6 +986,13 @@
(V4SF "4s") (V2DF "2d")
(V4HF "4h") (V8HF "8h")])
;; Map mode to type used in widening multiplies.
(define_mode_attr Vcondtype [(V4HI "4h") (V8HI "4h") (V2SI "2s") (V4SI "2s")])

;; Map lane mode to name
(define_mode_attr Qlane [(V4HI "_v4hi") (V8HI "q_v4hi")
                         (V2SI "_v2si") (V4SI "q_v2si")])

(define_mode_attr Vrevsuff [(V4HI "16") (V8HI "16") (V2SI "32")
                            (V4SI "32") (V2DI "64")])