Commit 6ccd2ece by Bill Schmidt, committed by William Schmidt

altivec.md (UNSPEC_VMRGOW_DIRECT): New constant.

2017-08-17  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* config/rs6000/altivec.md (UNSPEC_VMRGOW_DIRECT): New constant.
	(p8_vmrgew_v4sf_direct): Generalize to p8_vmrgew_<mode>_direct.
	(p8_vmrgow_<mode>_direct): New define_insn.
	* config/rs6000/rs6000.c (altivec_expand_vec_perm_const): Properly
	handle endianness for vmrgew and vmrgow permute patterns.

From-SVN: r251161
parent 1f81b321
gcc/ChangeLog
+2017-08-17  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+
+	* config/rs6000/altivec.md (UNSPEC_VMRGOW_DIRECT): New constant.
+	(p8_vmrgew_v4sf_direct): Generalize to p8_vmrgew_<mode>_direct.
+	(p8_vmrgow_<mode>_direct): New define_insn.
+	* config/rs6000/rs6000.c (altivec_expand_vec_perm_const): Properly
+	handle endianness for vmrgew and vmrgow permute patterns.
+
 2017-08-17  Peter Bergner  <bergner@vnet.ibm.com>
 
 	* config/rs6000/altivec.md (VParity): Remove TARGET_VSX_TIMODE.
gcc/config/rs6000/altivec.md
@@ -148,6 +148,7 @@
    UNSPEC_VMRGL_DIRECT
    UNSPEC_VSPLT_DIRECT
    UNSPEC_VMRGEW_DIRECT
+   UNSPEC_VMRGOW_DIRECT
    UNSPEC_VSUMSWS_DIRECT
    UNSPEC_VADDCUQ
    UNSPEC_VADDEUQM
@@ -1357,15 +1358,24 @@
 }
   [(set_attr "type" "vecperm")])
 
-(define_insn "p8_vmrgew_v4sf_direct"
-  [(set (match_operand:V4SF 0 "register_operand" "=v")
-	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
-		      (match_operand:V4SF 2 "register_operand" "v")]
+(define_insn "p8_vmrgew_<mode>_direct"
+  [(set (match_operand:VSX_W 0 "register_operand" "=v")
+	(unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
+		       (match_operand:VSX_W 2 "register_operand" "v")]
 		     UNSPEC_VMRGEW_DIRECT))]
   "TARGET_P8_VECTOR"
   "vmrgew %0,%1,%2"
   [(set_attr "type" "vecperm")])
 
+(define_insn "p8_vmrgow_<mode>_direct"
+  [(set (match_operand:VSX_W 0 "register_operand" "=v")
+	(unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
+		       (match_operand:VSX_W 2 "register_operand" "v")]
+		     UNSPEC_VMRGOW_DIRECT))]
+  "TARGET_P8_VECTOR"
+  "vmrgow %0,%1,%2"
+  [(set_attr "type" "vecperm")])
+
 (define_expand "vec_widen_umult_even_v16qi"
   [(use (match_operand:V8HI 0 "register_operand" ""))
    (use (match_operand:V16QI 1 "register_operand" ""))
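With the VSX_W mode iterator, the single vmrgew pattern now provides both the V4SI and V4SF variants (and vmrgow gains matching ones), which is what lets the rs6000.c change below pick an insn code per mode and per endianness. As an illustration only (not part of the patch; the function name and the expectation of a single merge instruction are assumptions that depend on target flags such as -mcpu=power8), a constant word shuffle of this shape is the kind of permutation altivec_expand_vec_perm_const can map onto one of these merge patterns:

/* Illustrative sketch: alternate words from two V4SI inputs.  Shuffle
   indices 0-3 refer to 'a', 4-7 to 'b'.  On POWER8 the constant-permute
   expander may implement this as a single word-merge instruction; whether
   that is vmrgew or vmrgow depends on endianness, which is the point of
   this patch.  */
typedef int v4si __attribute__ ((vector_size (16)));

v4si
merge_alternating_words (v4si a, v4si b)
{
  return __builtin_shuffle (a, b, (v4si) { 0, 4, 2, 6 });
}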
gcc/config/rs6000/rs6000.c
@@ -35282,9 +35282,13 @@ altivec_expand_vec_perm_const (rtx operands[4])
       (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
        : CODE_FOR_altivec_vmrghw_direct),
       { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
-    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew_v4si,
+    { OPTION_MASK_P8_VECTOR,
+      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
+       : CODE_FOR_p8_vmrgow_v4sf_direct),
       { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
-    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
+    { OPTION_MASK_P8_VECTOR,
+      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
+       : CODE_FOR_p8_vmrgew_v4sf_direct),
       { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
   };
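The byte indices in this table are written in big-endian element order, which is why the insn code is now chosen per endianness: the bytes that pick out the even words under big-endian numbering pick out the odd words once the elements are renumbered for little-endian. A small standalone sketch (illustration only, not part of the patch) makes the renumbering explicit for the first new entry:

/* Illustration only.  'perm' is the control vector from the first new
   table entry above: in big-endian byte numbering it selects the even
   words of the two inputs (vector 1 = bytes 0-15, vector 2 = bytes
   16-31).  */
#include <stdio.h>

int
main (void)
{
  static const int perm[16] =
    { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 };

  for (int i = 0; i < 16; i += 4)
    {
      int byte = perm[i];
      int vec = byte / 16 + 1;        /* which input vector (1 or 2) */
      int be_word = (byte % 16) / 4;  /* word index in big-endian order */
      int le_word = 3 - be_word;      /* same word in little-endian order */
      printf ("result word %d <- vector %d, BE word %d = LE word %d\n",
              i / 4, vec, be_word, le_word);
    }
  return 0;
}

The output pairs big-endian words 0 and 2 with little-endian words 3 and 1; that is, a big-endian even-word merge is a little-endian odd-word merge, so the expander must use vmrgow for this entry on little-endian targets and vmrgew on big-endian ones.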