Commit d33a4d86 by Michael Meissner (committed by Michael Meissner)

rs6000.md (movsi_from_sf): Adjust code to eliminate doing a 32-bit shift right or vector extract after...

2017-09-26  Michael Meissner  <meissner@linux.vnet.ibm.com>

	* config/rs6000/rs6000.md (movsi_from_sf): Adjust code to
	eliminate doing a 32-bit shift right or vector extract after doing
	XSCVDPSPN.  Use zero_extendsidi2 instead of p8_mfvsrd_4_disf to
	move the value to the GPRs.
	(movdi_from_sf_zero_ext): Likewise.
	(reload_gpr_from_vsxsf): Likewise.
	(p8_mfvsrd_4_disf): Delete, no longer used.

From-SVN: r253209
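
For context (not part of the commit): the patterns touched here implement bit-for-bit moves of an SFmode value into integer registers. Below is a minimal, hedged sketch of C code that can exercise movsi_from_sf and *movdi_from_sf_zero_ext on a direct-move target; the function names are illustrative, not taken from the GCC sources.

/* Hedged sketch: a bit-cast of a float that lives in a VSX register has
   to be moved to a GPR, which is what movsi_from_sf handles; widening
   the result to 64 bits corresponds to *movdi_from_sf_zero_ext.  */
#include <stdint.h>
#include <string.h>

uint32_t
float_to_bits (float f)
{
  uint32_t u;
  memcpy (&u, &f, sizeof (u));   /* SImode view of the SFmode value.  */
  return u;
}

uint64_t
float_to_bits_zext (float f)
{
  return (uint64_t) float_to_bits (f);   /* zero-extended to DImode.  */
}

Per the ChangeLog above, the GPR-bound split for such moves now emits XSCVDPSPN followed by a single zero-extending 32-bit direct move (zero_extendsidi2), instead of XSCVDPSPN, MFVSRD and a 64-bit shift right of 32.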
parent 8615012c
gcc/ChangeLog
@@ -4,6 +4,14 @@
 	sign extension from a vector register to a GPR by doing a 32-bit
 	direct move and then an EXTSW.
 	(extendsi<mode>2 splitter): Likewise.
+	(movsi_from_sf): Adjust code to eliminate doing a 32-bit shift
+	right or vector extract after doing XSCVDPSPN.  Use
+	zero_extendsidi2 instead of p8_mfvsrd_4_disf to move the value to
+	the GPRs.
+	(movdi_from_sf_zero_ext): Likewise.
+	(reload_gpr_from_vsxsf): Likewise.
+	(p8_mfvsrd_4_disf): Delete, no longer used.
 2017-09-26  Martin Jambor  <mjambor@suse.cz>
gcc/config/rs6000/rs6000.md
@@ -6806,25 +6806,25 @@
 ;; needed.
 ;; MR LWZ LFIWZX LXSIWZX STW
-;; STFS STXSSP STXSSPX VSX->GPR MTVSRWZ
-;; VSX->VSX
+;; STFS STXSSP STXSSPX VSX->GPR VSX->VSX
+;; MTVSRWZ
 (define_insn_and_split "movsi_from_sf"
   [(set (match_operand:SI 0 "nonimmediate_operand"
         "=r, r, ?*wI, ?*wH, m,
-         m, wY, Z, r, wIwH,
-         ?wK")
+         m, wY, Z, r, ?*wIwH,
+         wIwH")
        (unspec:SI [(match_operand:SF 1 "input_operand"
         "r, m, Z, Z, r,
-         f, wb, wu, wIwH, r,
-         wK")]
+         f, wb, wu, wIwH, wIwH,
+         r")]
                   UNSPEC_SI_FROM_SF))
    (clobber (match_scratch:V4SF 2
         "=X, X, X, X, X,
-         X, X, X, wa, X,
-         wa"))]
+         X, X, X, wIwH, X,
+         X"))]
   "TARGET_NO_SF_SUBREG
    && (register_operand (operands[0], SImode)
@@ -6839,63 +6839,52 @@
   stxssp %1,%0
   stxsspx %x1,%y0
   #
-  mtvsrwz %x0,%1
-  #"
+  xscvdpspn %x0,%x1
+  mtvsrwz %x0,%1"
   "&& reload_completed
-   && register_operand (operands[0], SImode)
+   && int_reg_operand (operands[0], SImode)
    && vsx_reg_sfsubreg_ok (operands[1], SFmode)"
   [(const_int 0)]
 {
   rtx op0 = operands[0];
   rtx op1 = operands[1];
   rtx op2 = operands[2];
-  rtx op0_di = gen_rtx_REG (DImode, REGNO (op0));
+  rtx op0_di = gen_rtx_REG (DImode, reg_or_subregno (op0));
+  rtx op2_si = gen_rtx_REG (SImode, reg_or_subregno (op2));
   emit_insn (gen_vsx_xscvdpspn_scalar (op2, op1));
-  if (int_reg_operand (op0, SImode))
-    {
-      emit_insn (gen_p8_mfvsrd_4_disf (op0_di, op2));
-      emit_insn (gen_lshrdi3 (op0_di, op0_di, GEN_INT (32)));
-    }
-  else
-    {
-      rtx op1_v16qi = gen_rtx_REG (V16QImode, REGNO (op1));
-      rtx byte_off = VECTOR_ELT_ORDER_BIG ? const0_rtx : GEN_INT (12);
-      emit_insn (gen_vextract4b (op0_di, op1_v16qi, byte_off));
-    }
+  emit_insn (gen_zero_extendsidi2 (op0_di, op2_si));
   DONE;
 }
   [(set_attr "type"
         "*, load, fpload, fpload, store,
-         fpstore, fpstore, fpstore, mftgpr, mffgpr,
-         veclogical")
+         fpstore, fpstore, fpstore, mftgpr, fp,
+         mffgpr")
    (set_attr "length"
         "4, 4, 4, 4, 4,
-         4, 4, 4, 12, 4,
-         8")])
+         4, 4, 4, 8, 4,
+         4")])
 ;; movsi_from_sf with zero extension
 ;;
 ;; RLDICL LWZ LFIWZX LXSIWZX VSX->GPR
-;; MTVSRWZ VSX->VSX
+;; VSX->VSX MTVSRWZ
 (define_insn_and_split "*movdi_from_sf_zero_ext"
   [(set (match_operand:DI 0 "gpc_reg_operand"
         "=r, r, ?*wI, ?*wH, r,
-         wIwH, ?wK")
+         ?wK, wIwH")
        (zero_extend:DI
         (unspec:SI [(match_operand:SF 1 "input_operand"
         "r, m, Z, Z, wIwH,
-         r, wK")]
+         wIwH, r")]
                    UNSPEC_SI_FROM_SF)))
    (clobber (match_scratch:V4SF 2
         "=X, X, X, X, wa,
-         X, wa"))]
+         wIwH, X"))]
   "TARGET_DIRECT_MOVE_64BIT
    && (register_operand (operands[0], DImode)
@@ -6906,40 +6895,29 @@
   lfiwzx %0,%y1
   lxsiwzx %x0,%y1
   #
-  mtvsrwz %x0,%1
-  #"
+  #
+  mtvsrwz %x0,%1"
   "&& reload_completed
    && register_operand (operands[0], DImode)
    && vsx_reg_sfsubreg_ok (operands[1], SFmode)"
   [(const_int 0)]
 {
   rtx op0 = operands[0];
   rtx op1 = operands[1];
   rtx op2 = operands[2];
+  rtx op2_si = gen_rtx_REG (SImode, reg_or_subregno (op2));
   emit_insn (gen_vsx_xscvdpspn_scalar (op2, op1));
-  if (int_reg_operand (op0, DImode))
-    {
-      emit_insn (gen_p8_mfvsrd_4_disf (op0, op2));
-      emit_insn (gen_lshrdi3 (op0, op0, GEN_INT (32)));
-    }
-  else
-    {
-      rtx op0_si = gen_rtx_REG (SImode, REGNO (op0));
-      rtx op1_v16qi = gen_rtx_REG (V16QImode, REGNO (op1));
-      rtx byte_off = VECTOR_ELT_ORDER_BIG ? const0_rtx : GEN_INT (12);
-      emit_insn (gen_vextract4b (op0_si, op1_v16qi, byte_off));
-    }
+  emit_insn (gen_zero_extendsidi2 (op0, op2_si));
   DONE;
 }
   [(set_attr "type"
-        "*, load, fpload, fpload, mftgpr,
-         mffgpr, veclogical")
+        "*, load, fpload, fpload, two,
+         two, mffgpr")
    (set_attr "length"
-        "4, 4, 4, 4, 12,
-         4, 8")])
+        "4, 4, 4, 4, 8,
+         8, 4")])
 ;; Split a load of a large constant into the appropriate two-insn
 ;; sequence.
@@ -8439,9 +8417,9 @@
 (define_insn_and_split "reload_gpr_from_vsxsf"
   [(set (match_operand:SF 0 "register_operand" "=r")
-       (unspec:SF [(match_operand:SF 1 "register_operand" "wa")]
+       (unspec:SF [(match_operand:SF 1 "register_operand" "ww")]
                   UNSPEC_P8V_RELOAD_FROM_VSX))
-   (clobber (match_operand:V4SF 2 "register_operand" "=wa"))]
+   (clobber (match_operand:V4SF 2 "register_operand" "=wIwH"))]
   "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
   "#"
   "&& reload_completed"
@@ -8450,23 +8428,15 @@
   rtx op0 = operands[0];
   rtx op1 = operands[1];
   rtx op2 = operands[2];
-  rtx diop0 = simplify_gen_subreg (DImode, op0, SFmode, 0);
+  rtx op0_di = gen_rtx_REG (DImode, reg_or_subregno (op0));
+  rtx op2_si = gen_rtx_REG (SImode, reg_or_subregno (op2));
   emit_insn (gen_vsx_xscvdpspn_scalar (op2, op1));
-  emit_insn (gen_p8_mfvsrd_4_disf (diop0, op2));
-  emit_insn (gen_lshrdi3 (diop0, diop0, GEN_INT (32)));
+  emit_insn (gen_zero_extendsidi2 (op0_di, op2_si));
   DONE;
 }
-  [(set_attr "length" "12")
-   (set_attr "type" "three")])
-(define_insn "p8_mfvsrd_4_disf"
-  [(set (match_operand:DI 0 "register_operand" "=r")
-       (unspec:DI [(match_operand:V4SF 1 "register_operand" "wa")]
-                  UNSPEC_P8V_RELOAD_FROM_VSX))]
-  "TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
-  "mfvsrd %0,%x1"
-  [(set_attr "type" "mftgpr")])
+  [(set_attr "length" "8")
+   (set_attr "type" "two")])
 ;; Next come the multi-word integer load and store and the load and store
......
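
Not part of the commit: one hedged way to observe the change is to compile a small bit-cast test for a direct-move CPU and inspect the assembly. The before/after sequences in the comments below are inferred from the patterns in this diff (MFVSRD plus a 64-bit shift right of 32 before; a single zero-extending 32-bit direct move via zero_extendsidi2 after); they are not captured compiler output, and the file name and flags are only examples.

/* Hypothetical test file, e.g. sf-bits.c; build with something like
   gcc -O2 -mcpu=power9 -S sf-bits.c and look at bits_of.

   Expected before r253209 (per p8_mfvsrd_4_disf and gen_lshrdi3 above):
     xscvdpspn  vsX,vsY      convert SFmode value to its 32-bit SP form
     mfvsrd     rN,vsX       64-bit direct move to the GPR
     srdi       rN,rN,32     shift right 32 to isolate the word

   Expected after r253209 (per gen_zero_extendsidi2 above):
     xscvdpspn  vsX,vsY
     mfvsrwz    rN,vsX       32-bit direct move with zero extension  */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

__attribute__ ((noinline)) static uint32_t
bits_of (float f)
{
  uint32_t u;
  memcpy (&u, &f, sizeof (u));
  return u;
}

int
main (void)
{
  printf ("%08x\n", bits_of (1.0f));   /* expect 3f800000 */
  return 0;
}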