Commit 911c8df0 by Michael Meissner, committed by Michael Meissner

altivec.md (altivec_mov<mode>, [...]): Change the RTL attribute "length" from "4" to "*" to allow the length attribute to be adjusted automatically for prefixed load, store, and add immediate instructions.

2019-07-03  Michael Meissner  <meissner@linux.ibm.com>

	* config/rs6000/altivec.md (altivec_mov<mode>, VM2 iterator):
	Change the RTL attribute "length" from "4" to "*" to allow the
	length attribute to be adjusted automatically for prefixed load,
	store, and add immediate instructions.
	* config/rs6000/rs6000.md (extendhi<mode>2, EXTHI iterator):
	Likewise.
	(extendsi<mode>2, EXTSI iterator): Likewise.
	(movsi_internal1): Likewise.
	(movsi_from_sf): Likewise.
	(movdi_from_sf_zero_ext): Likewise.
	(mov<mode>_internal): Likewise.
	(movcc_internal1, QHI iterator): Likewise.
	(mov<mode>_softfloat, FMOVE32 iterator): Likewise.
	(movsf_from_si): Likewise.
	(mov<mode>_hardfloat32, FMOVE64 iterator): Likewise.
	(mov<mode>_softfloat64, FMOVE64 iterator): Likewise.
	(mov<mode>, FMOVE128 iterator): Likewise.
	(movdi_internal64): Likewise.
	* config/rs6000/vsx.md (vsx_le_permute_<mode>, VSX_TI iterator):
	Likewise.
	(vsx_le_undo_permute_<mode>, VSX_TI iterator): Likewise.
	(vsx_mov<mode>_64bit, VSX_M iterator): Likewise.
	(vsx_mov<mode>_32bit, VSX_M iterator): Likewise.
	(vsx_splat_v4sf): Likewise.

From-SVN: r273013
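
For context, an explicit "4" in a define_insn's "length" attribute pins that alternative to one 4-byte word, while "*" makes GCC fall back to the port's default computation of the "length" attribute; since GCC uses instruction lengths for branch shortening, this is what lets an alternative that ends up as an 8-byte prefixed load, store, or add immediate be counted correctly. The pattern below is a minimal, hypothetical sketch of that difference (it is not one of the insns touched by this patch, and the predicates and output templates are only illustrative):

;; Minimal sketch of the attribute change (hypothetical pattern, not from
;; the patch).  GCC uses the "length" attribute for branch shortening, so
;; the value must reflect the real size of the emitted instruction.
(define_insn "*example_movsi"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
        (match_operand:SI 1 "nonimmediate_operand" "m,r"))]
  "gpc_reg_operand (operands[0], SImode)
   || gpc_reg_operand (operands[1], SImode)"
  "@
   lwz%U1%X1 %0,%1
   stw%U0%X0 %1,%0"
  [(set_attr "type" "load,store")
   ;; Old style: both alternatives are hard-wired to 4 bytes, even if a
   ;; prefixed (8-byte) load or store is what actually gets emitted:
   ;;   (set_attr "length" "4,4")
   ;; New style: "*" defers to the port's default "length" computation,
   ;; so the length can be adjusted automatically per alternative:
   (set_attr "length" "*,*")])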
--- a/gcc/config/rs6000/altivec.md
+++ b/gcc/config/rs6000/altivec.md
@@ -256,7 +256,7 @@
    * return output_vec_const_move (operands);
    #"
   [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
-   (set_attr "length" "4,4,4,20,20,20,4,8,32")])
+   (set_attr "length" "*,*,*,20,20,20,*,8,32")])
 
 ;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
 ;; is for unions. However for plain data movement, slightly favor the vector
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -965,7 +965,7 @@
    vextsh2d %0,%1"
   [(set_attr "type" "load,exts,fpload,vecperm")
    (set_attr "sign_extend" "yes")
-   (set_attr "length" "4,4,8,4")
+   (set_attr "length" "*,*,8,*")
    (set_attr "isa" "*,*,p9v,p9v")])
 
 (define_split
@@ -1040,7 +1040,7 @@
    #"
   [(set_attr "type" "load,exts,fpload,fpload,mffgpr,vecexts,vecperm,mftgpr")
    (set_attr "sign_extend" "yes")
-   (set_attr "length" "4,4,4,4,4,4,8,8")
+   (set_attr "length" "*,*,*,*,*,*,8,8")
    (set_attr "isa" "*,*,p6,p8v,p8v,p9v,p8v,p8v")])
 
 (define_split
@@ -6909,11 +6909,11 @@
         veclogical, veclogical, vecsimple, mffgpr, mftgpr,
         *, *, *")
    (set_attr "length"
-       "4, 4, 4, 4, 4,
-        4, 4, 4, 4, 4,
-        8, 4, 4, 4, 4,
-        4, 4, 8, 4, 4,
-        4, 4, 4")
+       "*, *, *, *, *,
+        *, *, *, *, *,
+        8, *, *, *, *,
+        *, *, 8, *, *,
+        *, *, *")
    (set_attr "isa"
        "*, *, *, p8v, p8v,
         *, p8v, p8v, *, *,
@@ -6989,9 +6989,9 @@
        fpstore, fpstore, fpstore, mftgpr, fp,
        mffgpr")
    (set_attr "length"
-       "4, 4, 4, 4, 4,
-        4, 4, 4, 8, 4,
-        4")
+       "*, *, *, *, *,
+        *, *, *, 8, *,
+        *")
    (set_attr "isa"
        "*, *, p8v, p8v, *,
         *, p9v, p8v, p8v, p8v,
@@ -7043,8 +7043,8 @@
       "*, load, fpload, fpload, two,
        two, mffgpr")
    (set_attr "length"
-       "4, 4, 4, 4, 8,
-        8, 4")
+       "*, *, *, *, 8,
+        8, *")
    (set_attr "isa"
        "*, *, p8v, p8v, p8v,
         p9v, p8v")])
@@ -7172,9 +7172,9 @@
        vecsimple, vecperm, vecperm, vecperm, vecperm, mftgpr,
        mffgpr, mfjmpr, mtjmpr, *")
    (set_attr "length"
-       "4, 4, 4, 4, 4, 4,
-        4, 4, 4, 4, 8, 4,
-        4, 4, 4, 4")
+       "*, *, *, *, *, *,
+        *, *, *, *, 8, *,
+        *, *, *, *")
    (set_attr "isa"
        "*, *, p9v, *, p9v, *,
         p9v, p9v, p9v, p9v, p9v, p9v,
@@ -7225,7 +7225,7 @@
            (const_string "mtjmpr")
            (const_string "load")
            (const_string "store")])
-   (set_attr "length" "4,4,12,4,4,8,4,4,4,4,4,4")])
+   (set_attr "length" "*,*,12,*,*,8,*,*,*,*,*,*")])
 
 ;; For floating-point, we normally deal with the floating-point registers
 ;; unless -msoft-float is used. The sole exception is that parameter passing
@@ -7376,11 +7376,11 @@
    nop"
   [(set_attr "type"
       "*, mtjmpr, mfjmpr, load, store, *,
       *, *, *, *")
    (set_attr "length"
-       "4, 4, 4, 4, 4, 4,
-        4, 4, 8, 4")])
+       "*, *, *, *, *, *,
+        *, *, 8, *")])
 
 ;; Like movsf, but adjust a SI value to be used in a SF context, i.e.
 ;; (set (reg:SF ...) (subreg:SF (reg:SI ...) 0))
@@ -7442,8 +7442,8 @@
   DONE;
 }
   [(set_attr "length"
-       "4, 4, 4, 4, 4, 4,
-        4, 12, 4, 4")
+       "*, *, *, *, *, *,
+        *, 12, *, *")
    (set_attr "type"
        "load, fpload, fpload, fpload, store, fpstore,
         fpstore, vecfloat, mffgpr, *")
@@ -7580,8 +7580,8 @@
        store, load, two")
    (set_attr "size" "64")
    (set_attr "length"
-       "4, 4, 4, 4, 4,
-        4, 4, 4, 4, 8,
+       "*, *, *, *, *,
+        *, *, *, *, 8,
         8, 8, 8")
    (set_attr "isa"
        "*, *, *, p9v, p9v,
@@ -7690,8 +7690,8 @@
        *, *, *")
    (set_attr "length"
-       "4, 4, 4, 4, 4, 8,
-        12, 16, 4")])
+       "*, *, *, *, *, 8,
+        12, 16, *")])
 
 (define_expand "mov<mode>"
   [(set (match_operand:FMOVE128 0 "general_operand")
@@ -8707,10 +8707,10 @@
        vecsimple")
    (set_attr "size" "64")
    (set_attr "length"
-       "8, 8, 8, 4, 4, 4,
-        16, 4, 4, 4, 4, 4,
-        4, 4, 4, 4, 4, 8,
-        4")
+       "8, 8, 8, *, *, *,
+        16, *, *, *, *, *,
+        *, *, *, *, *, 8,
+        *")
    (set_attr "isa"
        "*, *, *, *, *, *,
         *, p9v, p7v, p9v, p7v, *,
@@ -8800,11 +8800,11 @@
        mftgpr, mffgpr")
    (set_attr "size" "64")
    (set_attr "length"
-       "4, 4, 4, 4, 4, 20,
-        4, 4, 4, 4, 4, 4,
-        4, 4, 4, 4, 4, 4,
-        4, 8, 4, 4, 4, 4,
-        4, 4")
+       "*, *, *, *, *, 20,
+        *, *, *, *, *, *,
+        *, *, *, *, *, *,
+        *, 8, *, *, *, *,
+        *, *")
    (set_attr "isa"
        "*, *, *, *, *, *,
         *, *, *, p9v, p7v, p9v,
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -923,7 +923,7 @@
    mr %0,%L1\;mr %L0,%1
    ld%U1%X1 %0,%L1\;ld%U1%X1 %L0,%1
    std%U0%X0 %L1,%0\;std%U0%X0 %1,%L0"
-  [(set_attr "length" "4,4,4,8,8,8")
+  [(set_attr "length" "*,*,*,8,8,8")
    (set_attr "type" "vecperm,vecload,vecstore,*,load,store")])
 
 (define_insn_and_split "*vsx_le_undo_permute_<mode>"
@@ -1150,9 +1150,9 @@
        store, load, store, *, vecsimple, vecsimple,
        vecsimple, *, *, vecstore, vecload")
    (set_attr "length"
-       "4, 4, 4, 8, 4, 8,
-        8, 8, 8, 8, 4, 4,
-        4, 20, 8, 4, 4")
+       "*, *, *, 8, *, 8,
+        8, 8, 8, 8, *, *,
+        *, 20, 8, *, *")
    (set_attr "isa"
        "<VSisa>, <VSisa>, <VSisa>, *, *, *,
         *, *, *, *, p9v, *,
@@ -1183,9 +1183,9 @@
        vecsimple, vecsimple, vecsimple, *, *,
        vecstore, vecload")
    (set_attr "length"
-       "4, 4, 4, 16, 16, 16,
-        4, 4, 4, 20, 16,
-        4, 4")
+       "*, *, *, 16, 16, 16,
+        *, *, *, 20, 16,
+        *, *")
    (set_attr "isa"
        "<VSisa>, <VSisa>, <VSisa>, *, *, *,
         p9v, *, <VSisa>, *, *,
@@ -4112,7 +4112,7 @@
            (const_int 0)] UNSPEC_VSX_XXSPLTW))]
   ""
   [(set_attr "type" "vecload,vecperm,mftgpr")
-   (set_attr "length" "4,8,4")
+   (set_attr "length" "*,8,*")
    (set_attr "isa" "*,p8v,*")])
 
 ;; V4SF/V4SI splat from a vector element