Commit 4d3d7f15 by Uros Bizjak

i386.md (*movdf_internal_rex64): Remove MODE_TI handling.

	* config/i386/i386.md (*movdf_internal_rex64) <case 8,9,10>:
	Remove MODE_TI handling.  Remove SSE1 handling in attribute "mode"
	calculation.
	(*movdf_internal_rex64) <case 6,7,8>: Remove MODE_TI handling.
	Simplify MODE_V1DF and MODE_V2SF handling.
	(*movsf_internal): Remove x constraint from alternative 7 of operand 1.
	Simplify MODE_SF handling.

From-SVN: r174664
parent 8d231ff2
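The change is easier to follow with the output-template dispatch in mind: each SSE alternative of the move patterns switches on get_attr_mode (insn), and a case without a return falls through into the next one. Below is a minimal standalone C sketch, not the GCC sources, of that fall-through for the DFmode pattern. The enum, the flag parameter and main are stand-ins for get_attr_mode (), TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL and the insn emitter, and the premise that the "mode" attribute can no longer yield MODE_TI for these alternatives (which is what makes the removed case dead) is an assumption of the sketch, not something stated in the log.

/* Standalone sketch, not the GCC sources: models the get_attr_mode ()
   switch in the *movdf_internal_rex64 output template.  The MODE_*
   values and the flag are stand-ins for the real attribute values and
   TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL.  */
#include <stdio.h>

enum attr_mode { MODE_TI, MODE_V2DF, MODE_V4SF, MODE_DF };

/* Before the patch: MODE_TI had its own case and fell through into
   MODE_V2DF (and further into MODE_V4SF) when the tuning flag asked
   for packed-single moves.  */
static const char *
df_move_before (enum attr_mode mode, int packed_single_optimal)
{
  switch (mode)
    {
    case MODE_TI:
      if (!packed_single_optimal)
	return "%vmovdqa\t{%1, %0|%0, %1}";
      /* FALLTHRU */
    case MODE_V2DF:
      if (!packed_single_optimal)
	return "%vmovapd\t{%1, %0|%0, %1}";
      /* FALLTHRU */
    case MODE_V4SF:
      return "%vmovaps\t{%1, %0|%0, %1}";
    default:
      return "%vmovsd\t{%1, %0|%0, %1}";
    }
}

/* After the patch: assuming the "mode" attribute never evaluates to
   MODE_TI for these alternatives any more, that case is dead code and
   the remaining fall-through chain is unchanged.  */
static const char *
df_move_after (enum attr_mode mode, int packed_single_optimal)
{
  switch (mode)
    {
    case MODE_V2DF:
      if (!packed_single_optimal)
	return "%vmovapd\t{%1, %0|%0, %1}";
      /* FALLTHRU */
    case MODE_V4SF:
      return "%vmovaps\t{%1, %0|%0, %1}";
    default:
      return "%vmovsd\t{%1, %0|%0, %1}";
    }
}

int
main (void)
{
  /* The templates picked for the modes that still occur are identical
     before and after the removal.  */
  printf ("%s\n", df_move_before (MODE_V2DF, 1));
  printf ("%s\n", df_move_after (MODE_V2DF, 1));
  return 0;
}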
gcc/ChangeLog:

+2011-06-05  Uros Bizjak  <ubizjak@gmail.com>
+
+	* config/i386/i386.md (*movdf_internal_rex64) <case 8,9,10>:
+	Remove MODE_TI handling.  Remove SSE1 handling in attribute "mode"
+	calculation.
+	(*movdf_internal_rex64) <case 6,7,8>: Remove MODE_TI handling.
+	Simplify MODE_V1DF and MODE_V2SF handling.
+	(*movsf_internal): Remove x constraint from alternative 7 of operand 1.
+	Simplify MODE_SF handling.
+
 2011-06-04  Jan Hubicka  <jh@suse.cz>
 
 	PR tree-optimization/48893
...
@@ -149,8 +159,7 @@
 	* config/arm/arm-cores.def (strongarm, strongarm110, strongarm1100)
 	(strongarm1110): Use strongarm tuning.
-	* config/arm/arm-protos.h (tune_params): Add max_insns_skipped
-	field.
+	* config/arm/arm-protos.h (tune_params): Add max_insns_skipped field.
 	* config/arm/arm.c (arm_strongarm_tune): New.
 	(arm_slowmul_tune, arm_fastmul_tune, arm_xscale_tune, arm_9e_tune)
 	(arm_v6t2_tune, arm_cortex_tune, arm_cortex_a5_tune)
...@@ -2956,9 +2956,6 @@ ...@@ -2956,9 +2956,6 @@
case 10: case 10:
switch (get_attr_mode (insn)) switch (get_attr_mode (insn))
{ {
case MODE_TI:
if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
return "%vmovdqa\t{%1, %0|%0, %1}";
case MODE_V2DF: case MODE_V2DF:
if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL) if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
return "%vmovapd\t{%1, %0|%0, %1}"; return "%vmovapd\t{%1, %0|%0, %1}";
...@@ -2970,8 +2967,7 @@ ...@@ -2970,8 +2967,7 @@
case MODE_DF: case MODE_DF:
if (TARGET_AVX && REG_P (operands[0]) && REG_P (operands[1])) if (TARGET_AVX && REG_P (operands[0]) && REG_P (operands[1]))
return "vmovsd\t{%1, %0, %0|%0, %0, %1}"; return "vmovsd\t{%1, %0, %0|%0, %0, %1}";
else return "%vmovsd\t{%1, %0|%0, %1}";
return "%vmovsd\t{%1, %0|%0, %1}";
case MODE_V1DF: case MODE_V1DF:
return "%vmovlpd\t{%1, %d0|%d0, %1}"; return "%vmovlpd\t{%1, %d0|%d0, %1}";
case MODE_V2SF: case MODE_V2SF:
@@ -3014,13 +3010,6 @@
 	     (eq_attr "alternative" "3,4,5,6,11,12")
 	       (const_string "DI")
-	     /* For SSE1, we have many fewer alternatives.  */
-	     (eq (symbol_ref "TARGET_SSE2") (const_int 0))
-	       (cond [(eq_attr "alternative" "7,8")
-			(const_string "V4SF")
-		     ]
-		 (const_string "V2SF"))
 	     /* xorps is one byte shorter.  */
 	     (eq_attr "alternative" "7")
 	       (cond [(ne (symbol_ref "optimize_function_for_size_p (cfun)")
@@ -3099,9 +3088,6 @@
     case 8:
       switch (get_attr_mode (insn))
 	{
-	case MODE_TI:
-	  if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
-	    return "%vmovdqa\t{%1, %0|%0, %1}";
 	case MODE_V2DF:
 	  if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
 	    return "%vmovapd\t{%1, %0|%0, %1}";
@@ -3113,18 +3099,11 @@
 	case MODE_DF:
 	  if (TARGET_AVX && REG_P (operands[0]) && REG_P (operands[1]))
 	    return "vmovsd\t{%1, %0, %0|%0, %0, %1}";
-	  else
-	    return "%vmovsd\t{%1, %0|%0, %1}";
+	  return "%vmovsd\t{%1, %0|%0, %1}";
 	case MODE_V1DF:
-	  if (TARGET_AVX && REG_P (operands[0]))
-	    return "vmovlpd\t{%1, %0, %0|%0, %0, %1}";
-	  else
-	    return "%vmovlpd\t{%1, %0|%0, %1}";
+	  return "%vmovlpd\t{%1, %d0|%d0, %1}";
 	case MODE_V2SF:
-	  if (TARGET_AVX && REG_P (operands[0]))
-	    return "vmovlps\t{%1, %0, %0|%0, %0, %1}";
-	  else
-	    return "%vmovlps\t{%1, %0|%0, %1}";
+	  return "%vmovlps\t{%1, %d0|%d0, %1}";
 	default:
 	  gcc_unreachable ();
 	}
@@ -3150,9 +3129,9 @@
 	     /* For SSE1, we have many fewer alternatives.  */
 	     (eq (symbol_ref "TARGET_SSE2") (const_int 0))
-	       (cond [(eq_attr "alternative" "5,6")
-			(const_string "V4SF")
-		     ]
-		 (const_string "V2SF"))
+	       (if_then_else
+		 (eq_attr "alternative" "5,6")
+		 (const_string "V4SF")
+		 (const_string "V2SF"))
 	     /* xorps is one byte shorter.  */
@@ -3195,9 +3174,9 @@
 (define_insn "*movsf_internal"
   [(set (match_operand:SF 0 "nonimmediate_operand"
-	  "=f,m,f,?r ,?m,x,x,x ,m,!*y,!m,!*y,?Yi,?r,!*Ym,!r")
+	  "=f,m,f,?r ,?m,x,x,x,m,!*y,!m,!*y,?Yi,?r,!*Ym,!r")
 	(match_operand:SF 1 "general_operand"
-	  "fm,f,G,rmF,Fr,C,x,xm,x,m ,*y,*y ,r ,Yi,r ,*Ym"))]
+	  "fm,f,G,rmF,Fr,C,x,m,x,m ,*y,*y ,r ,Yi,r ,*Ym"))]
   "!(MEM_P (operands[0]) && MEM_P (operands[1]))
    && (!can_create_pseudo_p ()
        || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
@@ -3228,23 +3207,24 @@
     case 6:
       if (get_attr_mode (insn) == MODE_V4SF)
 	return "%vmovaps\t{%1, %0|%0, %1}";
-      else
-	return "%vmovss\t{%1, %d0|%d0, %1}";
     case 7:
-      if (TARGET_AVX && REG_P (operands[1]))
-	return "vmovss\t{%1, %0, %0|%0, %0, %1}";
-      else
-	return "%vmovss\t{%1, %0|%0, %1}";
     case 8:
+      if (TARGET_AVX && REG_P (operands[0]) && REG_P (operands[1]))
+	return "vmovss\t{%1, %0, %0|%0, %0, %1}";
       return "%vmovss\t{%1, %0|%0, %1}";
-    case 9: case 10: case 14: case 15:
+    case 9:
+    case 10:
+    case 14:
+    case 15:
       return "movd\t{%1, %0|%0, %1}";
     case 11:
       return "movq\t{%1, %0|%0, %1}";
-    case 12: case 13:
+    case 12:
+    case 13:
       return "%vmovd\t{%1, %0|%0, %1}";
     default:
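The *movsf_internal hunks tie the constraint edit to the template simplification: with the "x" letter gone from alternative 7 of operand 1, that alternative can only load from memory, so its register-to-register AVX special case is unreachable and the SF alternatives can share one template. Here is a standalone C sketch, not the GCC sources, of the dispatch as reconstructed above; the alternative numbers come from the constraint strings, while the function name and boolean parameters are stand-ins for which_alternative, get_attr_mode () and REG_P ().

/* Standalone sketch, not the GCC sources: SF-mode move selection for
   the SSE alternatives of *movsf_internal after the patch, as
   reconstructed from the diff above.  */
#include <stdbool.h>
#include <stdio.h>

static const char *
sf_move (int alternative, bool v4sf_mode, bool target_avx,
	 bool op0_is_reg, bool op1_is_reg)
{
  switch (alternative)
    {
    case 6:			/* xmm <- xmm */
      if (v4sf_mode)
	return "%vmovaps\t{%1, %0|%0, %1}";
      /* FALLTHRU */
    case 7:			/* xmm <- mem: with "x" removed from the
				   constraint, a register source is
				   impossible for this alternative.  */
    case 8:			/* mem <- xmm */
      /* The AVX three-operand form is only reachable from case 6,
	 where both operands can be registers.  */
      if (target_avx && op0_is_reg && op1_is_reg)
	return "vmovss\t{%1, %0, %0|%0, %0, %1}";
      return "%vmovss\t{%1, %0|%0, %1}";
    default:
      return "#";		/* placeholder for the non-SSE alternatives */
    }
}

int
main (void)
{
  /* Register-to-register copy with AVX uses the three-operand vmovss;
     loads and stores fall back to the two-operand template.  */
  printf ("%s\n", sf_move (6, false, true, true, true));
  printf ("%s\n", sf_move (7, false, true, true, false));
  return 0;
}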