Commit ef719a44, authored and committed by Richard Henderson

emmintrin.h (_mm_cvtsi128_si32): Move earlier.

	* config/i386/emmintrin.h (_mm_cvtsi128_si32): Move earlier.
	(_mm_cvtsi128_si64x): Likewise.
	(_mm_srl_epi64, _mm_srl_epi32, _mm_srl_epi16, _mm_sra_epi32,
	_mm_sra_epi16, _mm_sll_epi64, _mm_sll_epi32, _mm_sll_epi16): Use
	the _mm_{srl,sll}i_foo counterpart, and _mm_cvtsi128_si32.
	* config/i386/i386-modes.def: Add V16HI, V32QI, V4DF, V8SF.
	* config/i386/i386-protos.h: Update.
	* config/i386/i386.c (print_operand): Add 'H'.
	(ix86_fixup_binary_operands): Split out from ...
	(ix86_expand_binary_operator): ... here.
	(ix86_fixup_binary_operands_no_copy): New.
	(ix86_expand_fp_absneg_operator): Handle vector mode results.
	(bdesc_2arg): Update names for sse{,2,3}_ prefixes.
	(ix86_init_mmx_sse_builtins): Remove *maskncmp* special cases.
	(safe_vector_operand): Use CONST0_RTX.
	(ix86_expand_binop_builtin): Use ix86_fixup_binary_operands.
	(ix86_expand_builtin): Merge CODE_FOR_sse2_maskmovdqu_rex64 and
	CODE_FOR_sse2_maskmovdqu.  Special case SSE version of MASKMOVDQU
	expansion.  Update names for sse{,2,3}_ prefixes.  Remove *maskncmp*
	special cases.
	* config/i386/i386.h (IX86_BUILTIN_CMPNGTSS): New.
	(IX86_BUILTIN_CMPNGESS): New.
	* config/i386/i386.md (UNSPEC_FIX_NOTRUNC): New.
	(attr type): Add sselog1.
	(attr unit, attr memory): Handle it.
	(movti, movti_internal, movti_rex64): Move near other integer moves.
	(movtf, movtf_internal): Move near other fp moves.
	(SSEMODE, SSEMODEI, vec_setv2df, vec_extractv2df, vec_initv2df,
	vec_setv4sf, vec_extractv4sf, vec_initv4sf, movv4sf, movv4sf_internal,
	movv2df, movv2df_internal, mov<SSEMODEI>, mov<SSEMODEI>_internal,
	movmisalign<SSEMODE>, sse_movups_1, sse_movmskps, sse_movntv4sf,
	sse_movhlps, sse_movlhps, sse_storehps, sse_loadhps, sse_storelps,
	sse_loadlps, sse_loadss, sse_loadss_1, sse_movss, sse_storess,
	sse_shufps, addv4sf3, vmaddv4sf3, subv4sf3, vmsubv4sf3, negv4sf2,
	mulv4sf3, vmmulv4sf3, divv4sf3, vmdivv4sf3, rcpv4sf2, vmrcpv4sf2,
	rsqrtv4sf2, vmrsqrtv4sf2, sqrtv4sf2, vmsqrtv4sf2, sse_andv4sf3,
	sse_nandv4sf3, sse_iorv4sf3, sse_xorv4sf3, sse2_andv2df3,
	sse2_nandv2df3, sse2_iorv2df3, sse2_xorv2df3, sse2_andv2di3,
	sse2_nandv2di3, sse2_iorv2di3, sse2_xorv2di3, maskcmpv4sf3,
	vmmaskcmpv4sf3, sse_comi, sse_ucomi, sse_unpckhps, sse_unpcklps,
	smaxv4sf3, vmsmaxv4sf3, sminv4sf3, vmsminv4sf3, cvtpi2ps, cvtps2pi,
	cvttps2pi, cvtsi2ss, cvtsi2ssq, cvtss2si, cvtss2siq, cvttss2si,
	cvttss2siq, addv2df3, vmaddv2df3, subv2df3, vmsubv2df3, mulv2df3,
	vmmulv2df3, divv2df3, vmdivv2df3, smaxv2df3, vmsmaxv2df3, sminv2df3,
	vmsminv2df3, sqrtv2df2, vmsqrtv2df2, maskcmpv2df3, vmmaskcmpv2df3,
	sse2_comi, sse2_ucomi, sse2_movmskpd, sse2_pmovmskb, sse2_maskmovdqu,
	sse2_maskmovdqu_rex64, sse2_movntv2df, sse2_movntv2di, sse2_movntsi,
	cvtdq2ps, cvtps2dq, cvttps2dq, cvtdq2pd, cvtpd2dq, cvttpd2dq,
	cvtpd2pi, cvttpd2pi, cvtpi2pd, cvtsd2si, cvtsd2siq, cvttsd2si,
	cvttsd2siq, cvtsi2sd, cvtsi2sdq, cvtsd2ss, cvtss2sd, cvtpd2ps,
	cvtps2pd, addv16qi3, addv8hi3, addv4si3, addv2di3, ssaddv16qi3,
	ssaddv8hi3, usaddv16qi3, usaddv8hi3, subv16qi3, subv8hi3, subv4si3,
	subv2di3, sssubv16qi3, sssubv8hi3, ussubv16qi3, ussubv8hi3, mulv8hi3,
	smulv8hi3_highpart, umulv8hi3_highpart, sse2_umulsidi3,
	sse2_umulv2siv2di3, sse2_pmaddwd, sse2_uavgv16qi3, sse2_uavgv8hi3,
	sse2_psadbw, sse2_pinsrw, sse2_pextrw, sse2_pshufd, sse2_pshuflw,
	sse2_pshufhw, eqv16qi3, eqv8hi3, eqv4si3, gtv16qi3, gtv8hi3,
	gtv4si3, umaxv16qi3, smaxv8hi3, uminv16qi3, sminv8hi3, ashrv8hi3,
	ashrv4si3, lshrv8hi3, lshrv4si3, lshrv2di3, ashlv8hi3, ashlv4si3,
	ashlv2di3, sse2_ashlti3, sse2_lshrti3, sse2_unpckhpd, sse2_unpcklpd,
	sse2_packsswb, sse2_packssdw, sse2_packuswb, sse2_punpckhbw,
	sse2_punpckhwd, sse2_punpckhdq, sse2_punpcklbw, sse2_punpcklwd,
	sse2_punpckldq, sse2_punpcklqdq, sse2_punpckhqdq, sse2_movupd,
	sse2_movdqu, sse2_movdq2q, sse2_movdq2q_rex64, sse2_movq2dq,
	sse2_movq2dq_rex64, sse2_loadd, sse2_stored, sse2_storehpd,
	sse2_loadhpd, sse2_storelpd, sse2_loadlpd, sse2_movsd, sse2_loadsd,
	sse2_loadsd_1, sse2_storesd, sse2_shufpd, sse2_clflush, sse2_mfence,
	mfence_insn, sse2_lfence, lfence_insn, mwait, monitor, addsubv4sf3,
	addsubv2df3, haddv4sf3, haddv2df3, hsubv4sf3, hsubv2df3, movshdup,
	movsldup, lddqu, loadddup, movddup): Move to sse.md.  Any with
	non-optabs meanings renamed with an "sse{,2,3}_" prefix at the
	same time.
	(SSEPUSH, push<SSEPUSH>): Remove.
	(MMXPUSH, push<MMXPUSH>): Remove.
	(sse_movaps, sse_movaps_1, sse_movups): Remove.
	(sse2_movapd, sse2_movdqa, sse2_movq): Remove.
	(sse2_andti3, sse2_nandti3, sse2_iorti3, sse2_xorti3): Remove.
	(sse_clrv4sf, sse_clrv2df, sse2_clrti): Remove.
	(maskncmpv4sf3, vmmaskncmpv4sf3): Remove.
	(maskncmpv2df3, vmmaskncmpv2df3): Remove.
	(ashrv8hi3_ti, ashrv4si3_ti, lshrv8hi3_ti, lshrv4si3_ti): Remove.
	(lshrv2di3_ti, ashlv8hi3_ti, ashlv4si3_ti, ashlv2di3_ti): Remove.
	* config/i386/athlon.md (athlon_sselog_load): Handle sselog1.
	(athlon_sselog_load_k8, athlon_sselog, athlon_sselog_k8): Likewise.
	* config/i386/ppro.md (ppro_sse_div_V4SF_load): Fix memory attr.
	(ppro_sse_log_V4SF_load): Similarly.  Handle sselog1.
	(ppro_sse_log_V4SF): Handle sselog1.
	* config/i386/predicates.md (const_0_to_1_operand): New.
	(const_0_to_255_mul_8_operand): New.
	(const_1_to_31_operand): Rename from const_int_1_31_operand.
	(const_2_to_3_operand, const_4_to_7_operand): New.
	* config/i386/sse.md: New file.
	(SSEMODE12, SSEMODE24, SSEMODE124, SSEMODE248, ssevecsize): New.
	(sse_movups): Rename from sse_movups_1.
	(sse_loadlss): Rename from sse_loadss_1.
	(andv4sf3, iorv4sf3, xorv4sf3, andv2df3): Remove the sse prefix
	from the name.
	(negv4sf2): Use ix86_expand_fp_absneg_operator.
	(absv4sf2, negv2df, absv2df): New.
	(addv4sf3): Add expander to call ix86_fixup_binary_operands_no_copy.
	(subv4sf3, mulv4sf3, divv4sf3, smaxv4sf3, sminv4sf3, andv4sf3,
	iorv4sf3, xorv4sf3, addv2df3, subv2df3, mulv2df3, divv2df3,
	smaxv2df3, sminv2df3, andv2df3, iorv2df3, xorv2df3, mulv8hi3,
	umaxv16qi3, smaxv8hi3, uminv16qi3, sminv8hi3): Likewise.
	(sse3_addsubv4sf3): Model correctly.
	(sse3_haddv4sf3, sse3_hsubv4sf3, sse3_addsubv2df3, sse3_haddv2df3,
	sse3_hsubv2df3, sse2_ashlti3, sse2_lshrti3): Likewise.
	(sse_movhlps): Model with vec_select+vec_concat.
	(sse_movlhps, sse_unpckhps, sse_unpcklps, sse3_movshdup,
	sse3_movsldup, sse_shufps, sse_shufps_1, sse2_unpckhpd, sse3_movddup,
	sse2_unpcklpd, sse2_shufpd, sse2_shufpd_1, sse2_punpckhbw,
	sse2_punpcklbw, sse2_punpckhwd, sse2_punpcklwd, sse2_punpckhdq,
	sse2_punpckldq, sse2_punpckhqdq, sse2_punpcklqdq, sse2_pshufd,
	sse2_pshufd_1, sse2_pshuflw, sse2_pshuflw_1, sse2_pshufhw,
	sse2_pshufhw_1): Likewise.
	(neg<SSEMODEI>2, one_cmpl<SSEMODEI>2): New.
	(add<SSEMODEI>3, sse2_ssadd<SSEMODE12>3, sse2_usadd<SSEMODE12>3,
	sub<SSEMODEI>3, sse2_sssub<SSEMODE12>3, sse2_ussub<SSEMODE12>3,
	ashr<SSEMODE24>3, lshr<SSEMODE248>3, sse2_eq<SSEMODE124>3,
	sse2_gt<SSEMODE124>3, and<SSEMODEI>3, sse_nand<SSEMODEI>3,
	ior<SSEMODEI>3, xor<SSEMODEI>3): Macroize from existing patterns.
	(addv4sf3, sse_vmaddv4sf3, mulv4sf3, sse_vmmulv4sf3, smaxv4sf3,
	sse_vmsmaxv4sf3, sminv4sf3, sse_vmsminv4sf3, addv2df3, sse2_vmaddv2df3,
	mulv2df3, sse2_vmmulv2df3, smaxv2df3, sse2_vmsmaxv2df3, sminv2df3,
	sse2_vmsminv2df3, umaxv16qi3, smaxv8hi3, uminv16qi3,
	sminv8hi3): Mark commutative operands.  Use ix86_binary_operator_ok.
	(sse_unpckhps, sse_unpcklps, sse2_packsswb, sse2_packssdw,
	sse2_packuswb, sse2_punpckhbw, sse2_punpcklbw, sse2_punpckhwd,
	sse2_punpcklwd, sse2_punpckhdq, sse2_punpckldq, sse2_punpckhqdq,
	sse2_punpcklqdq): Allow operand2 in memory.
	(sse_movhlps, sse_movlhps, sse2_unpckhpd, sse2_unpcklpd,
	sse2_movsd): Add memory alternatives.
	(sse_storelps): Turn expander into an insn; split after reload.
	(sse_storess, sse2_loadhpd, sse2_loadlpd): Add non-xmm inputs.
	(sse2_storehpd, sse2_storelpd): Add non-xmm outputs.

From-SVN: r93101
gcc/config/i386/athlon.md
@@ -565,21 +565,21 @@
 (define_insn_reservation "athlon_sselog_load" 3
                          (and (eq_attr "cpu" "athlon")
-                              (and (eq_attr "type" "sselog")
+                              (and (eq_attr "type" "sselog,sselog1")
                                    (eq_attr "memory" "load")))
                          "athlon-vector,athlon-fpload2,(athlon-fmul*2)")
 (define_insn_reservation "athlon_sselog_load_k8" 5
                          (and (eq_attr "cpu" "k8")
-                              (and (eq_attr "type" "sselog")
+                              (and (eq_attr "type" "sselog,sselog1")
                                    (eq_attr "memory" "load")))
                          "athlon-double,athlon-fpload2k8,(athlon-fmul*2)")
 (define_insn_reservation "athlon_sselog" 3
                          (and (eq_attr "cpu" "athlon")
-                              (eq_attr "type" "sselog"))
+                              (eq_attr "type" "sselog,sselog1"))
                          "athlon-vector,athlon-fpsched,athlon-fmul*2")
 (define_insn_reservation "athlon_sselog_k8" 3
                          (and (eq_attr "cpu" "k8")
-                              (eq_attr "type" "sselog"))
+                              (eq_attr "type" "sselog,sselog1"))
                          "athlon-double,athlon-fpsched,athlon-fmul")
 ;; ??? pcmp executes in addmul, probably not worthwhile to bother about that.
 (define_insn_reservation "athlon_ssecmp_load" 2
gcc/config/i386/emmintrin.h
@@ -177,6 +177,22 @@ _mm_storer_pd (double *__P, __m128d __A)
   __builtin_ia32_storeapd (__P, __tmp);
 }
 
+static __inline int
+_mm_cvtsi128_si32 (__m128i __A)
+{
+  int __tmp;
+  __builtin_ia32_stored (&__tmp, (__v4si)__A);
+  return __tmp;
+}
+
+#ifdef __x86_64__
+static __inline long long
+_mm_cvtsi128_si64x (__m128i __A)
+{
+  return __builtin_ia32_movdq2q ((__v2di)__A);
+}
+#endif
+
 /* Sets the low DPFP value of A from the low value of B.  */
 static __inline __m128d
 _mm_move_sd (__m128d __A, __m128d __B)
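Hoisting _mm_cvtsi128_si32 above the shift intrinsics is what lets them call it, as the next hunk shows. As a quick illustration of its behavior (my own example, not part of the patch), it extracts the vector's low 32-bit lane:

#include <emmintrin.h>
#include <assert.h>

int
main (void)
{
  /* _mm_set_epi32 lists lanes high-to-low, so the low lane is 1.  */
  __m128i v = _mm_set_epi32 (4, 3, 2, 1);
  assert (_mm_cvtsi128_si32 (v) == 1);
  return 0;
}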
@@ -1157,115 +1173,118 @@ _mm_mul_epu32 (__m128i __A, __m128i __B)
 }
 
 static __inline __m128i
-_mm_sll_epi16 (__m128i __A, __m128i __B)
+_mm_slli_epi16 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_psllw128 ((__v8hi)__A, (__v2di)__B);
+  return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
 }
 
 static __inline __m128i
-_mm_sll_epi32 (__m128i __A, __m128i __B)
+_mm_slli_epi32 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_pslld128 ((__v4si)__A, (__v2di)__B);
+  return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
 }
 
 static __inline __m128i
-_mm_sll_epi64 (__m128i __A, __m128i __B)
+_mm_slli_epi64 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_psllq128 ((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
 }
 
 static __inline __m128i
-_mm_sra_epi16 (__m128i __A, __m128i __B)
+_mm_srai_epi16 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v2di)__B);
+  return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
 }
 
 static __inline __m128i
-_mm_sra_epi32 (__m128i __A, __m128i __B)
+_mm_srai_epi32 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v2di)__B);
+  return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
 }
 
-static __inline __m128i
-_mm_srl_epi16 (__m128i __A, __m128i __B)
+#if 0
+static __m128i __attribute__((__always_inline__))
+_mm_srli_si128 (__m128i __A, const int __B)
 {
-  return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v2di)__B);
+  return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B))
 }
 
-static __inline __m128i
-_mm_srl_epi32 (__m128i __A, __m128i __B)
+static __m128i __attribute__((__always_inline__))
+_mm_srli_si128 (__m128i __A, const int __B)
 {
-  return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v2di)__B);
+  return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B))
 }
+#else
+#define _mm_srli_si128(__A, __B) \
+  ((__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8))
+#define _mm_slli_si128(__A, __B) \
+  ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8))
+#endif
 
 static __inline __m128i
-_mm_srl_epi64 (__m128i __A, __m128i __B)
+_mm_srli_epi16 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B);
+  return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
 }
 
 static __inline __m128i
-_mm_slli_epi16 (__m128i __A, int __B)
+_mm_srli_epi32 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
+  return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
 }
 
 static __inline __m128i
-_mm_slli_epi32 (__m128i __A, int __B)
+_mm_srli_epi64 (__m128i __A, int __B)
 {
-  return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
+  return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
 }
 
 static __inline __m128i
-_mm_slli_epi64 (__m128i __A, int __B)
+_mm_sll_epi16 (__m128i __A, __m128i __B)
 {
-  return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
+  return _mm_slli_epi16 (__A, _mm_cvtsi128_si32 (__B));
 }
 
 static __inline __m128i
-_mm_srai_epi16 (__m128i __A, int __B)
+_mm_sll_epi32 (__m128i __A, __m128i __B)
 {
-  return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
+  return _mm_slli_epi32 (__A, _mm_cvtsi128_si32 (__B));
 }
 
 static __inline __m128i
-_mm_srai_epi32 (__m128i __A, int __B)
+_mm_sll_epi64 (__m128i __A, __m128i __B)
 {
-  return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
+  return _mm_slli_epi64 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-#if 0
-static __m128i __attribute__((__always_inline__))
-_mm_srli_si128 (__m128i __A, const int __B)
+static __inline __m128i
+_mm_sra_epi16 (__m128i __A, __m128i __B)
 {
-  return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B))
+  return _mm_srai_epi16 (__A, _mm_cvtsi128_si32 (__B));
 }
 
-static __m128i __attribute__((__always_inline__))
-_mm_srli_si128 (__m128i __A, const int __B)
+static __inline __m128i
+_mm_sra_epi32 (__m128i __A, __m128i __B)
 {
-  return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B))
+  return _mm_srai_epi32 (__A, _mm_cvtsi128_si32 (__B));
 }
-#endif
-#define _mm_srli_si128(__A, __B) ((__m128i)__builtin_ia32_psrldqi128 (__A, __B))
-#define _mm_slli_si128(__A, __B) ((__m128i)__builtin_ia32_pslldqi128 (__A, __B))
 
 static __inline __m128i
-_mm_srli_epi16 (__m128i __A, int __B)
+_mm_srl_epi16 (__m128i __A, __m128i __B)
 {
-  return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
+  return _mm_srli_epi16 (__A, _mm_cvtsi128_si32 (__B));
 }
 
 static __inline __m128i
-_mm_srli_epi32 (__m128i __A, int __B)
+_mm_srl_epi32 (__m128i __A, __m128i __B)
 {
-  return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
+  return _mm_srli_epi32 (__A, _mm_cvtsi128_si32 (__B));
 }
 
 static __inline __m128i
-_mm_srli_epi64 (__m128i __A, int __B)
+_mm_srl_epi64 (__m128i __A, __m128i __B)
 {
-  return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
+  return _mm_srli_epi64 (__A, _mm_cvtsi128_si32 (__B));
 }
 
 static __inline __m128i
@@ -1470,22 +1489,6 @@ _mm_cvtsi64x_si128 (long long __A)
 }
 #endif
 
-static __inline int
-_mm_cvtsi128_si32 (__m128i __A)
-{
-  int __tmp;
-  __builtin_ia32_stored (&__tmp, (__v4si)__A);
-  return __tmp;
-}
-
-#ifdef __x86_64__
-static __inline long long
-_mm_cvtsi128_si64x (__m128i __A)
-{
-  return __builtin_ia32_movdq2q ((__v2di)__A);
-}
-#endif
-
 #endif /* __SSE2__ */
 
 #endif /* _EMMINTRIN_H_INCLUDED */
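The net effect of the emmintrin.h hunks above: each register-count shift intrinsic now forwards to its immediate-count counterpart, with the count pulled from the low lane by _mm_cvtsi128_si32. A small usage sketch (my own example, assuming SSE2):

#include <emmintrin.h>
#include <stdio.h>

int
main (void)
{
  __m128i x = _mm_set1_epi32 (16);         /* four lanes of 16 */
  __m128i count = _mm_cvtsi32_si128 (2);   /* shift count in the low lane */

  /* After the rewrite, _mm_sll_epi32 is just
     _mm_slli_epi32 (x, _mm_cvtsi128_si32 (count)).  */
  __m128i a = _mm_sll_epi32 (x, count);
  __m128i b = _mm_slli_epi32 (x, 2);

  /* Both forms shift every 32-bit lane left by 2, yielding 64.  */
  printf ("%d %d\n", _mm_cvtsi128_si32 (a), _mm_cvtsi128_si32 (b));
  return 0;
}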
gcc/config/i386/i386-modes.def
@@ -70,6 +70,10 @@ VECTOR_MODES (FLOAT, 8);      /*            V4HF V2SF */
 VECTOR_MODES (FLOAT, 16);     /*       V8HF V4SF V2DF */
 VECTOR_MODE (INT, DI, 4);     /*                 V4DI */
 VECTOR_MODE (INT, SI, 8);     /*                 V8SI */
+VECTOR_MODE (INT, HI, 16);    /*                V16HI */
+VECTOR_MODE (INT, QI, 32);    /*                V32QI */
+VECTOR_MODE (FLOAT, DF, 4);   /*                 V4DF */
+VECTOR_MODE (FLOAT, SF, 8);   /*                 V8SF */
 
 /* The symbol Pmode stands for one of the above machine modes (usually SImode).
    The tm.h file specifies which one.  It is not a distinct mode.  */
gcc/config/i386/i386-protos.h
@@ -126,6 +126,10 @@ extern void ix86_expand_clear (rtx);
 extern void ix86_expand_move (enum machine_mode, rtx[]);
 extern void ix86_expand_vector_move (enum machine_mode, rtx[]);
 extern void ix86_expand_vector_move_misalign (enum machine_mode, rtx[]);
+extern rtx ix86_fixup_binary_operands (enum rtx_code,
+                                       enum machine_mode, rtx[]);
+extern void ix86_fixup_binary_operands_no_copy (enum rtx_code,
+                                                enum machine_mode, rtx[]);
 extern void ix86_expand_binary_operator (enum rtx_code,
                                          enum machine_mode, rtx[]);
 extern int ix86_binary_operator_ok (enum rtx_code, enum machine_mode, rtx[]);
gcc/config/i386/i386.h
@@ -2062,6 +2062,8 @@ enum ix86_builtins
   IX86_BUILTIN_CMPNEQSS,
   IX86_BUILTIN_CMPNLTSS,
   IX86_BUILTIN_CMPNLESS,
+  IX86_BUILTIN_CMPNGTSS,
+  IX86_BUILTIN_CMPNGESS,
   IX86_BUILTIN_CMPORDSS,
   IX86_BUILTIN_CMPUNORDSS,
   IX86_BUILTIN_CMPNESS,
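Per the ChangeLog, these two builtin codes back the scalar not-greater-than and not-greater-or-equal comparisons, which previously went through the now-removed *maskncmp* special cases. A usage sketch through the public xmmintrin.h intrinsics (my own example; the exact builtins these enum values map to are an assumption):

#include <xmmintrin.h>
#include <stdio.h>

int
main (void)
{
  __m128 a = _mm_set_ss (1.0f);
  __m128 b = _mm_set_ss (2.0f);

  /* NGT is true when !(a > b); 1.0 > 2.0 is false, so the low lane
     becomes an all-ones mask and movemask bit 0 is set.  */
  __m128 m = _mm_cmpngt_ss (a, b);
  printf ("%d\n", _mm_movemask_ps (m) & 1);   /* prints 1 */
  return 0;
}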
(A diff too large to display is omitted here, presumably the bulk of the change in config/i386/i386.c, i386.md, and the new sse.md.)
gcc/config/i386/ppro.md
@@ -687,7 +687,7 @@
 (define_insn_reservation "ppro_sse_div_V4SF_load" 48
                        (and (eq_attr "cpu" "pentiumpro")
-                            (and (eq_attr "memory" "none")
+                            (and (eq_attr "memory" "load")
                                  (and (eq_attr "mode" "V4SF")
                                       (eq_attr "type" "ssediv"))))
                        "decoder0,(p2+p0)*2,p0*32")
@@ -696,14 +696,14 @@
                        (and (eq_attr "cpu" "pentiumpro")
                             (and (eq_attr "memory" "none")
                                  (and (eq_attr "mode" "V4SF")
-                                      (eq_attr "type" "sselog"))))
+                                      (eq_attr "type" "sselog,sselog1"))))
                        "decodern,p1")
 (define_insn_reservation "ppro_sse_log_V4SF_load" 2
                        (and (eq_attr "cpu" "pentiumpro")
-                            (and (eq_attr "memory" "none")
+                            (and (eq_attr "memory" "load")
                                  (and (eq_attr "mode" "V4SF")
-                                      (eq_attr "type" "sselog"))))
+                                      (eq_attr "type" "sselog,sselog1"))))
                        "decoder0,(p2+p1)")
 (define_insn_reservation "ppro_sse_mov_V4SF" 1
gcc/config/i386/predicates.md
@@ -319,12 +319,6 @@
          (and (match_operand 0 "const_double_operand")
               (match_test "GET_MODE_SIZE (mode) <= 8")))))
 
-;; Return nonzero if OP is CONST_INT >= 1 and <= 31 (a valid operand
-;; for shift & compare patterns, as shifting by 0 does not change flags).
-(define_predicate "const_int_1_31_operand"
-  (and (match_code "const_int")
-       (match_test "INTVAL (op) >= 1 && INTVAL (op) <= 31")))
-
 ;; Returns nonzero if OP is either a symbol reference or a sum of a symbol
 ;; reference and a constant.
 (define_predicate "symbolic_operand"
@@ -521,6 +515,11 @@
   return i == 2 || i == 4 || i == 8;
 })
 
+;; Match 0 or 1.
+(define_predicate "const_0_to_1_operand"
+  (and (match_code "const_int")
+       (match_test "op == const0_rtx || op == const1_rtx")))
+
 ;; Match 0 to 3.
 (define_predicate "const_0_to_3_operand"
   (and (match_code "const_int")
@@ -546,6 +545,30 @@
   (and (match_code "const_int")
        (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 255")))
 
+;; Match (0 to 255) * 8
+(define_predicate "const_0_to_255_mul_8_operand"
+  (match_code "const_int")
+{
+  unsigned HOST_WIDE_INT val = INTVAL (op);
+  return val <= 255*8 && val % 8 == 0;
+})
+
+;; Return nonzero if OP is CONST_INT >= 1 and <= 31 (a valid operand
+;; for shift & compare patterns, as shifting by 0 does not change flags).
+(define_predicate "const_1_to_31_operand"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) >= 1 && INTVAL (op) <= 31")))
+
+;; Match 2 or 3.
+(define_predicate "const_2_to_3_operand"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) == 2 || INTVAL (op) == 3")))
+
+;; Match 4 to 7.
+(define_predicate "const_4_to_7_operand"
+  (and (match_code "const_int")
+       (match_test "INTVAL (op) >= 4 && INTVAL (op) <= 7")))
+
 ;; Match exactly one bit in 4-bit mask.
 (define_predicate "const_pow2_1_to_8_operand"
   (match_code "const_int")
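const_0_to_255_mul_8_operand exists because the psrldq/pslldq patterns count bits while the user-facing macros count bytes: as the emmintrin.h hunk above shows, _mm_srli_si128 and _mm_slli_si128 multiply their byte argument by 8 before handing it to the builtin, so the insn predicate only has to accept multiples of 8 up to 255*8. A small illustration (my own example):

#include <emmintrin.h>
#include <stdio.h>

int
main (void)
{
  __m128i v = _mm_set_epi32 (4, 3, 2, 1);

  /* Shift the whole 128-bit value right by 4 *bytes*: per the macro
     above this becomes __builtin_ia32_psrldqi128 (v, 4 * 8), and 32
     is exactly the kind of immediate the new predicate accepts.  */
  __m128i r = _mm_srli_si128 (v, 4);

  printf ("%d\n", _mm_cvtsi128_si32 (r));   /* low lane is now 2 */
  return 0;
}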
gcc/testsuite/ChangeLog
+2005-01-08  Richard Henderson  <rth@redhat.com>
+
+	* lib/target-supports.exp (check_effective_target_vect_no_bitwise):
+	False for x86 and x86-64.
+
 2005-01-08  Diego Novillo  <dnovillo@redhat.com>
 
 	PR tree-optimization/18241
gcc/testsuite/lib/target-supports.exp
@@ -563,10 +563,6 @@ proc check_effective_target_vect_no_bitwise { } {
         verbose "check_effective_target_vect_no_bitwise: using cached result" 2
     } else {
         set et_vect_no_bitwise_saved 0
-        if { [istarget i?86-*-*]
-             || [istarget x86_64-*-*] } {
-            set et_vect_no_bitwise_saved 1
-        }
     }
     verbose "check_effective_target_vect_no_bitwise: returning $et_vect_no_bitwise_saved" 2
     return $et_vect_no_bitwise_saved
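With sse.md now providing real and/ior/xor patterns for the integer vector modes, x86 no longer reports vect_no_bitwise, so the vectorizer tests for bitwise operations are expected to pass there. A representative loop of the shape that keyword gates (my own example, not taken from the testsuite):

/* With the new and<mode>3/ior<mode>3/xor<mode>3 SSE patterns, the
   vectorizer can turn this into pand on 16-byte chunks.  */
#define N 256
unsigned char a[N], b[N], c[N];

void
bitwise_loop (void)
{
  int i;
  for (i = 0; i < N; i++)
    c[i] = a[i] & b[i];
}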