Commit 822eda12 by Jan Hubicka

re PR target/7693 (Typo in i386 mmintrin.h header)

	* mmintrin.h (__m64): Typedef it to v2si.
	(_mm_cvtsi32_si64, _mm_cvtsi64_si32, _mm_sll_pi16,
	_mm_sll_pi32, _mm_sll_pi64, _mm_slli_pi64, _mm_sra_pi16,
	_mm_sra_pi32, _mm_srl_pi16, _mm_srl_pi32, _mm_srl_pi64,
	_mm_srli_pi64, _mm_and_si64, _mm_andnot_si64,
	_mm_or_si64, _mm_xor_si64): Add necessary casts.
	* xmmintrin.h (_mm_setzero_si64): Likewise.

	* i386.h (ALIGN_MODE_128): Update comment; add missing modes.
	(SSE_REG_MODE_P, MMX_REG_MODE_P): New macros.

	PR target/7693
	Patch by Shawn Wagner
	* mmintrin.h: Replace pi64 by si64.

From-SVN: r58306
parent bf3d8f27
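
In short: __m64 becomes a genuine two-element vector type instead of a plain unsigned long long, so the MMX builtins that traffic in 64-bit (DImode) values now need explicit casts, and the full-width 64-bit shifts take the Intel-documented _si64 spelling instead of the misspelled _pi64 reported in PR target/7693. A minimal sketch of the user-visible rename, assuming GCC with -mmmx (variable names hypothetical):

#include <mmintrin.h>

__m64
shift_left_64 (__m64 v)
{
  /* Before this patch the header misspelled this as _mm_slli_pi64;
     Intel's spec and other compilers use _mm_slli_si64.  */
  return _mm_slli_si64 (v, 4);
}
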
ChangeLog:

+Sat Oct 19 10:46:52 CEST 2002  Jan Hubicka  <jh@suse.cz>
+
+	* mmintrin.h (__m64): Typedef it to v2si.
+	(_mm_cvtsi32_si64, _mm_cvtsi64_si32, _mm_sll_pi16,
+	_mm_sll_pi32, _mm_sll_pi64, _mm_slli_pi64, _mm_sra_pi16,
+	_mm_sra_pi32, _mm_srl_pi16, _mm_srl_pi32, _mm_srl_pi64,
+	_mm_srli_pi64, _mm_and_si64, _mm_andnot_si64,
+	_mm_or_si64, _mm_xor_si64): Add necessary casts.
+	* xmmintrin.h (_mm_setzero_si64): Likewise.
+
+	* i386.h (ALIGN_MODE_128): Update comment; add missing modes.
+	(SSE_REG_MODE_P, MMX_REG_MODE_P): New macros.
+
+	PR target/7693
+	Patch by Shawn Wagner
+	* mmintrin.h: Replace pi64 by si64.
+
 2002-10-18  David Edelsohn  <edelsohn@gnu.org>
 
 	* rs6000.md (movdf_hardfloat32): Order alternatives consistently.
i386.h:

@@ -730,10 +730,9 @@ extern int x86_prefetch_sse;
 #define BIGGEST_ALIGNMENT 128
 
-/* Decide whether a variable of mode MODE must be 128 bit aligned.  */
+/* Decide whether a variable of mode MODE should be 128 bit aligned.  */
 #define ALIGN_MODE_128(MODE) \
-  ((MODE) == XFmode || (MODE) == TFmode || ((MODE) == TImode) \
-   || (MODE) == V4SFmode || (MODE) == V4SImode)
+  ((MODE) == XFmode || (MODE) == TFmode || SSE_REG_MODE_P (MODE))
 
 /* The published ABIs say that doubles should be aligned on word
    boundaries, so lower the alignment for structure fields unless
@@ -1007,6 +1006,17 @@ do { \
    || (MODE) == CDImode \
    || (TARGET_64BIT && ((MODE) == TImode || (MODE) == CTImode)))
 
+/* Return true for modes passed in SSE registers.  */
+#define SSE_REG_MODE_P(MODE) \
+  ((MODE) == TImode || (MODE) == V16QImode \
+   || (MODE) == V8HImode || (MODE) == V2DFmode || (MODE) == V2DImode \
+   || (MODE) == V4SFmode || (MODE) == V4SImode)
+
+/* Return true for modes passed in MMX registers.  */
+#define MMX_REG_MODE_P(MODE) \
+  ((MODE) == V8QImode || (MODE) == V4HImode || (MODE) == V2SImode \
+   || (MODE) == V2SFmode)
+
 /* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.  */
 #define HARD_REGNO_MODE_OK(REGNO, MODE) \
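
For illustration only, a standalone sketch of what the refactoring buys: the mode names are mocked up as plain enumerators and the harness is hypothetical; only the two macro bodies are copied from the hunk above. Note that V2DFmode and V2DImode were absent from the old open-coded ALIGN_MODE_128 list and now get 128-bit alignment via SSE_REG_MODE_P.

#include <stdio.h>

/* Mock machine modes as plain enumerators (illustration only).  */
enum machine_mode { XFmode, TFmode, TImode, V16QImode, V8HImode,
                    V2DFmode, V2DImode, V4SFmode, V4SImode, SImode };

#define SSE_REG_MODE_P(MODE) \
  ((MODE) == TImode || (MODE) == V16QImode \
   || (MODE) == V8HImode || (MODE) == V2DFmode || (MODE) == V2DImode \
   || (MODE) == V4SFmode || (MODE) == V4SImode)

#define ALIGN_MODE_128(MODE) \
  ((MODE) == XFmode || (MODE) == TFmode || SSE_REG_MODE_P (MODE))

int
main (void)
{
  printf ("V2DF 128-bit aligned: %d\n", ALIGN_MODE_128 (V2DFmode)); /* 1 */
  printf ("SI   128-bit aligned: %d\n", ALIGN_MODE_128 (SImode));   /* 0 */
  return 0;
}
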
mmintrin.h:

@@ -34,7 +34,7 @@
 # error "MMX instruction set not enabled"
 #else
 
 /* The data type intended for user use.  */
-typedef unsigned long long __m64 __attribute__ ((__aligned__ (8)));
+typedef int __m64 __attribute__ ((__mode__ (__V2SI__)));
 
 /* Internal data types for implementing the intrinsics.  */
 typedef int __v2si __attribute__ ((__mode__ (__V2SI__)));
@@ -52,14 +52,16 @@ _mm_empty (void)
 static __inline __m64
 _mm_cvtsi32_si64 (int __i)
 {
-  return (unsigned int) __i;
+  long long __tmp = (unsigned int)__i;
+  return (__m64) __tmp;
 }
 
 /* Convert the lower 32 bits of the __m64 object into an integer.  */
 static __inline int
 _mm_cvtsi64_si32 (__m64 __i)
 {
-  return __i;
+  long long __tmp = (long long)__i;
+  return __tmp;
 }
 
 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
@@ -272,7 +274,7 @@ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
 static __inline __m64
 _mm_sll_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);
+  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -285,7 +287,7 @@ _mm_slli_pi16 (__m64 __m, int __count)
 static __inline __m64
 _mm_sll_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);
+  return (__m64) __builtin_ia32_pslld ((__v2si)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -296,22 +298,22 @@ _mm_slli_pi32 (__m64 __m, int __count)
 
 /* Shift the 64-bit value in M left by COUNT.  */
 static __inline __m64
-_mm_sll_pi64 (__m64 __m, __m64 __count)
+_mm_sll_si64 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psllq (__m, __count);
+  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
 }
 
 static __inline __m64
-_mm_slli_pi64 (__m64 __m, int __count)
+_mm_slli_si64 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psllq (__m, __count);
+  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
 }
 
 /* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
 static __inline __m64
 _mm_sra_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);
+  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -324,7 +326,7 @@ _mm_srai_pi16 (__m64 __m, int __count)
 static __inline __m64
 _mm_sra_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);
+  return (__m64) __builtin_ia32_psrad ((__v2si)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -337,7 +339,7 @@ _mm_srai_pi32 (__m64 __m, int __count)
 static __inline __m64
 _mm_srl_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);
+  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -350,7 +352,7 @@ _mm_srli_pi16 (__m64 __m, int __count)
 static __inline __m64
 _mm_srl_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);
+  return (__m64) __builtin_ia32_psrld ((__v2si)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -361,22 +363,22 @@ _mm_srli_pi32 (__m64 __m, int __count)
 
 /* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
 static __inline __m64
-_mm_srl_pi64 (__m64 __m, __m64 __count)
+_mm_srl_si64 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrlq (__m, __count);
+  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
 }
 
 static __inline __m64
-_mm_srli_pi64 (__m64 __m, int __count)
+_mm_srli_si64 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psrlq (__m, __count);
+  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
 }
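
Taken together: the per-element shifts keep their _pi16/_pi32 names, while shifts of the whole 64-bit value are now spelled _si64. A short hedged sketch mixing the two (shift counts and names hypothetical, -mmmx assumed):

#include <mmintrin.h>

__m64
shift_mix (__m64 m)
{
  __m64 lanes = _mm_slli_pi32 (m, 1);      /* each 32-bit lane << 1   */
  __m64 whole = _mm_srli_si64 (lanes, 8);  /* whole 64-bit value >> 8 */
  return whole;
}
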
 
 /* Bit-wise AND the 64-bit values in M1 and M2.  */
 static __inline __m64
 _mm_and_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_pand (__m1, __m2);
+  return (__m64) __builtin_ia32_pand ((long long)__m1, (long long)__m2);
 }
 
 /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
@@ -384,21 +386,21 @@ _mm_and_si64 (__m64 __m1, __m64 __m2)
 static __inline __m64
 _mm_andnot_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_pandn (__m1, __m2);
+  return (__m64) __builtin_ia32_pandn ((long long)__m1, (long long)__m2);
 }
 
 /* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
 static __inline __m64
 _mm_or_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_por (__m1, __m2);
+  return (__m64)__builtin_ia32_por ((long long)__m1, (long long)__m2);
 }
 
 /* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
 static __inline __m64
 _mm_xor_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_pxor (__m1, __m2);
+  return (__m64)__builtin_ia32_pxor ((long long)__m1, (long long)__m2);
 }
 
 /* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
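
The 64-bit logical operations follow the same cast-through-long-long pattern internally; from the caller's side nothing changes. For instance, the classic mask/select idiom still works as before (a hedged sketch, names hypothetical):

#include <mmintrin.h>

/* Pick bits from A where MASK is set, from B where it is clear.  */
__m64
select_bits (__m64 mask, __m64 a, __m64 b)
{
  return _mm_or_si64 (_mm_and_si64 (mask, a),
                      _mm_andnot_si64 (mask, b));  /* (~mask) & b */
}
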
@@ -447,7 +449,7 @@ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
 static __inline __m64
 _mm_setzero_si64 (void)
 {
-  return __builtin_ia32_mmx_zero ();
+  return (__m64)__builtin_ia32_mmx_zero ();
 }
 
 /* Creates a vector of two 32-bit values; I0 is least significant.  */
......
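
And _mm_setzero_si64 still yields a cheap all-zero __m64, e.g. as a loop accumulator (hedged sketch, -mmmx assumed):

#include <mmintrin.h>

__m64
sum_lanes_pi16 (const __m64 *p, int n)
{
  __m64 acc = _mm_setzero_si64 ();   /* all-zero starting value       */
  int i;
  for (i = 0; i < n; i++)
    acc = _mm_add_pi16 (acc, p[i]);  /* four 16-bit lanes, wraparound */
  return acc;
}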