Commit 98c6d93c, authored and committed by H.J. Lu

Correct mask operand for AVX mask load/store.

gcc/

2011-01-17  H.J. Lu  <hongjiu.lu@intel.com>

	PR target/47318
	* config/i386/avxintrin.h (_mm_maskload_pd): Change mask to
	__m128i.
	(_mm_maskstore_pd): Likewise.
	(_mm_maskload_ps): Likewise.
	(_mm_maskstore_ps): Likewise.
	(_mm256_maskload_pd): Change mask to __m256i.
	(_mm256_maskstore_pd): Likewise.
	(_mm256_maskload_ps): Likewise.
	(_mm256_maskstore_ps): Likewise.

	* config/i386/i386-builtin-types.def: Updated.

	* config/i386/i386.c (bdesc_special_args): Update
	__builtin_ia32_maskloadpd, __builtin_ia32_maskloadps,
	__builtin_ia32_maskloadpd256, __builtin_ia32_maskloadps256,
	__builtin_ia32_maskstorepd, __builtin_ia32_maskstoreps,
	__builtin_ia32_maskstorepd256 and __builtin_ia32_maskstoreps256.
	(ix86_expand_special_args_builtin): Likewise.

	* config/i386/sse.md (avx_maskload<ssemodesuffix><avxmodesuffix>):
	Use <avxpermvecmode> on mask register.
	(avx_maskstore<ssemodesuffix><avxmodesuffix>): Likewise.

gcc/testsuite/

2011-01-17  H.J. Lu  <hongjiu.lu@intel.com>

	PR target/47318
	* gcc.target/i386/avx-vmaskmovpd-1.c: New.
	* gcc.target/i386/avx-vmaskmovpd-2.c: Likewise.
	* gcc.target/i386/avx-vmaskmovps-1.c: Likewise.
	* gcc.target/i386/avx-vmaskmovps-2.c: Likewise.

	* gcc.target/i386/avx-vmaskmovpd-256-1.c (avx_test): Load mask
	as __m256i.
	* gcc.target/i386/avx-vmaskmovpd-256-2.c (avx_test): Likewise.
	* gcc.target/i386/avx-vmaskmovps-256-1.c (avx_test): Likewise.
	* gcc.target/i386/avx-vmaskmovps-256-2.c (avx_test): Likewise.

From-SVN: r168899
parent b9361af2
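In short, the user-visible change is the type of the mask argument to the AVX
maskload/maskstore intrinsics.  As a sketch, for the 256-bit double-precision
variants (the avxintrin.h hunk below has the full set):

    /* Before this fix: mask declared as a float vector.  */
    extern __m256d _mm256_maskload_pd (double const *__P, __m256d __M);

    /* After this fix: mask declared as an integer vector.  */
    extern __m256d _mm256_maskload_pd (double const *__P, __m256i __M);
    extern void _mm256_maskstore_pd (double *__P, __m256i __M, __m256d __A);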
gcc/config/i386/avxintrin.h
@@ -890,55 +890,55 @@ _mm256_storeu_si256 (__m256i *__P, __m256i __A)
 }
 
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskload_pd (double const *__P, __m128d __M)
+_mm_maskload_pd (double const *__P, __m128i __M)
 {
   return (__m128d) __builtin_ia32_maskloadpd ((const __v2df *)__P,
-                                              (__v2df)__M);
+                                              (__v2di)__M);
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskstore_pd (double *__P, __m128d __M, __m128d __A)
+_mm_maskstore_pd (double *__P, __m128i __M, __m128d __A)
 {
-  __builtin_ia32_maskstorepd ((__v2df *)__P, (__v2df)__M, (__v2df)__A);
+  __builtin_ia32_maskstorepd ((__v2df *)__P, (__v2di)__M, (__v2df)__A);
 }
 
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskload_pd (double const *__P, __m256d __M)
+_mm256_maskload_pd (double const *__P, __m256i __M)
 {
   return (__m256d) __builtin_ia32_maskloadpd256 ((const __v4df *)__P,
-                                                 (__v4df)__M);
+                                                 (__v4di)__M);
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskstore_pd (double *__P, __m256d __M, __m256d __A)
+_mm256_maskstore_pd (double *__P, __m256i __M, __m256d __A)
 {
-  __builtin_ia32_maskstorepd256 ((__v4df *)__P, (__v4df)__M, (__v4df)__A);
+  __builtin_ia32_maskstorepd256 ((__v4df *)__P, (__v4di)__M, (__v4df)__A);
 }
 
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskload_ps (float const *__P, __m128 __M)
+_mm_maskload_ps (float const *__P, __m128i __M)
 {
   return (__m128) __builtin_ia32_maskloadps ((const __v4sf *)__P,
-                                             (__v4sf)__M);
+                                             (__v4si)__M);
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskstore_ps (float *__P, __m128 __M, __m128 __A)
+_mm_maskstore_ps (float *__P, __m128i __M, __m128 __A)
 {
-  __builtin_ia32_maskstoreps ((__v4sf *)__P, (__v4sf)__M, (__v4sf)__A);
+  __builtin_ia32_maskstoreps ((__v4sf *)__P, (__v4si)__M, (__v4sf)__A);
 }
 
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskload_ps (float const *__P, __m256 __M)
+_mm256_maskload_ps (float const *__P, __m256i __M)
 {
   return (__m256) __builtin_ia32_maskloadps256 ((const __v8sf *)__P,
-                                                (__v8sf)__M);
+                                                (__v8si)__M);
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskstore_ps (float *__P, __m256 __M, __m256 __A)
+_mm256_maskstore_ps (float *__P, __m256i __M, __m256 __A)
 {
-  __builtin_ia32_maskstoreps256 ((__v8sf *)__P, (__v8sf)__M, (__v8sf)__A);
+  __builtin_ia32_maskstoreps256 ((__v8sf *)__P, (__v8si)__M, (__v8sf)__A);
 }
 
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
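For reference, a minimal usage sketch of the corrected prototypes (hypothetical
example, not part of the patch; compile with -mavx).  The mask is now built as
an integer vector, and only elements whose sign bit is set are loaded or stored:

    #include <immintrin.h>

    void
    copy_even_lanes (double *dst, const double *src)
    {
      /* Select elements 0 and 2: an all-ones element has its sign bit set.  */
      __m256i mask = _mm256_set_epi64x (0, -1LL, 0, -1LL);

      /* Masked-out lanes of the load read as 0.0; masked-out lanes of the
         store leave the corresponding memory untouched.  */
      __m256d v = _mm256_maskload_pd (src, mask);
      _mm256_maskstore_pd (dst, mask, v);
    }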
gcc/config/i386/i386-builtin-types.def
@@ -236,7 +236,7 @@ DEF_FUNCTION_TYPE (V1DI, V1DI, SI)
 DEF_FUNCTION_TYPE (V1DI, V1DI, V1DI)
 DEF_FUNCTION_TYPE (V1DI, V2SI, V2SI)
 DEF_FUNCTION_TYPE (V1DI, V8QI, V8QI)
-DEF_FUNCTION_TYPE (V2DF, PCV2DF, V2DF)
+DEF_FUNCTION_TYPE (V2DF, PCV2DF, V2DI)
 DEF_FUNCTION_TYPE (V2DF, V2DF, DI)
 DEF_FUNCTION_TYPE (V2DF, V2DF, INT)
 DEF_FUNCTION_TYPE (V2DF, V2DF, PCDOUBLE)
@@ -258,7 +258,7 @@ DEF_FUNCTION_TYPE (V2SI, V2SF, V2SF)
 DEF_FUNCTION_TYPE (V2SI, V2SI, SI)
 DEF_FUNCTION_TYPE (V2SI, V2SI, V2SI)
 DEF_FUNCTION_TYPE (V2SI, V4HI, V4HI)
-DEF_FUNCTION_TYPE (V4DF, PCV4DF, V4DF)
+DEF_FUNCTION_TYPE (V4DF, PCV4DF, V4DI)
 DEF_FUNCTION_TYPE (V4DF, V4DF, INT)
 DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF)
 DEF_FUNCTION_TYPE (V4DF, V4DF, V4DI)
@@ -267,7 +267,7 @@ DEF_FUNCTION_TYPE (V4HI, V4HI, INT)
 DEF_FUNCTION_TYPE (V4HI, V4HI, SI)
 DEF_FUNCTION_TYPE (V4HI, V4HI, V4HI)
 DEF_FUNCTION_TYPE (V4HI, V8QI, V8QI)
-DEF_FUNCTION_TYPE (V4SF, PCV4SF, V4SF)
+DEF_FUNCTION_TYPE (V4SF, PCV4SF, V4SI)
 DEF_FUNCTION_TYPE (V4SF, V4SF, DI)
 DEF_FUNCTION_TYPE (V4SF, V4SF, INT)
 DEF_FUNCTION_TYPE (V4SF, V4SF, PCV2SF)
@@ -293,7 +293,7 @@ DEF_FUNCTION_TYPE (V8HI, V8SF, INT)
 DEF_FUNCTION_TYPE (V8HI, V4SF, INT)
 DEF_FUNCTION_TYPE (V8QI, V4HI, V4HI)
 DEF_FUNCTION_TYPE (V8QI, V8QI, V8QI)
-DEF_FUNCTION_TYPE (V8SF, PCV8SF, V8SF)
+DEF_FUNCTION_TYPE (V8SF, PCV8SF, V8SI)
 DEF_FUNCTION_TYPE (V8SF, V8SF, INT)
 DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF)
 DEF_FUNCTION_TYPE (V8SF, V8SF, V8SI)
@@ -352,10 +352,10 @@ DEF_FUNCTION_TYPE (V8SI, V8SI, V4SI, INT)
 DEF_FUNCTION_TYPE (V8SI, V8SI, V8SI, INT)
 DEF_FUNCTION_TYPE (V8SI, V8SI, V8SI, V8SI)
 DEF_FUNCTION_TYPE (VOID, PCVOID, UNSIGNED, UNSIGNED)
-DEF_FUNCTION_TYPE (VOID, PV2DF, V2DF, V2DF)
-DEF_FUNCTION_TYPE (VOID, PV4DF, V4DF, V4DF)
-DEF_FUNCTION_TYPE (VOID, PV4SF, V4SF, V4SF)
-DEF_FUNCTION_TYPE (VOID, PV8SF, V8SF, V8SF)
+DEF_FUNCTION_TYPE (VOID, PV2DF, V2DI, V2DF)
+DEF_FUNCTION_TYPE (VOID, PV4DF, V4DI, V4DF)
+DEF_FUNCTION_TYPE (VOID, PV4SF, V4SI, V4SF)
+DEF_FUNCTION_TYPE (VOID, PV8SF, V8SI, V8SF)
 DEF_FUNCTION_TYPE (VOID, UINT, UINT, UINT)
 DEF_FUNCTION_TYPE (VOID, UINT64, UINT, UINT)
 DEF_FUNCTION_TYPE (VOID, V16QI, V16QI, PCHAR)
gcc/config/i386/i386.c
@@ -24483,14 +24483,14 @@ static const struct builtin_description bdesc_special_args[] =
   { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
   { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
 
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
-  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
+  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },
 
   { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
   { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
@@ -26821,18 +26821,18 @@ ix86_expand_special_args_builtin (const struct builtin_description *d,
       klass = load;
       memory = 1;
       break;
-    case V8SF_FTYPE_PCV8SF_V8SF:
-    case V4DF_FTYPE_PCV4DF_V4DF:
-    case V4SF_FTYPE_PCV4SF_V4SF:
-    case V2DF_FTYPE_PCV2DF_V2DF:
+    case V8SF_FTYPE_PCV8SF_V8SI:
+    case V4DF_FTYPE_PCV4DF_V4DI:
+    case V4SF_FTYPE_PCV4SF_V4SI:
+    case V2DF_FTYPE_PCV2DF_V2DI:
       nargs = 2;
       klass = load;
       memory = 0;
       break;
-    case VOID_FTYPE_PV8SF_V8SF_V8SF:
-    case VOID_FTYPE_PV4DF_V4DF_V4DF:
-    case VOID_FTYPE_PV4SF_V4SF_V4SF:
-    case VOID_FTYPE_PV2DF_V2DF_V2DF:
+    case VOID_FTYPE_PV8SF_V8SI_V8SF:
+    case VOID_FTYPE_PV4DF_V4DI_V4DF:
+    case VOID_FTYPE_PV4SF_V4SI_V4SF:
+    case VOID_FTYPE_PV2DF_V2DI_V2DF:
       nargs = 2;
       klass = store;
       /* Reserve memory operand for target.  */
gcc/config/i386/sse.md
@@ -11938,7 +11938,7 @@
   [(set (match_operand:AVXMODEF2P 0 "register_operand" "=x")
	(unspec:AVXMODEF2P
	  [(match_operand:AVXMODEF2P 1 "memory_operand" "m")
-	   (match_operand:AVXMODEF2P 2 "register_operand" "x")
+	   (match_operand:<avxpermvecmode> 2 "register_operand" "x")
	   (match_dup 0)]
	  UNSPEC_MASKLOAD))]
   "TARGET_AVX"
@@ -11951,7 +11951,7 @@
 (define_insn "avx_maskstore<ssemodesuffix><avxmodesuffix>"
   [(set (match_operand:AVXMODEF2P 0 "memory_operand" "=m")
	(unspec:AVXMODEF2P
-	  [(match_operand:AVXMODEF2P 1 "register_operand" "x")
+	  [(match_operand:<avxpermvecmode> 1 "register_operand" "x")
	   (match_operand:AVXMODEF2P 2 "register_operand" "x")
	   (match_dup 0)]
	  UNSPEC_MASKSTORE))]
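For reading the pattern change: <avxpermvecmode> resolves each AVX float vector
mode to the integer vector mode with the same element layout (V2DF to V2DI,
V4SF to V4SI, V4DF to V4DI, V8SF to V8SI), which is what makes the mask operand
of these patterns match the new builtin types above.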
gcc/testsuite/gcc.target/i386/avx-vmaskmovpd-1.c (new file)

/* { dg-do run } */
/* { dg-require-effective-target avx } */
/* { dg-options "-O2 -mavx" } */

#include "avx-check.h"

#ifndef MASK
#define MASK 7
#endif

#define mask_v(pos) (((MASK & (0x1ULL << (pos))) >> (pos)) << 63)

void static
avx_test (void)
{
  int i;
  long long m[2] = {mask_v(0), mask_v(1)};
  double s[2] = {1.1, 2.2};

  union128d u;
  union128i_q mask;
  double e[2] = {0.0};

  mask.x = _mm_loadu_si128 ((__m128i *)m);
  u.x = _mm_maskload_pd (s, mask.x);

  for (i = 0 ; i < 2; i++)
    e[i] = m[i] ? s[i] : 0;

  if (check_union128d (u, e))
    abort ();
}
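Note on the tests: mask_v(pos) moves bit pos of MASK into bit 63 of the
corresponding 64-bit mask element, i.e. the sign bit that vmaskmovpd examines;
the single-precision tests further down use bit 31 of each 32-bit element for
vmaskmovps.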
gcc/testsuite/gcc.target/i386/avx-vmaskmovpd-2.c (new file)

/* { dg-do run } */
/* { dg-require-effective-target avx } */
/* { dg-options "-O2 -mavx" } */

#include "avx-check.h"

#ifndef MASK
#define MASK 6
#endif

#define mask_v(pos) (((MASK & (0x1ULL << (pos))) >> (pos)) << 63)

void static
avx_test (void)
{
  int i;
  long long m[2] = {mask_v(0), mask_v(1)};
  double s[2] = {1.1, 2.2};
  double e[2] = {0.0};
  double d[2] = {0.0};

  union128d src;
  union128i_q mask;

  src.x = _mm_loadu_pd (s);
  mask.x = _mm_loadu_si128 ((__m128i *)m);
  _mm_maskstore_pd (d, mask.x, src.x);

  for (i = 0 ; i < 2; i++)
    e[i] = m[i] ? s[i] : 0;

  if (checkVd (d, e, 2))
    abort ();
}
gcc/testsuite/gcc.target/i386/avx-vmaskmovpd-256-1.c
@@ -14,12 +14,13 @@ void static
 avx_test (void)
 {
   int i;
-  long long m[8] = {mask_v(0), mask_v(1), mask_v(2), mask_v(3)};
+  long long m[4] = {mask_v(0), mask_v(1), mask_v(2), mask_v(3)};
   double s[4] = {1.1, 2.2, 3.3, 4.4};
-  union256d u, mask;
+  union256d u;
+  union256i_q mask;
   double e [4] = {0.0};
 
-  mask.x = _mm256_loadu_pd ((double*)m);
+  mask.x = _mm256_loadu_si256 ((__m256i *)m);
   u.x = _mm256_maskload_pd (s, mask.x);
 
   for (i = 0 ; i < 4; i++)
gcc/testsuite/gcc.target/i386/avx-vmaskmovpd-256-2.c
@@ -18,10 +18,11 @@ avx_test (void)
   double s[4] = {1.1, 2.2, 3.3, 4.4};
   double e [4] = {0.0};
   double d [4] = {0.0};
-  union256d src, mask;
+  union256d src;
+  union256i_q mask;
 
   src.x = _mm256_loadu_pd (s);
-  mask.x = _mm256_loadu_pd ((double*)m);
+  mask.x = _mm256_loadu_si256 ((__m256i *)m);
   _mm256_maskstore_pd (d, mask.x, src.x);
 
   for (i = 0 ; i < 4; i++)
gcc/testsuite/gcc.target/i386/avx-vmaskmovps-1.c (new file)

/* { dg-do run } */
/* { dg-require-effective-target avx } */
/* { dg-options "-O2 -mavx" } */

#include "avx-check.h"

#ifndef MASK
#define MASK 134
#endif

#define mask_v(pos) (((MASK & (0x1 << (pos))) >> (pos)) << 31)

void static
avx_test (void)
{
  int i;
  int m[4] = {mask_v(0), mask_v(1), mask_v(2), mask_v(3)};
  float s[4] = {1,2,3,4};

  union128 u;
  union128i_d mask;
  float e[4] = {0.0};

  mask.x = _mm_loadu_si128 ((__m128i *)m);
  u.x = _mm_maskload_ps (s, mask.x);

  for (i = 0 ; i < 4; i++)
    e[i] = m[i] ? s[i] : 0;

  if (check_union128 (u, e))
    abort ();
}
gcc/testsuite/gcc.target/i386/avx-vmaskmovps-2.c (new file)

/* { dg-do run } */
/* { dg-require-effective-target avx } */
/* { dg-options "-O2 -mavx" } */

#include "avx-check.h"

#ifndef MASK
#define MASK 214
#endif

#define mask_v(pos) (((MASK & (0x1 << (pos))) >> (pos)) << 31)

void static
avx_test (void)
{
  int i;
  int m[4] = {mask_v(0), mask_v(1), mask_v(2), mask_v(3)};
  float s[4] = {1,2,3,4};

  union128 src;
  union128i_d mask;
  float e[4] = {0.0};
  float d[4] = {0.0};

  src.x = _mm_loadu_ps (s);
  mask.x = _mm_loadu_si128 ((__m128i *)m);
  _mm_maskstore_ps (d, mask.x, src.x);

  for (i = 0 ; i < 4; i++)
    e[i] = m[i] ? s[i] : 0;

  if (checkVf (d, e, 4))
    abort ();
}
gcc/testsuite/gcc.target/i386/avx-vmaskmovps-256-1.c
@@ -16,10 +16,11 @@ avx_test (void)
   int i;
   int m[8] = {mask_v(0), mask_v(1), mask_v(2), mask_v(3), mask_v(4), mask_v(5), mask_v(6), mask_v(7)};
   float s[8] = {1,2,3,4,5,6,7,8};
-  union256 u, mask;
+  union256 u;
+  union256i_d mask;
   float e [8] = {0.0};
 
-  mask.x = _mm256_loadu_ps ((float*)m);
+  mask.x = _mm256_loadu_si256 ((__m256i *)m);
   u.x = _mm256_maskload_ps (s, mask.x);
 
   for (i = 0 ; i < 8; i++)
gcc/testsuite/gcc.target/i386/avx-vmaskmovps-256-2.c
@@ -16,12 +16,13 @@ avx_test (void)
   int i;
   int m[8] = {mask_v(0), mask_v(1), mask_v(2), mask_v(3), mask_v(4), mask_v(5), mask_v(6), mask_v(7)};
   float s[8] = {1,2,3,4,5,6,7,8};
-  union256 src, mask;
+  union256 src;
+  union256i_d mask;
   float e [8] = {0.0};
   float d [8] = {0.0};
 
   src.x = _mm256_loadu_ps (s);
-  mask.x = _mm256_loadu_ps ((float *)m);
+  mask.x = _mm256_loadu_si256 ((__m256i *)m);
   _mm256_maskstore_ps (d, mask.x, src.x);
 
   for (i = 0 ; i < 8; i++)