Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
R
riscv-gcc-1
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
lvzhengyang
riscv-gcc-1
Commits
c220e3a9
Commit
c220e3a9
authored
Jun 06, 2003
by
H.J. Lu
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Apply the right patch.
From-SVN: r67544
parent
30fb3231
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
447 additions
and
81 deletions
+447
-81
gcc/config/i386/mmintrin.h
+330
-57
gcc/config/i386/xmmintrin.h
+117
-24
No files found.
gcc/config/i386/mmintrin.h
View file @
c220e3a9
...
@@ -48,6 +48,12 @@ _mm_empty (void)
...
@@ -48,6 +48,12 @@ _mm_empty (void)
__builtin_ia32_emms
();
__builtin_ia32_emms
();
}
}
static
__inline
void
_m_empty
(
void
)
{
_mm_empty
();
}
/* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */
/* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */
static
__inline
__m64
static
__inline
__m64
_mm_cvtsi32_si64
(
int
__i
)
_mm_cvtsi32_si64
(
int
__i
)
...
@@ -56,6 +62,12 @@ _mm_cvtsi32_si64 (int __i)
...
@@ -56,6 +62,12 @@ _mm_cvtsi32_si64 (int __i)
return
(
__m64
)
__tmp
;
return
(
__m64
)
__tmp
;
}
}
static
__inline
__m64
_m_from_int
(
int
__i
)
{
return
_mm_cvtsi32_si64
(
__i
);
}
#ifdef __x86_64__
#ifdef __x86_64__
/* Convert I to a __m64 object. */
/* Convert I to a __m64 object. */
static
__inline
__m64
static
__inline
__m64
...
@@ -80,6 +92,12 @@ _mm_cvtsi64_si32 (__m64 __i)
...
@@ -80,6 +92,12 @@ _mm_cvtsi64_si32 (__m64 __i)
return
__tmp
;
return
__tmp
;
}
}
static
__inline
int
_m_to_int
(
__m64
__i
)
{
return
_mm_cvtsi64_si32
(
__i
);
}
#ifdef __x86_64__
#ifdef __x86_64__
/* Convert the lower 32 bits of the __m64 object into an integer. */
/* Convert the lower 32 bits of the __m64 object into an integer. */
static
__inline
long
long
static
__inline
long
long
...
@@ -98,6 +116,12 @@ _mm_packs_pi16 (__m64 __m1, __m64 __m2)
...
@@ -98,6 +116,12 @@ _mm_packs_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_packsswb
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_packsswb
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_packsswb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_packs_pi16
(
__m1
,
__m2
);
}
/* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
/* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
the result, and the two 32-bit values from M2 into the upper two 16-bit
the result, and the two 32-bit values from M2 into the upper two 16-bit
values of the result, all with signed saturation. */
values of the result, all with signed saturation. */
...
@@ -107,6 +131,12 @@ _mm_packs_pi32 (__m64 __m1, __m64 __m2)
...
@@ -107,6 +131,12 @@ _mm_packs_pi32 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_packssdw
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
return
(
__m64
)
__builtin_ia32_packssdw
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
}
}
static
__inline
__m64
_m_packssdw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_packs_pi32
(
__m1
,
__m2
);
}
/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
the result, and the four 16-bit values from M2 into the upper four 8-bit
the result, and the four 16-bit values from M2 into the upper four 8-bit
values of the result, all with unsigned saturation. */
values of the result, all with unsigned saturation. */
...
@@ -116,6 +146,12 @@ _mm_packs_pu16 (__m64 __m1, __m64 __m2)
...
@@ -116,6 +146,12 @@ _mm_packs_pu16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_packuswb
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_packuswb
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_packuswb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_packs_pu16
(
__m1
,
__m2
);
}
/* Interleave the four 8-bit values from the high half of M1 with the four
/* Interleave the four 8-bit values from the high half of M1 with the four
8-bit values from the high half of M2. */
8-bit values from the high half of M2. */
static
__inline
__m64
static
__inline
__m64
...
@@ -124,6 +160,12 @@ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
...
@@ -124,6 +160,12 @@ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_punpckhbw
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_punpckhbw
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_punpckhbw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_unpackhi_pi8
(
__m1
,
__m2
);
}
/* Interleave the two 16-bit values from the high half of M1 with the two
/* Interleave the two 16-bit values from the high half of M1 with the two
16-bit values from the high half of M2. */
16-bit values from the high half of M2. */
static
__inline
__m64
static
__inline
__m64
...
@@ -132,6 +174,12 @@ _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
...
@@ -132,6 +174,12 @@ _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_punpckhwd
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_punpckhwd
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_punpckhwd
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_unpackhi_pi16
(
__m1
,
__m2
);
}
/* Interleave the 32-bit value from the high half of M1 with the 32-bit
/* Interleave the 32-bit value from the high half of M1 with the 32-bit
value from the high half of M2. */
value from the high half of M2. */
static
__inline
__m64
static
__inline
__m64
...
@@ -140,6 +188,12 @@ _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
...
@@ -140,6 +188,12 @@ _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_punpckhdq
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
return
(
__m64
)
__builtin_ia32_punpckhdq
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
}
}
static
__inline
__m64
_m_punpckhdq
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_unpackhi_pi32
(
__m1
,
__m2
);
}
/* Interleave the four 8-bit values from the low half of M1 with the four
/* Interleave the four 8-bit values from the low half of M1 with the four
8-bit values from the low half of M2. */
8-bit values from the low half of M2. */
static
__inline
__m64
static
__inline
__m64
...
@@ -148,6 +202,12 @@ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
...
@@ -148,6 +202,12 @@ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_punpcklbw
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_punpcklbw
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_punpcklbw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_unpacklo_pi8
(
__m1
,
__m2
);
}
/* Interleave the two 16-bit values from the low half of M1 with the two
/* Interleave the two 16-bit values from the low half of M1 with the two
16-bit values from the low half of M2. */
16-bit values from the low half of M2. */
static
__inline
__m64
static
__inline
__m64
...
@@ -156,6 +216,12 @@ _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
...
@@ -156,6 +216,12 @@ _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_punpcklwd
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_punpcklwd
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_punpcklwd
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_unpacklo_pi16
(
__m1
,
__m2
);
}
/* Interleave the 32-bit value from the low half of M1 with the 32-bit
/* Interleave the 32-bit value from the low half of M1 with the 32-bit
value from the low half of M2. */
value from the low half of M2. */
static
__inline
__m64
static
__inline
__m64
...
@@ -164,6 +230,12 @@ _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
...
@@ -164,6 +230,12 @@ _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_punpckldq
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
return
(
__m64
)
__builtin_ia32_punpckldq
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
}
}
static
__inline
__m64
_m_punpckldq
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_unpacklo_pi32
(
__m1
,
__m2
);
}
/* Add the 8-bit values in M1 to the 8-bit values in M2. */
/* Add the 8-bit values in M1 to the 8-bit values in M2. */
static
__inline
__m64
static
__inline
__m64
_mm_add_pi8
(
__m64
__m1
,
__m64
__m2
)
_mm_add_pi8
(
__m64
__m1
,
__m64
__m2
)
...
@@ -171,6 +243,12 @@ _mm_add_pi8 (__m64 __m1, __m64 __m2)
...
@@ -171,6 +243,12 @@ _mm_add_pi8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_paddb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_paddb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_paddb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_add_pi8
(
__m1
,
__m2
);
}
/* Add the 16-bit values in M1 to the 16-bit values in M2. */
/* Add the 16-bit values in M1 to the 16-bit values in M2. */
static
__inline
__m64
static
__inline
__m64
_mm_add_pi16
(
__m64
__m1
,
__m64
__m2
)
_mm_add_pi16
(
__m64
__m1
,
__m64
__m2
)
...
@@ -178,6 +256,12 @@ _mm_add_pi16 (__m64 __m1, __m64 __m2)
...
@@ -178,6 +256,12 @@ _mm_add_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_paddw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_paddw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_paddw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_add_pi16
(
__m1
,
__m2
);
}
/* Add the 32-bit values in M1 to the 32-bit values in M2. */
/* Add the 32-bit values in M1 to the 32-bit values in M2. */
static
__inline
__m64
static
__inline
__m64
_mm_add_pi32
(
__m64
__m1
,
__m64
__m2
)
_mm_add_pi32
(
__m64
__m1
,
__m64
__m2
)
...
@@ -185,6 +269,12 @@ _mm_add_pi32 (__m64 __m1, __m64 __m2)
...
@@ -185,6 +269,12 @@ _mm_add_pi32 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_paddd
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
return
(
__m64
)
__builtin_ia32_paddd
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
}
}
static
__inline
__m64
_m_paddd
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_add_pi32
(
__m1
,
__m2
);
}
/* Add the 64-bit values in M1 to the 64-bit values in M2. */
/* Add the 64-bit values in M1 to the 64-bit values in M2. */
static
__inline
__m64
static
__inline
__m64
_mm_add_si64
(
__m64
__m1
,
__m64
__m2
)
_mm_add_si64
(
__m64
__m1
,
__m64
__m2
)
...
@@ -200,6 +290,12 @@ _mm_adds_pi8 (__m64 __m1, __m64 __m2)
...
@@ -200,6 +290,12 @@ _mm_adds_pi8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_paddsb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_paddsb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_paddsb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_adds_pi8
(
__m1
,
__m2
);
}
/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
saturated arithmetic. */
saturated arithmetic. */
static
__inline
__m64
static
__inline
__m64
...
@@ -208,6 +304,12 @@ _mm_adds_pi16 (__m64 __m1, __m64 __m2)
...
@@ -208,6 +304,12 @@ _mm_adds_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_paddsw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_paddsw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_paddsw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_adds_pi16
(
__m1
,
__m2
);
}
/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
saturated arithmetic. */
saturated arithmetic. */
static
__inline
__m64
static
__inline
__m64
...
@@ -216,6 +318,12 @@ _mm_adds_pu8 (__m64 __m1, __m64 __m2)
...
@@ -216,6 +318,12 @@ _mm_adds_pu8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_paddusb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_paddusb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_paddusb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_adds_pu8
(
__m1
,
__m2
);
}
/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
saturated arithmetic. */
saturated arithmetic. */
static
__inline
__m64
static
__inline
__m64
...
@@ -224,6 +332,12 @@ _mm_adds_pu16 (__m64 __m1, __m64 __m2)
...
@@ -224,6 +332,12 @@ _mm_adds_pu16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_paddusw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_paddusw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_paddusw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_adds_pu16
(
__m1
,
__m2
);
}
/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
static
__inline
__m64
static
__inline
__m64
_mm_sub_pi8
(
__m64
__m1
,
__m64
__m2
)
_mm_sub_pi8
(
__m64
__m1
,
__m64
__m2
)
...
@@ -231,6 +345,12 @@ _mm_sub_pi8 (__m64 __m1, __m64 __m2)
...
@@ -231,6 +345,12 @@ _mm_sub_pi8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_psubb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_psubb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_psubb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_sub_pi8
(
__m1
,
__m2
);
}
/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
static
__inline
__m64
static
__inline
__m64
_mm_sub_pi16
(
__m64
__m1
,
__m64
__m2
)
_mm_sub_pi16
(
__m64
__m1
,
__m64
__m2
)
...
@@ -238,6 +358,12 @@ _mm_sub_pi16 (__m64 __m1, __m64 __m2)
...
@@ -238,6 +358,12 @@ _mm_sub_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_psubw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_psubw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_psubw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_sub_pi16
(
__m1
,
__m2
);
}
/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
static
__inline
__m64
static
__inline
__m64
_mm_sub_pi32
(
__m64
__m1
,
__m64
__m2
)
_mm_sub_pi32
(
__m64
__m1
,
__m64
__m2
)
...
@@ -245,6 +371,12 @@ _mm_sub_pi32 (__m64 __m1, __m64 __m2)
...
@@ -245,6 +371,12 @@ _mm_sub_pi32 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_psubd
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
return
(
__m64
)
__builtin_ia32_psubd
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
}
}
static
__inline
__m64
_m_psubd
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_sub_pi32
(
__m1
,
__m2
);
}
/* Add the 64-bit values in M1 to the 64-bit values in M2. */
/* Add the 64-bit values in M1 to the 64-bit values in M2. */
static
__inline
__m64
static
__inline
__m64
_mm_sub_si64
(
__m64
__m1
,
__m64
__m2
)
_mm_sub_si64
(
__m64
__m1
,
__m64
__m2
)
...
@@ -260,6 +392,12 @@ _mm_subs_pi8 (__m64 __m1, __m64 __m2)
...
@@ -260,6 +392,12 @@ _mm_subs_pi8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_psubsb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_psubsb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_psubsb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_subs_pi8
(
__m1
,
__m2
);
}
/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
signed saturating arithmetic. */
signed saturating arithmetic. */
static
__inline
__m64
static
__inline
__m64
...
@@ -268,6 +406,12 @@ _mm_subs_pi16 (__m64 __m1, __m64 __m2)
...
@@ -268,6 +406,12 @@ _mm_subs_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_psubsw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_psubsw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_psubsw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_subs_pi16
(
__m1
,
__m2
);
}
/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
unsigned saturating arithmetic. */
unsigned saturating arithmetic. */
static
__inline
__m64
static
__inline
__m64
...
@@ -276,6 +420,12 @@ _mm_subs_pu8 (__m64 __m1, __m64 __m2)
...
@@ -276,6 +420,12 @@ _mm_subs_pu8 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_psubusb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_psubusb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_psubusb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_subs_pu8
(
__m1
,
__m2
);
}
/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
unsigned saturating arithmetic. */
unsigned saturating arithmetic. */
static
__inline
__m64
static
__inline
__m64
...
@@ -284,6 +434,12 @@ _mm_subs_pu16 (__m64 __m1, __m64 __m2)
...
@@ -284,6 +434,12 @@ _mm_subs_pu16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_psubusw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_psubusw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_psubusw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_subs_pu16
(
__m1
,
__m2
);
}
/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
four 32-bit intermediate results, which are then summed by pairs to
four 32-bit intermediate results, which are then summed by pairs to
produce two 32-bit results. */
produce two 32-bit results. */
...
@@ -293,6 +449,12 @@ _mm_madd_pi16 (__m64 __m1, __m64 __m2)
...
@@ -293,6 +449,12 @@ _mm_madd_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_pmaddwd
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_pmaddwd
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_pmaddwd
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_madd_pi16
(
__m1
,
__m2
);
}
/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
M2 and produce the high 16 bits of the 32-bit results. */
M2 and produce the high 16 bits of the 32-bit results. */
static
__inline
__m64
static
__inline
__m64
...
@@ -301,6 +463,12 @@ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
...
@@ -301,6 +463,12 @@ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_pmulhw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_pmulhw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_pmulhw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_mulhi_pi16
(
__m1
,
__m2
);
}
/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
the low 16 bits of the results. */
the low 16 bits of the results. */
static
__inline
__m64
static
__inline
__m64
...
@@ -309,6 +477,12 @@ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
...
@@ -309,6 +477,12 @@ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_pmullw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_pmullw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_pmullw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_mullo_pi16
(
__m1
,
__m2
);
}
/* Shift four 16-bit values in M left by COUNT. */
/* Shift four 16-bit values in M left by COUNT. */
static
__inline
__m64
static
__inline
__m64
_mm_sll_pi16
(
__m64
__m
,
__m64
__count
)
_mm_sll_pi16
(
__m64
__m
,
__m64
__count
)
...
@@ -317,11 +491,23 @@ _mm_sll_pi16 (__m64 __m, __m64 __count)
...
@@ -317,11 +491,23 @@ _mm_sll_pi16 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_psllw
(
__m64
__m
,
__m64
__count
)
{
return
_mm_sll_pi16
(
__m
,
__count
);
}
static
__inline
__m64
_mm_slli_pi16
(
__m64
__m
,
int
__count
)
_mm_slli_pi16
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_psllw
((
__v4hi
)
__m
,
__count
);
return
(
__m64
)
__builtin_ia32_psllw
((
__v4hi
)
__m
,
__count
);
}
}
static
__inline
__m64
_m_psllwi
(
__m64
__m
,
int
__count
)
{
return
_mm_slli_pi16
(
__m
,
__count
);
}
/* Shift two 32-bit values in M left by COUNT. */
/* Shift two 32-bit values in M left by COUNT. */
static
__inline
__m64
static
__inline
__m64
_mm_sll_pi32
(
__m64
__m
,
__m64
__count
)
_mm_sll_pi32
(
__m64
__m
,
__m64
__count
)
...
@@ -330,11 +516,23 @@ _mm_sll_pi32 (__m64 __m, __m64 __count)
...
@@ -330,11 +516,23 @@ _mm_sll_pi32 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_pslld
(
__m64
__m
,
__m64
__count
)
{
return
_mm_sll_pi32
(
__m
,
__count
);
}
static
__inline
__m64
_mm_slli_pi32
(
__m64
__m
,
int
__count
)
_mm_slli_pi32
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_pslld
((
__v2si
)
__m
,
__count
);
return
(
__m64
)
__builtin_ia32_pslld
((
__v2si
)
__m
,
__count
);
}
}
static
__inline
__m64
_m_pslldi
(
__m64
__m
,
int
__count
)
{
return
_mm_slli_pi32
(
__m
,
__count
);
}
/* Shift the 64-bit value in M left by COUNT. */
/* Shift the 64-bit value in M left by COUNT. */
static
__inline
__m64
static
__inline
__m64
_mm_sll_si64
(
__m64
__m
,
__m64
__count
)
_mm_sll_si64
(
__m64
__m
,
__m64
__count
)
...
@@ -343,11 +541,23 @@ _mm_sll_si64 (__m64 __m, __m64 __count)
...
@@ -343,11 +541,23 @@ _mm_sll_si64 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_psllq
(
__m64
__m
,
__m64
__count
)
{
return
_mm_sll_si64
(
__m
,
__count
);
}
static
__inline
__m64
_mm_slli_si64
(
__m64
__m
,
int
__count
)
_mm_slli_si64
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_psllq
((
long
long
)
__m
,
(
long
long
)
__count
);
return
(
__m64
)
__builtin_ia32_psllq
((
long
long
)
__m
,
(
long
long
)
__count
);
}
}
static
__inline
__m64
_m_psllqi
(
__m64
__m
,
int
__count
)
{
return
_mm_slli_si64
(
__m
,
__count
);
}
/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
static
__inline
__m64
static
__inline
__m64
_mm_sra_pi16
(
__m64
__m
,
__m64
__count
)
_mm_sra_pi16
(
__m64
__m
,
__m64
__count
)
...
@@ -356,11 +566,23 @@ _mm_sra_pi16 (__m64 __m, __m64 __count)
...
@@ -356,11 +566,23 @@ _mm_sra_pi16 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_psraw
(
__m64
__m
,
__m64
__count
)
{
return
_mm_sra_pi16
(
__m
,
__count
);
}
static
__inline
__m64
_mm_srai_pi16
(
__m64
__m
,
int
__count
)
_mm_srai_pi16
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_psraw
((
__v4hi
)
__m
,
__count
);
return
(
__m64
)
__builtin_ia32_psraw
((
__v4hi
)
__m
,
__count
);
}
}
static
__inline
__m64
_m_psrawi
(
__m64
__m
,
int
__count
)
{
return
_mm_srai_pi16
(
__m
,
__count
);
}
/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
static
__inline
__m64
static
__inline
__m64
_mm_sra_pi32
(
__m64
__m
,
__m64
__count
)
_mm_sra_pi32
(
__m64
__m
,
__m64
__count
)
...
@@ -369,11 +591,23 @@ _mm_sra_pi32 (__m64 __m, __m64 __count)
...
@@ -369,11 +591,23 @@ _mm_sra_pi32 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_psrad
(
__m64
__m
,
__m64
__count
)
{
return
_mm_sra_pi32
(
__m
,
__count
);
}
static
__inline
__m64
_mm_srai_pi32
(
__m64
__m
,
int
__count
)
_mm_srai_pi32
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_psrad
((
__v2si
)
__m
,
__count
);
return
(
__m64
)
__builtin_ia32_psrad
((
__v2si
)
__m
,
__count
);
}
}
static
__inline
__m64
_m_psradi
(
__m64
__m
,
int
__count
)
{
return
_mm_srai_pi32
(
__m
,
__count
);
}
/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
static
__inline
__m64
static
__inline
__m64
_mm_srl_pi16
(
__m64
__m
,
__m64
__count
)
_mm_srl_pi16
(
__m64
__m
,
__m64
__count
)
...
@@ -382,11 +616,23 @@ _mm_srl_pi16 (__m64 __m, __m64 __count)
...
@@ -382,11 +616,23 @@ _mm_srl_pi16 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_psrlw
(
__m64
__m
,
__m64
__count
)
{
return
_mm_srl_pi16
(
__m
,
__count
);
}
static
__inline
__m64
_mm_srli_pi16
(
__m64
__m
,
int
__count
)
_mm_srli_pi16
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_psrlw
((
__v4hi
)
__m
,
__count
);
return
(
__m64
)
__builtin_ia32_psrlw
((
__v4hi
)
__m
,
__count
);
}
}
static
__inline
__m64
_m_psrlwi
(
__m64
__m
,
int
__count
)
{
return
_mm_srli_pi16
(
__m
,
__count
);
}
/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
static
__inline
__m64
static
__inline
__m64
_mm_srl_pi32
(
__m64
__m
,
__m64
__count
)
_mm_srl_pi32
(
__m64
__m
,
__m64
__count
)
...
@@ -395,11 +641,23 @@ _mm_srl_pi32 (__m64 __m, __m64 __count)
...
@@ -395,11 +641,23 @@ _mm_srl_pi32 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_psrld
(
__m64
__m
,
__m64
__count
)
{
return
_mm_srl_pi32
(
__m
,
__count
);
}
static
__inline
__m64
_mm_srli_pi32
(
__m64
__m
,
int
__count
)
_mm_srli_pi32
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_psrld
((
__v2si
)
__m
,
__count
);
return
(
__m64
)
__builtin_ia32_psrld
((
__v2si
)
__m
,
__count
);
}
}
static
__inline
__m64
_m_psrldi
(
__m64
__m
,
int
__count
)
{
return
_mm_srli_pi32
(
__m
,
__count
);
}
/* Shift the 64-bit value in M left by COUNT; shift in zeros. */
/* Shift the 64-bit value in M left by COUNT; shift in zeros. */
static
__inline
__m64
static
__inline
__m64
_mm_srl_si64
(
__m64
__m
,
__m64
__count
)
_mm_srl_si64
(
__m64
__m
,
__m64
__count
)
...
@@ -408,11 +666,23 @@ _mm_srl_si64 (__m64 __m, __m64 __count)
...
@@ -408,11 +666,23 @@ _mm_srl_si64 (__m64 __m, __m64 __count)
}
}
static
__inline
__m64
static
__inline
__m64
_m_psrlq
(
__m64
__m
,
__m64
__count
)
{
return
_mm_srl_si64
(
__m
,
__count
);
}
static
__inline
__m64
_mm_srli_si64
(
__m64
__m
,
int
__count
)
_mm_srli_si64
(
__m64
__m
,
int
__count
)
{
{
return
(
__m64
)
__builtin_ia32_psrlq
((
long
long
)
__m
,
(
long
long
)
__count
);
return
(
__m64
)
__builtin_ia32_psrlq
((
long
long
)
__m
,
(
long
long
)
__count
);
}
}
static
__inline
__m64
_m_psrlqi
(
__m64
__m
,
int
__count
)
{
return
_mm_srli_si64
(
__m
,
__count
);
}
/* Bit-wise AND the 64-bit values in M1 and M2. */
/* Bit-wise AND the 64-bit values in M1 and M2. */
static
__inline
__m64
static
__inline
__m64
_mm_and_si64
(
__m64
__m1
,
__m64
__m2
)
_mm_and_si64
(
__m64
__m1
,
__m64
__m2
)
...
@@ -420,6 +690,12 @@ _mm_and_si64 (__m64 __m1, __m64 __m2)
...
@@ -420,6 +690,12 @@ _mm_and_si64 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_pand
((
long
long
)
__m1
,
(
long
long
)
__m2
);
return
(
__m64
)
__builtin_ia32_pand
((
long
long
)
__m1
,
(
long
long
)
__m2
);
}
}
static
__inline
__m64
_m_pand
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_and_si64
(
__m1
,
__m2
);
}
/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
64-bit value in M2. */
64-bit value in M2. */
static
__inline
__m64
static
__inline
__m64
...
@@ -428,6 +704,12 @@ _mm_andnot_si64 (__m64 __m1, __m64 __m2)
...
@@ -428,6 +704,12 @@ _mm_andnot_si64 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_pandn
((
long
long
)
__m1
,
(
long
long
)
__m2
);
return
(
__m64
)
__builtin_ia32_pandn
((
long
long
)
__m1
,
(
long
long
)
__m2
);
}
}
static
__inline
__m64
_m_pandn
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_andnot_si64
(
__m1
,
__m2
);
}
/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
static
__inline
__m64
static
__inline
__m64
_mm_or_si64
(
__m64
__m1
,
__m64
__m2
)
_mm_or_si64
(
__m64
__m1
,
__m64
__m2
)
...
@@ -435,6 +717,12 @@ _mm_or_si64 (__m64 __m1, __m64 __m2)
...
@@ -435,6 +717,12 @@ _mm_or_si64 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_por
((
long
long
)
__m1
,
(
long
long
)
__m2
);
return
(
__m64
)
__builtin_ia32_por
((
long
long
)
__m1
,
(
long
long
)
__m2
);
}
}
static
__inline
__m64
_m_por
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_or_si64
(
__m1
,
__m2
);
}
/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
static
__inline
__m64
static
__inline
__m64
_mm_xor_si64
(
__m64
__m1
,
__m64
__m2
)
_mm_xor_si64
(
__m64
__m1
,
__m64
__m2
)
...
@@ -442,6 +730,12 @@ _mm_xor_si64 (__m64 __m1, __m64 __m2)
...
@@ -442,6 +730,12 @@ _mm_xor_si64 (__m64 __m1, __m64 __m2)
return
(
__m64
)
__builtin_ia32_pxor
((
long
long
)
__m1
,
(
long
long
)
__m2
);
return
(
__m64
)
__builtin_ia32_pxor
((
long
long
)
__m1
,
(
long
long
)
__m2
);
}
}
static
__inline
__m64
_m_pxor
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_xor_si64
(
__m1
,
__m2
);
}
/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
test is true and zero if false. */
test is true and zero if false. */
static
__inline
__m64
static
__inline
__m64
...
@@ -451,11 +745,23 @@ _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
...
@@ -451,11 +745,23 @@ _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
}
}
static
__inline
__m64
static
__inline
__m64
_m_pcmpeqb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_cmpeq_pi8
(
__m1
,
__m2
);
}
static
__inline
__m64
_mm_cmpgt_pi8
(
__m64
__m1
,
__m64
__m2
)
_mm_cmpgt_pi8
(
__m64
__m1
,
__m64
__m2
)
{
{
return
(
__m64
)
__builtin_ia32_pcmpgtb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
return
(
__m64
)
__builtin_ia32_pcmpgtb
((
__v8qi
)
__m1
,
(
__v8qi
)
__m2
);
}
}
static
__inline
__m64
_m_pcmpgtb
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_cmpgt_pi8
(
__m1
,
__m2
);
}
/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
the test is true and zero if false. */
the test is true and zero if false. */
static
__inline
__m64
static
__inline
__m64
...
@@ -465,11 +771,23 @@ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
...
@@ -465,11 +771,23 @@ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
}
}
static
__inline
__m64
static
__inline
__m64
_m_pcmpeqw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_cmpeq_pi16
(
__m1
,
__m2
);
}
static
__inline
__m64
_mm_cmpgt_pi16
(
__m64
__m1
,
__m64
__m2
)
_mm_cmpgt_pi16
(
__m64
__m1
,
__m64
__m2
)
{
{
return
(
__m64
)
__builtin_ia32_pcmpgtw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
return
(
__m64
)
__builtin_ia32_pcmpgtw
((
__v4hi
)
__m1
,
(
__v4hi
)
__m2
);
}
}
static
__inline
__m64
_m_pcmpgtw
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_cmpgt_pi16
(
__m1
,
__m2
);
}
/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
the test is true and zero if false. */
the test is true and zero if false. */
static
__inline
__m64
static
__inline
__m64
...
@@ -479,11 +797,23 @@ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
...
@@ -479,11 +797,23 @@ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
}
}
static
__inline
__m64
static
__inline
__m64
_m_pcmpeqd
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_cmpeq_pi32
(
__m1
,
__m2
);
}
static
__inline
__m64
_mm_cmpgt_pi32
(
__m64
__m1
,
__m64
__m2
)
_mm_cmpgt_pi32
(
__m64
__m1
,
__m64
__m2
)
{
{
return
(
__m64
)
__builtin_ia32_pcmpgtd
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
return
(
__m64
)
__builtin_ia32_pcmpgtd
((
__v2si
)
__m1
,
(
__v2si
)
__m2
);
}
}
static
__inline
__m64
_m_pcmpgtd
(
__m64
__m1
,
__m64
__m2
)
{
return
_mm_cmpgt_pi32
(
__m1
,
__m2
);
}
/* Creates a 64-bit zero. */
/* Creates a 64-bit zero. */
static
__inline
__m64
static
__inline
__m64
_mm_setzero_si64
(
void
)
_mm_setzero_si64
(
void
)
...
@@ -583,62 +913,5 @@ _mm_set1_pi8 (char __b)
...
@@ -583,62 +913,5 @@ _mm_set1_pi8 (char __b)
return
_mm_set1_pi32
(
__i
);
return
_mm_set1_pi32
(
__i
);
}
}
/* Alternate intrinsic name definitions: map the instruction-mnemonic
   _m_* names (and the _m_from_int/_m_to_int conversions) onto the
   corresponding _mm_* MMX intrinsics.  Macro aliases forward any
   argument form, including the immediate-operand cases.  */
#define _m_empty _mm_empty
#define _m_from_int _mm_cvtsi32_si64
#define _m_to_int _mm_cvtsi64_si32
#define _m_packsswb _mm_packs_pi16
#define _m_packssdw _mm_packs_pi32
#define _m_packuswb _mm_packs_pu16
#define _m_punpckhbw _mm_unpackhi_pi8
#define _m_punpckhwd _mm_unpackhi_pi16
#define _m_punpckhdq _mm_unpackhi_pi32
#define _m_punpcklbw _mm_unpacklo_pi8
#define _m_punpcklwd _mm_unpacklo_pi16
#define _m_punpckldq _mm_unpacklo_pi32
#define _m_paddb _mm_add_pi8
#define _m_paddw _mm_add_pi16
#define _m_paddd _mm_add_pi32
#define _m_paddsb _mm_adds_pi8
#define _m_paddsw _mm_adds_pi16
#define _m_paddusb _mm_adds_pu8
#define _m_paddusw _mm_adds_pu16
#define _m_psubb _mm_sub_pi8
#define _m_psubw _mm_sub_pi16
#define _m_psubd _mm_sub_pi32
#define _m_psubsb _mm_subs_pi8
#define _m_psubsw _mm_subs_pi16
#define _m_psubusb _mm_subs_pu8
#define _m_psubusw _mm_subs_pu16
#define _m_pmaddwd _mm_madd_pi16
#define _m_pmulhw _mm_mulhi_pi16
#define _m_pmullw _mm_mullo_pi16
#define _m_psllw _mm_sll_pi16
#define _m_psllwi _mm_slli_pi16
#define _m_pslld _mm_sll_pi32
#define _m_pslldi _mm_slli_pi32
#define _m_psllq _mm_sll_si64
#define _m_psllqi _mm_slli_si64
#define _m_psraw _mm_sra_pi16
#define _m_psrawi _mm_srai_pi16
#define _m_psrad _mm_sra_pi32
#define _m_psradi _mm_srai_pi32
#define _m_psrlw _mm_srl_pi16
#define _m_psrlwi _mm_srli_pi16
#define _m_psrld _mm_srl_pi32
#define _m_psrldi _mm_srli_pi32
#define _m_psrlq _mm_srl_si64
#define _m_psrlqi _mm_srli_si64
#define _m_pand _mm_and_si64
#define _m_pandn _mm_andnot_si64
#define _m_por _mm_or_si64
#define _m_pxor _mm_xor_si64
#define _m_pcmpeqb _mm_cmpeq_pi8
#define _m_pcmpeqw _mm_cmpeq_pi16
#define _m_pcmpeqd _mm_cmpeq_pi32
#define _m_pcmpgtb _mm_cmpgt_pi8
#define _m_pcmpgtw _mm_cmpgt_pi16
#define _m_pcmpgtd _mm_cmpgt_pi32
#endif
/* __MMX__ */
#endif
/* __MMX__ */
#endif
/* _MMINTRIN_H_INCLUDED */
#endif
/* _MMINTRIN_H_INCLUDED */
gcc/config/i386/xmmintrin.h
View file @
c220e3a9
...
@@ -475,6 +475,12 @@ _mm_cvtss_si32 (__m128 __A)
...
@@ -475,6 +475,12 @@ _mm_cvtss_si32 (__m128 __A)
return
__builtin_ia32_cvtss2si
((
__v4sf
)
__A
);
return
__builtin_ia32_cvtss2si
((
__v4sf
)
__A
);
}
}
/* Alternate name for _mm_cvtss_si32: convert the lower SPFP value of
   __A to a 32-bit integer using the current rounding mode (CVTSS2SI).  */
static __inline int
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}
#ifdef __x86_64__
#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the current
/* Convert the lower SPFP value to a 32-bit integer according to the current
rounding mode. */
rounding mode. */
...
@@ -493,6 +499,12 @@ _mm_cvtps_pi32 (__m128 __A)
...
@@ -493,6 +499,12 @@ _mm_cvtps_pi32 (__m128 __A)
return
(
__m64
)
__builtin_ia32_cvtps2pi
((
__v4sf
)
__A
);
return
(
__m64
)
__builtin_ia32_cvtps2pi
((
__v4sf
)
__A
);
}
}
/* Alternate name for _mm_cvtps_pi32: convert the two lower SPFP values
   of __A to 32-bit integers using the current rounding mode (CVTPS2PI).  */
static __inline __m64
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}
/* Truncate the lower SPFP value to a 32-bit integer. */
/* Truncate the lower SPFP value to a 32-bit integer. */
static
__inline
int
static
__inline
int
_mm_cvttss_si32
(
__m128
__A
)
_mm_cvttss_si32
(
__m128
__A
)
...
@@ -500,6 +512,12 @@ _mm_cvttss_si32 (__m128 __A)
...
@@ -500,6 +512,12 @@ _mm_cvttss_si32 (__m128 __A)
return
__builtin_ia32_cvttss2si
((
__v4sf
)
__A
);
return
__builtin_ia32_cvttss2si
((
__v4sf
)
__A
);
}
}
/* Alternate name for _mm_cvttss_si32: truncate the lower SPFP value of
   __A to a 32-bit integer (CVTTSS2SI).  */
static __inline int
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}
#ifdef __x86_64__
#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer. */
/* Truncate the lower SPFP value to a 32-bit integer. */
static
__inline
long
long
static
__inline
long
long
...
@@ -517,6 +535,12 @@ _mm_cvttps_pi32 (__m128 __A)
...
@@ -517,6 +535,12 @@ _mm_cvttps_pi32 (__m128 __A)
return
(
__m64
)
__builtin_ia32_cvttps2pi
((
__v4sf
)
__A
);
return
(
__m64
)
__builtin_ia32_cvttps2pi
((
__v4sf
)
__A
);
}
}
/* Alternate name for _mm_cvttps_pi32: truncate the two lower SPFP
   values of __A to 32-bit integers (CVTTPS2PI).  */
static __inline __m64
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}
/* Convert B to a SPFP value and insert it as element zero in A. */
/* Convert B to a SPFP value and insert it as element zero in A. */
static
__inline
__m128
static
__inline
__m128
_mm_cvtsi32_ss
(
__m128
__A
,
int
__B
)
_mm_cvtsi32_ss
(
__m128
__A
,
int
__B
)
...
@@ -524,6 +548,12 @@ _mm_cvtsi32_ss (__m128 __A, int __B)
...
@@ -524,6 +548,12 @@ _mm_cvtsi32_ss (__m128 __A, int __B)
return
(
__m128
)
__builtin_ia32_cvtsi2ss
((
__v4sf
)
__A
,
__B
);
return
(
__m128
)
__builtin_ia32_cvtsi2ss
((
__v4sf
)
__A
,
__B
);
}
}
/* Alternate name for _mm_cvtsi32_ss: convert the 32-bit integer __B to
   SPFP form and insert it as element zero of __A (CVTSI2SS).  */
static __inline __m128
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}
#ifdef __x86_64__
#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A. */
/* Convert B to a SPFP value and insert it as element zero in A. */
static
__inline
__m128
static
__inline
__m128
...
@@ -541,6 +571,12 @@ _mm_cvtpi32_ps (__m128 __A, __m64 __B)
...
@@ -541,6 +571,12 @@ _mm_cvtpi32_ps (__m128 __A, __m64 __B)
return
(
__m128
)
__builtin_ia32_cvtpi2ps
((
__v4sf
)
__A
,
(
__v2si
)
__B
);
return
(
__m128
)
__builtin_ia32_cvtpi2ps
((
__v4sf
)
__A
,
(
__v2si
)
__B
);
}
}
/* Alternate name for _mm_cvtpi32_ps: convert the two 32-bit integers in
   __B to SPFP form and insert them in the low half of __A (CVTPI2PS).  */
static __inline __m128
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}
/* Convert the four signed 16-bit values in A to SPFP form. */
/* Convert the four signed 16-bit values in A to SPFP form. */
static
__inline
__m128
static
__inline
__m128
_mm_cvtpi16_ps
(
__m64
__A
)
_mm_cvtpi16_ps
(
__m64
__A
)
...
@@ -942,9 +978,16 @@ _mm_extract_pi16 (__m64 __A, int __N)
...
@@ -942,9 +978,16 @@ _mm_extract_pi16 (__m64 __A, int __N)
{
{
return __builtin_ia32_pextrw ((__v4hi)__A, __N);
return __builtin_ia32_pextrw ((__v4hi)__A, __N);
}
}
/* Alternate (instruction-mnemonic) name for _mm_extract_pi16: extract
   the 16-bit word selected by immediate __N from __A (PEXTRW).  */
static __inline int
_m_pextrw (__m64 __A, int __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#else
#define _mm_extract_pi16(A, N) \
#define _mm_extract_pi16(A, N) \
__builtin_ia32_pextrw ((__v4hi)(A), (N))
__builtin_ia32_pextrw ((__v4hi)(A), (N))
#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
#endif
#endif
/* Inserts word D into one of four words of A. The selector N must be
/* Inserts word D into one of four words of A. The selector N must be
...
@@ -955,9 +998,16 @@ _mm_insert_pi16 (__m64 __A, int __D, int __N)
...
@@ -955,9 +998,16 @@ _mm_insert_pi16 (__m64 __A, int __D, int __N)
{
{
return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
}
}
/* Alternate (instruction-mnemonic) name for _mm_insert_pi16: insert
   word __D into the word of __A selected by immediate __N (PINSRW).  */
static __inline __m64
_m_pinsrw (__m64 __A, int __D, int __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#else
#define _mm_insert_pi16(A, D, N) \
#define _mm_insert_pi16(A, D, N) \
((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif
#endif
/* Compute the element-wise maximum of signed 16-bit values. */
/* Compute the element-wise maximum of signed 16-bit values. */
...
@@ -967,6 +1017,12 @@ _mm_max_pi16 (__m64 __A, __m64 __B)
...
@@ -967,6 +1017,12 @@ _mm_max_pi16 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_pmaxsw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
return
(
__m64
)
__builtin_ia32_pmaxsw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_max_pi16: element-wise
   maximum of signed 16-bit values (PMAXSW).  */
static __inline __m64
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}
/* Compute the element-wise maximum of unsigned 8-bit values. */
/* Compute the element-wise maximum of unsigned 8-bit values. */
static
__inline
__m64
static
__inline
__m64
_mm_max_pu8
(
__m64
__A
,
__m64
__B
)
_mm_max_pu8
(
__m64
__A
,
__m64
__B
)
...
@@ -974,6 +1030,12 @@ _mm_max_pu8 (__m64 __A, __m64 __B)
...
@@ -974,6 +1030,12 @@ _mm_max_pu8 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_pmaxub
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
return
(
__m64
)
__builtin_ia32_pmaxub
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_max_pu8: element-wise
   maximum of unsigned 8-bit values (PMAXUB).  */
static __inline __m64
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}
/* Compute the element-wise minimum of signed 16-bit values. */
/* Compute the element-wise minimum of signed 16-bit values. */
static
__inline
__m64
static
__inline
__m64
_mm_min_pi16
(
__m64
__A
,
__m64
__B
)
_mm_min_pi16
(
__m64
__A
,
__m64
__B
)
...
@@ -981,6 +1043,12 @@ _mm_min_pi16 (__m64 __A, __m64 __B)
...
@@ -981,6 +1043,12 @@ _mm_min_pi16 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_pminsw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
return
(
__m64
)
__builtin_ia32_pminsw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_min_pi16: element-wise
   minimum of signed 16-bit values (PMINSW).  */
static __inline __m64
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}
/* Compute the element-wise minimum of unsigned 8-bit values. */
/* Compute the element-wise minimum of unsigned 8-bit values. */
static
__inline
__m64
static
__inline
__m64
_mm_min_pu8
(
__m64
__A
,
__m64
__B
)
_mm_min_pu8
(
__m64
__A
,
__m64
__B
)
...
@@ -988,6 +1056,12 @@ _mm_min_pu8 (__m64 __A, __m64 __B)
...
@@ -988,6 +1056,12 @@ _mm_min_pu8 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_pminub
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
return
(
__m64
)
__builtin_ia32_pminub
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_min_pu8: element-wise
   minimum of unsigned 8-bit values (PMINUB).  */
static __inline __m64
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}
/* Create an 8-bit mask of the signs of 8-bit values. */
/* Create an 8-bit mask of the signs of 8-bit values. */
static
__inline
int
static
__inline
int
_mm_movemask_pi8
(
__m64
__A
)
_mm_movemask_pi8
(
__m64
__A
)
...
@@ -995,6 +1069,12 @@ _mm_movemask_pi8 (__m64 __A)
...
@@ -995,6 +1069,12 @@ _mm_movemask_pi8 (__m64 __A)
return
__builtin_ia32_pmovmskb
((
__v8qi
)
__A
);
return
__builtin_ia32_pmovmskb
((
__v8qi
)
__A
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_movemask_pi8: build an
   8-bit mask from the sign bits of the bytes of __A (PMOVMSKB).  */
static __inline int
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}
/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
in B and produce the high 16 bits of the 32-bit results. */
in B and produce the high 16 bits of the 32-bit results. */
static
__inline
__m64
static
__inline
__m64
...
@@ -1003,6 +1083,12 @@ _mm_mulhi_pu16 (__m64 __A, __m64 __B)
...
@@ -1003,6 +1083,12 @@ _mm_mulhi_pu16 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_pmulhuw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
return
(
__m64
)
__builtin_ia32_pmulhuw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_mulhi_pu16: high 16
   bits of the products of unsigned 16-bit values (PMULHUW).  */
static __inline __m64
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}
/* Return a combination of the four 16-bit values in A. The selector
/* Return a combination of the four 16-bit values in A. The selector
must be an immediate. */
must be an immediate. */
#if 0
#if 0
...
@@ -1011,9 +1097,16 @@ _mm_shuffle_pi16 (__m64 __A, int __N)
...
@@ -1011,9 +1097,16 @@ _mm_shuffle_pi16 (__m64 __A, int __N)
{
{
return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}
}
/* Alternate (instruction-mnemonic) name for _mm_shuffle_pi16: combine
   the four 16-bit words of __A per immediate selector __N (PSHUFW).  */
static __inline __m64
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#else
#define _mm_shuffle_pi16(A, N) \
#define _mm_shuffle_pi16(A, N) \
((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif
#endif
/* Conditionally store byte elements of A into P. The high bit of each
/* Conditionally store byte elements of A into P. The high bit of each
...
@@ -1025,6 +1118,12 @@ _mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
...
@@ -1025,6 +1118,12 @@ _mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
__builtin_ia32_maskmovq
((
__v8qi
)
__A
,
(
__v8qi
)
__N
,
__P
);
__builtin_ia32_maskmovq
((
__v8qi
)
__A
,
(
__v8qi
)
__N
,
__P
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_maskmove_si64:
   conditionally store bytes of __A to *__P under the high bits of the
   corresponding bytes of mask __N (MASKMOVQ).  */
static __inline void
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}
/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
static
__inline
__m64
static
__inline
__m64
_mm_avg_pu8
(
__m64
__A
,
__m64
__B
)
_mm_avg_pu8
(
__m64
__A
,
__m64
__B
)
...
@@ -1032,6 +1131,12 @@ _mm_avg_pu8 (__m64 __A, __m64 __B)
...
@@ -1032,6 +1131,12 @@ _mm_avg_pu8 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_pavgb
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
return
(
__m64
)
__builtin_ia32_pavgb
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_avg_pu8: rounded
   averages of unsigned 8-bit values (PAVGB).  */
static __inline __m64
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}
/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
static
__inline
__m64
static
__inline
__m64
_mm_avg_pu16
(
__m64
__A
,
__m64
__B
)
_mm_avg_pu16
(
__m64
__A
,
__m64
__B
)
...
@@ -1039,6 +1144,12 @@ _mm_avg_pu16 (__m64 __A, __m64 __B)
...
@@ -1039,6 +1144,12 @@ _mm_avg_pu16 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_pavgw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
return
(
__m64
)
__builtin_ia32_pavgw
((
__v4hi
)
__A
,
(
__v4hi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_avg_pu16: rounded
   averages of unsigned 16-bit values (PAVGW).  */
static __inline __m64
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}
/* Compute the sum of the absolute differences of the unsigned 8-bit
/* Compute the sum of the absolute differences of the unsigned 8-bit
values in A and B. Return the value in the lower 16-bit word; the
values in A and B. Return the value in the lower 16-bit word; the
upper words are cleared. */
upper words are cleared. */
...
@@ -1048,6 +1159,12 @@ _mm_sad_pu8 (__m64 __A, __m64 __B)
...
@@ -1048,6 +1159,12 @@ _mm_sad_pu8 (__m64 __A, __m64 __B)
return
(
__m64
)
__builtin_ia32_psadbw
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
return
(
__m64
)
__builtin_ia32_psadbw
((
__v8qi
)
__A
,
(
__v8qi
)
__B
);
}
}
/* Alternate (instruction-mnemonic) name for _mm_sad_pu8: sum of
   absolute differences of unsigned 8-bit values, result in the low
   16-bit word (PSADBW).  */
static __inline __m64
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}
/* Loads one cache line from address P to a location "closer" to the
/* Loads one cache line from address P to a location "closer" to the
processor. The selector I specifies the type of prefetch operation. */
processor. The selector I specifies the type of prefetch operation. */
#if 0
#if 0
...
@@ -1106,30 +1223,6 @@ do { \
...
@@ -1106,30 +1223,6 @@ do { \
(row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD); \
(row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD); \
} while (0)
} while (0)
/* Alternate intrinsic name definitions: map the instruction-mnemonic
   _m_* names, the short _mm_cvt* conversion names, and the *_ps1
   spellings onto the corresponding SSE intrinsics.  */
#define _mm_cvt_ss2si _mm_cvtss_si32
#define _mm_cvt_ps2pi _mm_cvtps_pi32
#define _mm_cvtt_ss2si _mm_cvttss_si32
#define _mm_cvtt_ps2pi _mm_cvttps_pi32
#define _mm_cvt_si2ss _mm_cvtsi32_ss
#define _mm_cvt_pi2ps _mm_cvtpi32_ps
#define _m_pextrw _mm_extract_pi16
#define _m_pinsrw _mm_insert_pi16
#define _m_pmaxsw _mm_max_pi16
#define _m_pmaxub _mm_max_pu8
#define _m_pminsw _mm_min_pi16
#define _m_pminub _mm_min_pu8
#define _m_pmovmskb _mm_movemask_pi8
#define _m_pmulhuw _mm_mulhi_pu16
#define _m_pshufw _mm_shuffle_pi16
#define _m_maskmovq _mm_maskmove_si64
#define _m_pavgb _mm_avg_pu8
#define _m_pavgw _mm_avg_pu16
#define _m_psadbw _mm_sad_pu8
#define _mm_set_ps1 _mm_set1_ps
#define _mm_load_ps1 _mm_load1_ps
#define _mm_store_ps1 _mm_store1_ps
/* For backward source compatibility. */
/* For backward source compatibility. */
#include <emmintrin.h>
#include <emmintrin.h>
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment