Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
R
riscv-gcc-1
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
lvzhengyang
riscv-gcc-1
Commits
8ec3e357
Commit
8ec3e357
authored
Mar 13, 2008
by
Uros Bizjak
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Revert my previous commit.
From-SVN: r133167
parent
8a6c8430
Show whitespace changes
Inline
Side-by-side
Showing
11 changed files
with
26 additions
and
60 deletions
+26
-60
gcc/ChangeLog
+0
-16
gcc/config/i386/ammintrin.h
+2
-2
gcc/config/i386/bmmintrin.h
+1
-1
gcc/config/i386/emmintrin.h
+4
-4
gcc/config/i386/i386.h
+0
-5
gcc/config/i386/mmintrin-common.h
+2
-2
gcc/config/i386/smmintrin.h
+11
-11
gcc/config/i386/tmmintrin.h
+1
-1
gcc/config/i386/xmmintrin.h
+5
-5
gcc/testsuite/ChangeLog
+0
-5
gcc/testsuite/g++.dg/other/i386-3.C
+0
-8
No files found.
gcc/ChangeLog
View file @
8ec3e357
2008-03-13 Uros Bizjak <ubizjak@gmail.com>
PR target/35553
* config/i386/i386.h (TARGET_CPU_CPP_BUILTINS): Define
__SSE_USE_INLINED_FUNC__ when using -fkeep-inline-functions
together with optimization.
* config/i386/xmmintrin.h: Use __SSE_USE_INLINED_FUNC__ instead of
__OPTIMIZE__ to choose between inlined intrinsic SSE function having
immediate arguments and its equivalent macro definition.
* config/i386/bmmintrin.h: Ditto.
* config/i386/smmintrin.h: Ditto.
* config/i386/tmmintrin.h: Ditto.
* config/i386/mmintrin-common.h: Ditto.
* config/i386/ammintrin.h: Ditto.
* config/i386/emmintrin.h: Ditto.
2008-03-13 Jakub Jelinek <jakub@redhat.com>
PR middle-end/35185
...
...
gcc/config/i386/ammintrin.h
View file @
8ec3e357
...
...
@@ -55,7 +55,7 @@ _mm_extract_si64 (__m128i __X, __m128i __Y)
return
(
__m128i
)
__builtin_ia32_extrq
((
__v2di
)
__X
,
(
__v16qi
)
__Y
);
}
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_extracti_si64
(
__m128i
__X
,
unsigned
const
int
__I
,
unsigned
const
int
__L
)
{
...
...
@@ -73,7 +73,7 @@ _mm_insert_si64 (__m128i __X,__m128i __Y)
return
(
__m128i
)
__builtin_ia32_insertq
((
__v2di
)
__X
,
(
__v2di
)
__Y
);
}
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_inserti_si64
(
__m128i
__X
,
__m128i
__Y
,
unsigned
const
int
__I
,
unsigned
const
int
__L
)
{
...
...
gcc/config/i386/bmmintrin.h
View file @
8ec3e357
...
...
@@ -350,7 +350,7 @@ _mm_rot_epi64(__m128i __A, __m128i __B)
/* Rotates - Immediate form */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_roti_epi8
(
__m128i
__A
,
const
int
__B
)
{
...
...
gcc/config/i386/emmintrin.h
View file @
8ec3e357
...
...
@@ -880,7 +880,7 @@ _mm_cvtss_sd (__m128d __A, __m128 __B)
return
(
__m128d
)
__builtin_ia32_cvtss2sd
((
__v2df
)
__A
,
(
__v4sf
)
__B
);
}
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128d
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_shuffle_pd
(
__m128d
__A
,
__m128d
__B
,
const
int
__mask
)
{
...
...
@@ -1144,7 +1144,7 @@ _mm_srai_epi32 (__m128i __A, int __B)
return
(
__m128i
)
__builtin_ia32_psradi128
((
__v4si
)
__A
,
__B
);
}
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_srli_si128
(
__m128i
__A
,
const
int
__N
)
{
...
...
@@ -1307,7 +1307,7 @@ _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
return
(
__m128i
)
__builtin_ia32_pcmpgtd128
((
__v4si
)
__A
,
(
__v4si
)
__B
);
}
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
int
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_extract_epi16
(
__m128i
const
__A
,
int
const
__N
)
{
...
...
@@ -1363,7 +1363,7 @@ _mm_mulhi_epu16 (__m128i __A, __m128i __B)
return
(
__m128i
)
__builtin_ia32_pmulhuw128
((
__v8hi
)
__A
,
(
__v8hi
)
__B
);
}
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_shufflehi_epi16
(
__m128i
__A
,
const
int
__mask
)
{
...
...
gcc/config/i386/i386.h
View file @
8ec3e357
...
...
@@ -691,11 +691,6 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
builtin_define ("__SSE_MATH__"); \
if (TARGET_SSE_MATH && TARGET_SSE2) \
builtin_define ("__SSE2_MATH__"); \
\
/* Use inlined intrinsic SSE function having immediate \
arguments instead of a macro definition. */
\
if (optimize && !flag_keep_inline_functions) \
builtin_define ("__SSE_USE_INLINED_FUNC__"); \
} \
while (0)
...
...
gcc/config/i386/mmintrin-common.h
View file @
8ec3e357
...
...
@@ -92,7 +92,7 @@ _mm_testnzc_si128 (__m128i __M, __m128i __V)
/* Packed/scalar double precision floating point rounding. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128d
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_round_pd
(
__m128d
__V
,
const
int
__M
)
{
...
...
@@ -117,7 +117,7 @@ _mm_round_sd(__m128d __D, __m128d __V, const int __M)
/* Packed/scalar single precision floating point rounding. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_round_ps
(
__m128
__V
,
const
int
__M
)
{
...
...
gcc/config/i386/smmintrin.h
View file @
8ec3e357
...
...
@@ -44,7 +44,7 @@
/* Integer blend instructions - select data from 2 sources using
constant/variable mask. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_blend_epi16
(
__m128i
__X
,
__m128i
__Y
,
const
int
__M
)
{
...
...
@@ -69,7 +69,7 @@ _mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
/* Single precision floating point blend instructions - select data
from 2 sources using constant/variable mask. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_blend_ps
(
__m128
__X
,
__m128
__Y
,
const
int
__M
)
{
...
...
@@ -94,7 +94,7 @@ _mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
/* Double precision floating point blend instructions - select data
from 2 sources using constant/variable mask. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128d
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_blend_pd
(
__m128d
__X
,
__m128d
__Y
,
const
int
__M
)
{
...
...
@@ -119,7 +119,7 @@ _mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
/* Dot product instructions with mask-defined summing and zeroing parts
of result. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_dp_ps
(
__m128
__X
,
__m128
__Y
,
const
int
__M
)
{
...
...
@@ -224,7 +224,7 @@ _mm_mul_epi32 (__m128i __X, __m128i __Y)
index, the bits [5-4] define D index, and bits [3-0] define
zeroing mask for D. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_insert_ps
(
__m128
__D
,
__m128
__S
,
const
int
__N
)
{
...
...
@@ -244,7 +244,7 @@ _mm_insert_ps (__m128 __D, __m128 __S, const int __N)
/* Extract binary representation of single precision float from packed
single precision array element of X selected by index N. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
int
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_extract_ps
(
__m128
__X
,
const
int
__N
)
{
...
...
@@ -277,7 +277,7 @@ _mm_extract_ps (__m128 __X, const int __N)
/* Insert integer, S, into packed integer array element of D
selected by index N. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_insert_epi8
(
__m128i
__D
,
int
__S
,
const
int
__N
)
{
...
...
@@ -319,7 +319,7 @@ _mm_insert_epi64 (__m128i __D, long long __S, const int __N)
/* Extract integer from packed integer array element of X selected by
index N. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
int
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_extract_epi8
(
__m128i
__X
,
const
int
__N
)
{
...
...
@@ -447,7 +447,7 @@ _mm_packus_epi32 (__m128i __X, __m128i __Y)
byte integers in the first 2 operands. Starting offsets within
operands are determined by the 3rd mask operand. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_mpsadbw_epu8
(
__m128i
__X
,
__m128i
__Y
,
const
int
__M
)
{
...
...
@@ -497,7 +497,7 @@ _mm_stream_load_si128 (__m128i *__X)
/* Intrinsics for text/string processing. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_cmpistrm
(
__m128i
__X
,
__m128i
__Y
,
const
int
__M
)
{
...
...
@@ -550,7 +550,7 @@ _mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
/* Intrinsics for text/string processing and reading values of
EFlags. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
int
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_cmpistra
(
__m128i
__X
,
__m128i
__Y
,
const
int
__M
)
{
...
...
gcc/config/i386/tmmintrin.h
View file @
8ec3e357
...
...
@@ -181,7 +181,7 @@ _mm_sign_pi32 (__m64 __X, __m64 __Y)
return
(
__m64
)
__builtin_ia32_psignd
((
__v2si
)
__X
,
(
__v2si
)
__Y
);
}
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128i
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_alignr_epi8
(
__m128i
__X
,
__m128i
__Y
,
const
int
__N
)
{
...
...
gcc/config/i386/xmmintrin.h
View file @
8ec3e357
...
...
@@ -716,7 +716,7 @@ _mm_cvtps_pi8(__m128 __A)
}
/* Selects four specific SPFP values from A and B based on MASK. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m128
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_shuffle_ps
(
__m128
__A
,
__m128
__B
,
int
const
__mask
)
{
...
...
@@ -992,7 +992,7 @@ _mm_move_ss (__m128 __A, __m128 __B)
}
/* Extracts one of the four words of A. The selector N must be immediate. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
int
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_extract_pi16
(
__m64
const
__A
,
int
const
__N
)
{
...
...
@@ -1013,7 +1013,7 @@ _m_pextrw (__m64 const __A, int const __N)
/* Inserts word D into one of four words of A. The selector N must be
immediate. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m64
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_insert_pi16
(
__m64
const
__A
,
int
const
__D
,
int
const
__N
)
{
...
...
@@ -1114,7 +1114,7 @@ _m_pmulhuw (__m64 __A, __m64 __B)
/* Return a combination of the four 16-bit values in A. The selector
must be an immediate. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
__m64
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_shuffle_pi16
(
__m64
__A
,
int
const
__N
)
{
...
...
@@ -1191,7 +1191,7 @@ _m_psadbw (__m64 __A, __m64 __B)
/* Loads one cache line from address P to a location "closer" to the
processor. The selector I specifies the type of prefetch operation. */
#ifdef __
SSE_USE_INLINED_FUNC
__
#ifdef __
OPTIMIZE
__
static
__inline
void
__attribute__
((
__always_inline__
,
__artificial__
))
_mm_prefetch
(
const
void
*
__P
,
enum
_mm_hint
__I
)
{
...
...
gcc/testsuite/ChangeLog
View file @
8ec3e357
2008
-
03
-
13
Uros
Bizjak
<
ubizjak
@gmail
.
com
>
PR
target
/
35553
*
g
++
.
dg
/
other
/
i386
-
3
.
C
:
New
test
.
2008
-
03
-
13
Paolo
Bonzini
<
bonzini
@gnu
.
org
>
PR
tree
-
opt
/
35422
gcc/testsuite/g++.dg/other/i386-3.C
deleted
100644 → 0
View file @
8a6c8430
/* Test that {,x,e,p,t,s,a,b}mmintrin.h, mm3dnow.h and mm_malloc.h are
usable with -O -fkeep-inline-functions. */
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
/* { dg-options "-O -fkeep-inline-functions -march=k8 -m3dnow -msse4 -msse5" } */
#include <bmmintrin.h>
#include <smmintrin.h>
#include <mm3dnow.h>
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment