Commit 8505bf12 by Paul A. Clarke, committed by Paul Clarke

[rs6000] Consistently use '__vector' instead of 'vector'

Revision r265535 committed changes that used 'vector' instead of the
preferred '__vector'.  '__vector' is preferred because it avoids any
conflict with uses of 'vector' as an ordinary identifier (e.g., in C++
code).  Indeed, gcc/config/rs6000/xmmintrin.h undefines 'vector',
leading to errors:

  gcc/include/xmmintrin.h:999:20: error: 'vector' undeclared (first use in this function); did you mean 'vec_or'?
  gcc/include/xmmintrin.h:999:20: note: each undeclared identifier is reported only once for each function it appears in
  gcc/include/xmmintrin.h:999:26: error: expected ')' before 'long'
  gcc/include/xmmintrin.h:999:37: error: expected ')' before 'result'
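
A minimal sketch of the failure mode (hypothetical code, not part of
the patch; compile with -maltivec on a POWER target).  Once a header
undefines 'vector', only the reserved '__vector' spelling still parses:

  #include <altivec.h>
  #undef vector            /* the equivalent of what rs6000 xmmintrin.h does */

  long long
  extract (__vector unsigned long long v)
  {
    /* OK: '__vector' is a reserved spelling and always available.  */
    return ((__vector long long) v)[0];
    /* No longer parses after the #undef above:
       return ((vector long long) v)[0];  */
  }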

Also fixed a few whitespace issues.

[gcc]

2018-10-29  Paul A. Clarke  <pc@us.ibm.com>

	* gcc/config/rs6000/mmintrin.h (_mm_packs_pi16, _mm_packs_pi32,
	_mm_packs_pu16, _mm_unpackhi_pi8, _mm_unpacklo_pi8, _mm_add_pi8,
	_mm_add_pi16, _mm_add_pi32, _mm_sub_pi8, _mm_sub_pi16, _mm_sub_pi32,
	_mm_cmpgt_pi8, _mm_cmpeq_pi16, _mm_cmpgt_pi16, _mm_cmpeq_pi32,
	_mm_cmpgt_pi32, _mm_adds_pi8, _mm_adds_pi16, _mm_adds_pu8,
	_mm_adds_pu16, _mm_subs_pi8, _mm_subs_pi16, _mm_subs_pu8,
	_mm_subs_pu16, _mm_madd_pi16, _mm_mulhi_pi16, _mm_mullo_pi16,
	_mm_sll_pi16, _mm_sra_pi16, _mm_srl_pi16, _mm_set1_pi16, _mm_set1_pi8):
	Change 'vector' to '__vector'.
	* gcc/config/rs6000/xmmintrin.h (_mm_cvtps_pi32, _mm_cvttps_pi32,
	_mm_cvtps_pi16, _mm_cvtps_pi8, _mm_max_pi16, _mm_max_pu8, _mm_min_pi16,
	_mm_min_pu8, _mm_mulhi_pu16, _mm_shuffle_pi16, _mm_avg_pu8,
	_mm_avg_pu16): Likewise.  And, whitespace corrections.

From-SVN: r265601
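
Before the fix, merely including the header reproduced the quoted
errors (the offending lines near xmmintrin.h:999 fall inside
_mm_cvtps_pi32).  A hypothetical reproducer, with an assumed command
line:

  /* repro.c - build on powerpc64le with something like:
       gcc -O2 -mcpu=power8 -DNO_WARN_X86_INTRINSICS -c repro.c  */
  #include <xmmintrin.h>

  __m64
  to_pi32 (__m128 x)
  {
    return _mm_cvtps_pi32 (x);
  }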
@@ -996,7 +996,7 @@ _mm_cvtps_pi32 (__m128 __A)
   rounded = vec_rint(temp);
   result = (__vector unsigned long long) vec_cts (rounded, 0);
-  return (__m64) ((vector long long) result)[0];
+  return (__m64) ((__vector long long) result)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
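
A note on the idiom being fixed throughout: GCC's vector extension lets
a vector be cast to another vector type of the same size and
subscripted like an array, so '((__vector long long) result)[0]'
reinterprets the 128-bit register as two 64-bit lanes and extracts
lane 0 (the low doubleword on little-endian POWER) for return as the
64-bit __m64.  A standalone sketch (mine, not from the patch; assumes
a POWER target with -mvsx):

  #include <altivec.h>

  /* Reinterpret a 128-bit vector as two 64-bit lanes and take lane 0,
     the low doubleword in little-endian lane order.  */
  long long
  low_doubleword (__vector unsigned long long v)
  {
    return ((__vector long long) v)[0];
  }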
@@ -1053,7 +1053,7 @@ _mm_cvttps_pi32 (__m128 __A)
   temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
   result = (__vector unsigned long long) vec_cts (temp, 0);
-  return (__m64) ((vector long long) result)[0];
+  return (__m64) ((__vector long long) result)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1190,7 +1190,7 @@ _mm_cvtpu8_ps (__m64 __A)
 /* Convert the four signed 32-bit values in A and B to SPFP form.  */
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
+_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
 {
   __vector signed int vi4;
   __vector float vf4;
@@ -1202,7 +1202,7 @@ _mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
 /* Convert the four SPFP values in A to four signed 16-bit integers.  */
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16(__m128 __A)
+_mm_cvtps_pi16 (__m128 __A)
 {
   __v4sf rounded;
   __vector signed int temp;
@@ -1212,12 +1212,12 @@ _mm_cvtps_pi16(__m128 __A)
   temp = vec_cts (rounded, 0);
   result = (__vector unsigned long long) vec_pack (temp, temp);
-  return (__m64) ((vector long long) result)[0];
+  return (__m64) ((__vector long long) result)[0];
 }
 
 /* Convert the four SPFP values in A to four signed 8-bit integers.  */
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8(__m128 __A)
+_mm_cvtps_pi8 (__m128 __A)
 {
   __v4sf rounded;
   __vector signed int tmp_i;
@@ -1229,7 +1229,7 @@ _mm_cvtps_pi8(__m128 __A)
   tmp_i = vec_cts (rounded, 0);
   tmp_s = vec_pack (tmp_i, zero);
   res_v = vec_pack (tmp_s, tmp_s);
-  return (__m64) ((vector long long) res_v)[0];
+  return (__m64) ((__vector long long) res_v)[0];
 }
 
 /* Selects four specific SPFP values from A and B based on MASK.  */
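
_mm_cvtps_pi8 above narrows in steps: vec_cts converts the rounded
floats to 32-bit integers, then two vec_pack calls halve the element
width twice (32 -> 16 -> 8 bits), leaving the four bytes in the low
doubleword.  The same chain as a standalone sketch (hypothetical,
assumes -mvsx and little-endian lane order):

  #include <altivec.h>

  long long
  four_floats_to_bytes (__vector float f)
  {
    __vector signed int   zero32 = vec_splats (0);
    __vector signed int   i4  = vec_cts (vec_rint (f), 0); /* round, then float -> int32 */
    __vector signed short s16 = vec_pack (i4, zero32);     /* 32-bit -> 16-bit (modulo pack) */
    __vector signed char  s8  = vec_pack (s16, vec_splats ((signed short) 0)); /* -> 8-bit */
    return ((__vector long long) s8)[0];  /* four int8 results in the low four bytes */
  }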
@@ -1429,7 +1429,7 @@ _mm_max_pi16 (__m64 __A, __m64 __B)
   b = (__vector signed short)vec_splats (__B);
   c = (__vector __bool short)vec_cmpgt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
@@ -1467,7 +1467,7 @@ _mm_max_pu8 (__m64 __A, __m64 __B)
   b = (__vector unsigned char)vec_splats (__B);
   c = (__vector __bool char)vec_cmpgt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
   long i;
@@ -1503,7 +1503,7 @@ _mm_min_pi16 (__m64 __A, __m64 __B)
   b = (__vector signed short)vec_splats (__B);
   c = (__vector __bool short)vec_cmplt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
@@ -1541,7 +1541,7 @@ _mm_min_pu8 (__m64 __A, __m64 __B)
   b = (__vector unsigned char)vec_splats (__B);
   c = (__vector __bool char)vec_cmplt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
   long i;
@@ -1600,7 +1600,7 @@ _mm_mulhi_pu16 (__m64 __A, __m64 __B)
   w1 = vec_vmulouh (a, b);
   c = (__vector unsigned short)vec_perm (w0, w1, xform1);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1643,7 +1643,7 @@ _mm_shuffle_pi16 (__m64 __A, int const __N)
   p = vec_splats (t.as_m64);
   a = vec_splats (__A);
   r = vec_perm (a, a, (__vector unsigned char)p);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
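
_mm_shuffle_pi16 above realizes the 4-way 16-bit shuffle as a byte
permute: the control vector maps each 16-bit destination lane to the
byte pair of its source lane.  A sketch of how such a control could be
built from the immediate (hypothetical helper, little-endian lane
order assumed; the header itself derives the control with shift
arithmetic on a union):

  #include <altivec.h>

  __vector unsigned char
  shuffle_pi16_control (int n)
  {
    __vector unsigned char ctl = vec_splats ((unsigned char) 0);
    for (int lane = 0; lane < 4; lane++)
      {
        unsigned src = (n >> (2 * lane)) & 3;  /* source lane for this dest lane */
        ctl[2 * lane]     = 2 * src;           /* low byte of the 16-bit lane */
        ctl[2 * lane + 1] = 2 * src + 1;       /* high byte */
      }
    for (int b = 8; b < 16; b++)   /* only the low 8 bytes matter for an __m64 */
      ctl[b] = ctl[b - 8];
    return ctl;
  }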
@@ -1683,7 +1683,7 @@ _mm_avg_pu8 (__m64 __A, __m64 __B)
   a = (__vector unsigned char)vec_splats (__A);
   b = (__vector unsigned char)vec_splats (__B);
   c = vec_avg (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1701,7 +1701,7 @@ _mm_avg_pu16 (__m64 __A, __m64 __B)
   a = (__vector unsigned short)vec_splats (__A);
   b = (__vector unsigned short)vec_splats (__B);
   c = vec_avg (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))