Commit 3292e376 by H.J. Lu

Consolidate ABI warning into type_natural_mode

gcc/

	PR target/59794
	* config/i386/i386.c (type_natural_mode): Add a bool parameter
	to indicate if type is used for function return value.  Warn
	ABI change if the vector mode isn't available for function
	return value.
	(ix86_function_arg_advance): Pass false to type_natural_mode.
	(ix86_function_arg): Likewise.
	(ix86_gimplify_va_arg): Likewise.
	(function_arg_32): Don't warn ABI change.
	(ix86_function_value): Pass true to type_natural_mode.
	(ix86_return_in_memory): Likewise.
	(ix86_struct_value_rtx): Removed.
	(TARGET_STRUCT_VALUE_RTX): Likewise.

gcc/testsuite/

	PR target/59794
	* g++.dg/ext/vector23.C: Also prune ABI change for Linux/x86.
	* gcc.target/i386/pr39162.c (y): New __m256i variable.
	(bar): Change return type to void.  Set y to x.
	* gcc.target/i386/pr59794-1.c: New testcase.
	* gcc.target/i386/pr59794-2.c: Likewise.
	* gcc.target/i386/pr59794-3.c: Likewise.
	* gcc.target/i386/pr59794-4.c: Likewise.
	* gcc.target/i386/pr59794-5.c: Likewise.
	* gcc.target/i386/pr59794-6.c: Likewise.
	* gcc.target/i386/pr59794-7.c: Likewise.

From-SVN: r206603
parent db930875
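
In user-visible terms, the consolidation means that vector values returned from a function now get the same once-per-compilation ABI-change warning that vector arguments already had when the required ISA is disabled. A minimal sketch of code that triggers the new diagnostic (the -mno-avx compile line in the comment is an assumption for reproducing it, mirroring the new pr59794 tests further below):

/* Sketch: a 32-byte vector returned from a translation unit compiled
   without AVX, e.g. "gcc -O2 -mno-avx -S sketch.c" on x86.  With this
   change type_natural_mode (called with in_return true) emits
   "AVX vector return without AVX enabled changes the ABI".  */

typedef int v8si __attribute__ ((__vector_size__ (32)));

extern v8si x;

v8si
foo (void)
{
  return x;
}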

gcc/ChangeLog:

+2014-01-14  H.J. Lu  <hongjiu.lu@intel.com>
+
+	PR target/59794
+	* config/i386/i386.c (type_natural_mode): Add a bool parameter
+	to indicate if type is used for function return value.  Warn
+	ABI change if the vector mode isn't available for function
+	return value.
+	(ix86_function_arg_advance): Pass false to type_natural_mode.
+	(ix86_function_arg): Likewise.
+	(ix86_gimplify_va_arg): Likewise.
+	(function_arg_32): Don't warn ABI change.
+	(ix86_function_value): Pass true to type_natural_mode.
+	(ix86_return_in_memory): Likewise.
+	(ix86_struct_value_rtx): Removed.
+	(TARGET_STRUCT_VALUE_RTX): Likewise.
+
 2014-01-14  Richard Sandiford  <rsandifo@linux.vnet.ibm.com>
 
 	* jump.c (redirect_jump_2): Remove REG_CROSSING_JUMP notes when
@@ -18,7 +34,7 @@
 2014-01-14  Kirill Yukhin  <kirill.yukhin@intel.com>
 
 	* config/i386/avx512erintrin.h (_mm_rcp28_round_sd): New.
 	(_mm_rcp28_round_ss): Ditto.
 	(_mm_rsqrt28_round_sd): Ditto.
 	(_mm_rsqrt28_round_ss): Ditto.

gcc/config/i386/i386.c:

@@ -6104,10 +6104,14 @@ init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
    The middle-end can't deal with the vector types > 16 bytes.  In this
    case, we return the original mode and warn ABI change if CUM isn't
-   NULL.  */
+   NULL.
+
+   If IN_RETURN is true, warn ABI change if the vector mode isn't
+   available for function return value.  */
 
 static enum machine_mode
-type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
+type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum,
+                   bool in_return)
 {
   enum machine_mode mode = TYPE_MODE (type);
 
@@ -6133,6 +6137,7 @@ type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
       if (size == 32 && !TARGET_AVX)
        {
          static bool warnedavx;
+         static bool warnedavx_ret;
 
          if (cum
              && !warnedavx
@@ -6142,12 +6147,20 @@ type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
              warning (0, "AVX vector argument without AVX "
                       "enabled changes the ABI");
            }
+         else if (in_return & !warnedavx_ret)
+           {
+             warnedavx_ret = true;
+             warning (0, "AVX vector return without AVX "
+                      "enabled changes the ABI");
+           }
+
          return TYPE_MODE (type);
        }
      else if (((size == 8 && TARGET_64BIT) || size == 16)
               && !TARGET_SSE)
        {
          static bool warnedsse;
+         static bool warnedsse_ret;
 
          if (cum
              && !warnedsse
@@ -6157,10 +6170,19 @@ type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
              warning (0, "SSE vector argument without SSE "
                       "enabled changes the ABI");
            }
+         else if (!TARGET_64BIT
+                  && in_return
+                  & !warnedsse_ret)
+           {
+             warnedsse_ret = true;
+             warning (0, "SSE vector return without SSE "
+                      "enabled changes the ABI");
+           }
        }
      else if ((size == 8 && !TARGET_64BIT) && !TARGET_MMX)
        {
          static bool warnedmmx;
+         static bool warnedmmx_ret;
 
          if (cum
              && !warnedmmx
@@ -6170,6 +6192,12 @@ type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
              warning (0, "MMX vector argument without MMX "
                       "enabled changes the ABI");
            }
+         else if (in_return & !warnedmmx_ret)
+           {
+             warnedmmx_ret = true;
+             warning (0, "MMX vector return without MMX "
+                      "enabled changes the ABI");
+           }
        }
      return mode;
    }
@@ -7097,7 +7125,7 @@ ix86_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
   words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
 
   if (type)
-    mode = type_natural_mode (type, NULL);
+    mode = type_natural_mode (type, NULL, false);
 
   if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
     function_arg_advance_ms_64 (cum, bytes, words);
@@ -7125,8 +7153,6 @@ function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                  enum machine_mode orig_mode, const_tree type,
                  HOST_WIDE_INT bytes, HOST_WIDE_INT words)
 {
-  static bool warnedsse, warnedmmx;
-
   /* Avoid the AL settings for the Unix64 ABI.  */
   if (mode == VOIDmode)
     return constm1_rtx;
@@ -7183,12 +7209,6 @@ function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
     case V2DFmode:
       if (!type || !AGGREGATE_TYPE_P (type))
        {
-         if (!TARGET_SSE && !warnedsse && cum->warn_sse)
-           {
-             warnedsse = true;
-             warning (0, "SSE vector argument without SSE enabled "
-                      "changes the ABI");
-           }
          if (cum->sse_nregs)
            return gen_reg_or_parallel (mode, orig_mode,
                                        cum->sse_regno + FIRST_SSE_REG);
@@ -7228,12 +7248,6 @@ function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
     case V1DImode:
       if (!type || !AGGREGATE_TYPE_P (type))
        {
-         if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
-           {
-             warnedmmx = true;
-             warning (0, "MMX vector argument without MMX enabled "
-                      "changes the ABI");
-           }
          if (cum->mmx_nregs)
            return gen_reg_or_parallel (mode, orig_mode,
                                        cum->mmx_regno + FIRST_MMX_REG);
@@ -7362,7 +7376,7 @@ ix86_function_arg (cumulative_args_t cum_v, enum machine_mode omode,
   /* To simplify the code below, represent vector types with a vector mode
      even if MMX/SSE are not active.  */
   if (type && TREE_CODE (type) == VECTOR_TYPE)
-    mode = type_natural_mode (type, cum);
+    mode = type_natural_mode (type, cum, false);
 
   if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
     arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
@@ -7816,7 +7830,7 @@ ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
   enum machine_mode mode, orig_mode;
 
   orig_mode = TYPE_MODE (valtype);
-  mode = type_natural_mode (valtype, NULL);
+  mode = type_natural_mode (valtype, NULL, true);
   return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
 }
@@ -7935,7 +7949,7 @@ ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
 #ifdef SUBTARGET_RETURN_IN_MEMORY
   return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
 #else
-  const enum machine_mode mode = type_natural_mode (type, NULL);
+  const enum machine_mode mode = type_natural_mode (type, NULL, true);
 
   if (TARGET_64BIT)
     {
@@ -7949,52 +7963,6 @@ ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
 #endif
 }
 
-/* When returning SSE vector types, we have a choice of either
-     (1) being abi incompatible with a -march switch, or
-     (2) generating an error.
-   Given no good solution, I think the safest thing is one warning.
-   The user won't be able to use -Werror, but....
-
-   Choose the STRUCT_VALUE_RTX hook because that's (at present) only
-   called in response to actually generating a caller or callee that
-   uses such a type.  As opposed to TARGET_RETURN_IN_MEMORY, which is called
-   via aggregate_value_p for general type probing from tree-ssa.  */
-
-static rtx
-ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
-{
-  static bool warnedsse, warnedmmx;
-
-  if (!TARGET_64BIT && type)
-    {
-      /* Look at the return type of the function, not the function type.  */
-      enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
-
-      if (!TARGET_SSE && !warnedsse)
-        {
-          if (mode == TImode
-              || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
-            {
-              warnedsse = true;
-              warning (0, "SSE vector return without SSE enabled "
-                       "changes the ABI");
-            }
-        }
-
-      if (!TARGET_MMX && !warnedmmx)
-        {
-          if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
-            {
-              warnedmmx = true;
-              warning (0, "MMX vector return without MMX enabled "
-                       "changes the ABI");
-            }
-        }
-    }
-
-  return NULL;
-}
-
 /* Create the va_list data type.  */
@@ -8419,7 +8387,7 @@ ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
   size = int_size_in_bytes (type);
   rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
 
-  nat_mode = type_natural_mode (type, NULL);
+  nat_mode = type_natural_mode (type, NULL, false);
   switch (nat_mode)
     {
     case V8SFmode:
@@ -46819,8 +46787,6 @@ ix86_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
 #undef TARGET_PROMOTE_PROTOTYPES
 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
-#undef TARGET_STRUCT_VALUE_RTX
-#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
 #undef TARGET_SETUP_INCOMING_VARARGS
 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
 #undef TARGET_MUST_PASS_IN_STACK
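
The net effect of the i386.c changes above is that every warn-once flag now lives in type_natural_mode: one flag per vector kind for arguments (taken when CUM is non-null) and a separate one for return values (taken when IN_RETURN is true). Below is a small standalone model of that pattern, for illustration only; have_avx, abi_warning and check_avx_vector_abi are invented stand-ins, not GCC internals.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the target flag and the diagnostic routine.  */
static bool have_avx = false;

static void
abi_warning (const char *msg)
{
  fprintf (stderr, "warning: %s\n", msg);
}

/* Warn once for arguments and once for return values, mirroring the
   warnedavx / warnedavx_ret pair added to type_natural_mode.  */
static void
check_avx_vector_abi (bool is_argument, bool in_return)
{
  static bool warned_arg, warned_ret;

  if (have_avx)
    return;

  if (is_argument && !warned_arg)
    {
      warned_arg = true;
      abi_warning ("AVX vector argument without AVX enabled changes the ABI");
    }
  else if (in_return && !warned_ret)
    {
      warned_ret = true;
      abi_warning ("AVX vector return without AVX enabled changes the ABI");
    }
}

int
main (void)
{
  check_avx_vector_abi (true, false);   /* warns once about arguments */
  check_avx_vector_abi (true, false);   /* already warned: silent */
  check_avx_vector_abi (false, true);   /* warns once about return values */
  check_avx_vector_abi (false, true);   /* already warned: silent */
  return 0;
}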

gcc/testsuite/ChangeLog:

+2014-01-14  H.J. Lu  <hongjiu.lu@intel.com>
+
+	PR target/59794
+	* g++.dg/ext/vector23.C: Also prune ABI change for Linux/x86.
+	* gcc.target/i386/pr39162.c (y): New __m256i variable.
+	(bar): Change return type to void.  Set y to x.
+	* gcc.target/i386/pr59794-1.c: New testcase.
+	* gcc.target/i386/pr59794-2.c: Likewise.
+	* gcc.target/i386/pr59794-3.c: Likewise.
+	* gcc.target/i386/pr59794-4.c: Likewise.
+	* gcc.target/i386/pr59794-5.c: Likewise.
+	* gcc.target/i386/pr59794-6.c: Likewise.
+	* gcc.target/i386/pr59794-7.c: Likewise.
+
 2014-01-14  Richard Biener  <rguenther@suse.de>
 
 	PR tree-optimization/58921
@@ -17,7 +31,7 @@
 2014-01-14  Kirill Yukhin  <kirill.yukhin@intel.com>
 
 	* gcc.target/i386/avx-1.c: Fix __builtin_ia32_exp2ps_mask,
 	__builtin_ia32_exp2pd_mask, __builtin_ia32_rcp28ps_mask,
 	__builtin_ia32_rcp28pd_mask, __builtin_ia32_rsqrt28ps_mask,
 	__builtin_ia32_rsqrt28pd_mask.  Add __builtin_ia32_rcp28ss_round,

gcc/testsuite/g++.dg/ext/vector23.C:

@@ -2,6 +2,8 @@
 /* { dg-options "-std=gnu++1y -Wsign-conversion" } */
 // Ignore warning on some powerpc-linux configurations.
 // { dg-prune-output "non-standard ABI extension" }
+// Ignore warning on Linux/x86
+// { dg-prune-output "changes the ABI" }
 
 typedef double vecd __attribute__((vector_size(4*sizeof(double))));
 typedef float vecf __attribute__((vector_size(8*sizeof(float))));

gcc/testsuite/gcc.target/i386/pr39162.c:

@@ -4,8 +4,10 @@
 typedef long long __m256i __attribute__ ((__vector_size__ (32), __may_alias__));
 
-__m256i
+extern __m256i y;
+
+void
 bar (__m256i x) /* { dg-warning "AVX" "" } */
 {
-  return x;
+  y = x;
 }
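
The pr39162.c adjustment keeps that test focused on the argument diagnostic: with the consolidated logic, the old body returning the __m256i would be expected to trigger the new return-value warning as well. A sketch of the previous shape, annotated with the diagnostics one would now expect (the annotations are illustrative comments, not dg directives from the commit):

typedef long long __m256i __attribute__ ((__vector_size__ (32), __may_alias__));

/* Expected without AVX after this change:
   "AVX vector argument without AVX enabled changes the ABI" (as before), and
   "AVX vector return without AVX enabled changes the ABI" (new).  */
__m256i
bar (__m256i x)
{
  return x;
}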

gcc/testsuite/gcc.target/i386/pr59794-1.c (new):

/* PR target/59794 */
/* { dg-do compile { target { ia32 } } } */
/* { dg-options "-O2 -mno-mmx" } */
/* { dg-skip-if "no MMX vector" { *-*-mingw* } } */

typedef int __v2si __attribute__ ((__vector_size__ (8)));
extern __v2si x;
extern void bar (__v2si);

void
foo (void)
{
  bar (x); /* { dg-message "warning: MMX vector argument without MMX enabled changes the ABI" } */
}

gcc/testsuite/gcc.target/i386/pr59794-2.c (new):

/* PR target/59794 */
/* { dg-options "-Wno-psabi -O2 -mno-sse" } */
/* { dg-skip-if "no SSE vector" { *-*-mingw* } } */

typedef double __v2df __attribute__ ((__vector_size__ (16)));
extern __v2df x;
extern void bar (__v2df);

void
foo (void)
{
  bar (x); /* { dg-message "warning: SSE vector argument without SSE enabled changes the ABI" } */
}

gcc/testsuite/gcc.target/i386/pr59794-3.c (new):

/* PR target/59794 */
/* { dg-options "-O2 -mno-avx -Wno-psabi" } */
/* { dg-skip-if "no AVX vector" { *-*-mingw* } } */

typedef int __v8si __attribute__ ((__vector_size__ (32)));
extern __v8si x;
extern void bar (__v8si);

void
foo (void)
{
  bar (x); /* { dg-message "warning: AVX vector argument without AVX enabled changes the ABI" } */
}

gcc/testsuite/gcc.target/i386/pr59794-4.c (new):

/* PR target/59794 */
/* { dg-do compile { target { ia32 } } } */
/* { dg-options "-O2 -mno-mmx" } */
/* { dg-skip-if "no MMX vector" { *-*-mingw* } } */

typedef int __v2si __attribute__ ((__vector_size__ (8)));
extern __v2si x;

__v2si
foo (void)
{ /* { dg-warning "MMX vector return without MMX enabled changes the ABI" } */
  return x;
}

gcc/testsuite/gcc.target/i386/pr59794-5.c (new):

/* PR target/59794 */
/* { dg-do compile { target { ia32 } } } */
/* { dg-options "-O2 -mno-sse" } */
/* { dg-skip-if "no SSE vector" { *-*-mingw* } } */

typedef int __v4si __attribute__ ((__vector_size__ (16)));
extern __v4si x;

__v4si
foo (void)
{ /* { dg-warning "SSE vector return without SSE enabled changes the ABI" } */
  return x;
}

gcc/testsuite/gcc.target/i386/pr59794-6.c (new):

/* PR target/59794 */
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-O2 -mno-sse" } */
/* { dg-skip-if "no SSE vector" { *-*-mingw* } } */

typedef int __v4si __attribute__ ((__vector_size__ (16)));
extern __v4si x;

__v4si
foo (void)
{ /* { dg-error "SSE register return with SSE disabled" } */
  return x;
}

gcc/testsuite/gcc.target/i386/pr59794-7.c (new):

/* PR target/59794 */
/* { dg-options "-O2 -mno-avx" } */
/* { dg-skip-if "no AVX vector" { *-*-mingw* } } */

typedef int __v8si __attribute__ ((__vector_size__ (32)));
extern __v8si x;

__v8si
foo (void)
{ /* { dg-warning "AVX vector return without AVX enabled changes the ABI" } */
  return x;
}