Commit 0dc47331 by Peter Bergner <bergner@vnet.ibm.com>, 2016-06-27

re PR target/71656 (ICE in reload when generating code for -mcpu=power9 -mpower9-dform-vector)

gcc/
	PR target/71656
	* config/rs6000/rs6000-cpus.def (ISA_3_0_MASKS_SERVER): Add
	OPTION_MASK_P9_DFORM_VECTOR.
	* config/rs6000/rs6000.c (rs6000_option_override_internal): Do not
	disable -mpower9-dform-vector when using reload.
	(quad_address_p): Remove 'gpr_p' argument and all associated code.
	New 'strict' argument.  Update all callers.  Add strict addressing
	support.
	(rs6000_legitimate_offset_address_p): Remove call to
	virtual_stack_registers_memory_p.
	(rs6000_legitimize_reload_address): Add quad address support.
	(rs6000_legitimate_address_p): Move call to quad_address_p above
	call to virtual_stack_registers_memory_p.  Adjust quad_address_p args
	to account for new strict usage.
	(rs6000_output_move_128bit): Adjust quad_address_p args to account
	for new strict usage.
	* config/rs6000/predicates.md (quad_memory_operand): Likewise.

gcc/testsuite/
	PR target/71656
	* gcc.target/powerpc/pr71656-1.c: New test.
	* gcc.target/powerpc/pr71656-2.c: New test.

From-SVN: r237811
gcc/config/rs6000/predicates.md
@@ -740,7 +740,7 @@
   if (GET_MODE_SIZE (mode) != 16 || !MEM_P (op) || MEM_ALIGN (op) < 128)
     return false;
 
-  return quad_address_p (XEXP (op, 0), mode, true);
+  return quad_address_p (XEXP (op, 0), mode, false);
 })
 
 ;; Return 1 if the operand is suitable for load/store to vector registers with
gcc/config/rs6000/rs6000-cpus.def
@@ -61,14 +61,14 @@
 				 | OPTION_MASK_UPPER_REGS_SF)
 
 /* Add ISEL back into ISA 3.0, since it is supposed to be a win.  Do not add
-   P9_MINMAX until the hardware that supports it is available.  Do not add
-   P9_DFORM_VECTOR until LRA is the default register allocator.  */
+   P9_MINMAX until the hardware that supports it is available.  */
 
 #define ISA_3_0_MASKS_SERVER	(ISA_2_7_MASKS_SERVER			\
 				 | OPTION_MASK_FLOAT128_HW		\
 				 | OPTION_MASK_ISEL			\
 				 | OPTION_MASK_MODULO			\
 				 | OPTION_MASK_P9_FUSION		\
 				 | OPTION_MASK_P9_DFORM_SCALAR		\
+				 | OPTION_MASK_P9_DFORM_VECTOR		\
 				 | OPTION_MASK_P9_VECTOR)
 
 #define POWERPC_7400_MASK	(OPTION_MASK_PPC_GFXOPT | OPTION_MASK_ALTIVEC)
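In effect, -mpower9-dform-vector now belongs to the default -mcpu=power9 (server) flag set, and the override code below only clears it again when the user explicitly asked for -mno-power9-dform-vector. A tiny standalone sketch of that "default mask, then honour explicit user overrides" pattern; the enum values are made up, and only the OR/clear logic loosely mirrors how rs6000_isa_flags and rs6000_isa_flags_explicit interact:

#include <stdio.h>

/* Made-up stand-ins for the real OPTION_MASK_* bits.  */
enum
{
  MASK_P9_VECTOR       = 1 << 0,
  MASK_P9_DFORM_SCALAR = 1 << 1,
  MASK_P9_DFORM_VECTOR = 1 << 2
};

int
main (void)
{
  /* ISA_3_0_MASKS_SERVER-style default set for -mcpu=power9.  */
  unsigned flags = MASK_P9_VECTOR | MASK_P9_DFORM_SCALAR | MASK_P9_DFORM_VECTOR;

  /* Bits the user set or cleared explicitly on the command line.  */
  unsigned explicit_bits   = MASK_P9_DFORM_VECTOR; /* e.g. -mno-power9-dform-vector */
  unsigned explicit_values = 0;			   /* ...and the requested value is 0 */

  /* Keep the defaults except where the user gave an explicit answer.  */
  flags = (flags & ~explicit_bits) | (explicit_values & explicit_bits);

  printf ("p9-dform-vector enabled: %s\n",
	  (flags & MASK_P9_DFORM_VECTOR) ? "yes" : "no");
  return 0;
}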
gcc/config/rs6000/rs6000.c
@@ -4271,13 +4271,10 @@ rs6000_option_override_internal (bool global_init_p)
     rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
 
   /* -mpower9-dform turns on both -mpower9-dform-scalar and
-     -mpower9-dform-vector.  There are currently problems if
-     -mpower9-dform-vector instructions are enabled when we use the RELOAD
-     register allocator.  */
+     -mpower9-dform-vector.  */
   if (TARGET_P9_DFORM_BOTH > 0)
     {
-      if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR)
-	  && TARGET_LRA)
+      if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
 	rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
 
       if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
@@ -4323,11 +4320,10 @@ rs6000_option_override_internal (bool global_init_p)
 	    rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
 	}
 
-  /* There have been bugs with both -mvsx-timode and -mpower9-dform-vector that
-     don't show up with -mlra, but do show up with -mno-lra.  Given -mlra will
-     become the default once PR 69847 is fixed, turn off the options with
-     problems by default if -mno-lra was used, and warn if the user explicitly
-     asked for the option.
+  /* There have been bugs with -mvsx-timode that don't show up with -mlra,
+     but do show up with -mno-lra.  Given -mlra will become the default once
+     PR 69847 is fixed, turn off the options with problems by default if
+     -mno-lra was used, and warn if the user explicitly asked for the option.
 
      Enable -mpower9-dform-vector by default if LRA and other power9 options.
      Enable -mvsx-timode by default if LRA and VSX.  */
@@ -4341,15 +4337,6 @@ rs6000_option_override_internal (bool global_init_p)
 	  else
 	    rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
 	}
-
-      if (TARGET_P9_DFORM_VECTOR)
-	{
-	  if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) != 0)
-	    warning (0, "-mpower9-dform-vector might need -mlra");
-
-	  else
-	    rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
-	}
     }
 
   else
@@ -4357,11 +4344,6 @@ rs6000_option_override_internal (bool global_init_p)
       if (TARGET_VSX && !TARGET_VSX_TIMODE
 	  && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
 	rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
-
-      if (TARGET_VSX && TARGET_P9_VECTOR && !TARGET_P9_DFORM_VECTOR
-	  && TARGET_P9_DFORM_SCALAR && TARGET_P9_DFORM_BOTH < 0
-	  && (rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) == 0)
-	rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
     }
 
   /* Set -mallow-movmisalign to explicitly on if we have full ISA 2.07
@@ -7248,34 +7230,24 @@ quad_address_offset_p (HOST_WIDE_INT offset)
    3.0 LXV/STXV instruction.  */
 
 bool
-quad_address_p (rtx addr, machine_mode mode, bool gpr_p)
+quad_address_p (rtx addr, machine_mode mode, bool strict)
 {
   rtx op0, op1;
 
   if (GET_MODE_SIZE (mode) != 16)
     return false;
 
-  if (gpr_p)
-    {
-      if (!TARGET_QUAD_MEMORY && !TARGET_SYNC_TI)
-	return false;
-
-      /* LQ/STQ can handle indirect addresses.  */
-      if (base_reg_operand (addr, Pmode))
-	return true;
-    }
-
-  else
-    {
-      if (!mode_supports_vsx_dform_quad (mode))
-	return false;
-    }
+  if (legitimate_indirect_address_p (addr, strict))
+    return true;
+
+  if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
+    return false;
 
   if (GET_CODE (addr) != PLUS)
     return false;
 
   op0 = XEXP (addr, 0);
-  if (!base_reg_operand (op0, Pmode))
+  if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
    return false;
 
  op1 = XEXP (addr, 1);
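For reference, the reg+offset form that quad_address_p accepts must use an offset valid for the ISA 3.0 DQ-form encoding behind lxv/stxv: a signed 16-bit displacement whose four least significant bits are zero. A minimal standalone sketch of that rule, illustrative only rather than the GCC implementation, with the helper name loosely mirroring quad_address_offset_p:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative restatement of the DQ-form offset rule: the displacement
   must fit in a signed 16-bit field and be a multiple of 16.  */
static bool
dq_form_offset_ok (long offset)
{
  return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
}

int
main (void)
{
  long tests[] = { 0, 16, -32768, 32752, 20, 32767 };
  for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
    printf ("%6ld -> %s\n", tests[i],
	    dq_form_offset_ok (tests[i]) ? "ok" : "not a DQ-form offset");
  return 0;
}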
@@ -7644,8 +7616,7 @@ rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
   if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
     return false;
   if (mode_supports_vsx_dform_quad (mode))
-    return (virtual_stack_registers_memory_p (x)
-	    || quad_address_p (x, mode, false));
+    return quad_address_p (x, mode, strict);
   if (!reg_offset_addressing_ok_p (mode))
     return virtual_stack_registers_memory_p (x);
   if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
@@ -8548,6 +8519,7 @@ rs6000_legitimize_reload_address (rtx x, machine_mode mode,
 				   int ind_levels ATTRIBUTE_UNUSED, int *win)
 {
   bool reg_offset_p = reg_offset_addressing_ok_p (mode);
+  bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
 
   /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
      DFmode/DImode MEM.  Ditto for ISA 3.0 vsx_splat_v4sf/v4si.  */
@@ -8617,6 +8589,7 @@ rs6000_legitimize_reload_address (rtx x, machine_mode mode,
 
   if (TARGET_CMODEL != CMODEL_SMALL
       && reg_offset_p
+      && !quad_offset_p
       && small_toc_ref (x, VOIDmode))
     {
       rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
@@ -8634,22 +8607,24 @@ rs6000_legitimize_reload_address (rtx x, machine_mode mode,
     }
 
   if (GET_CODE (x) == PLUS
-      && GET_CODE (XEXP (x, 0)) == REG
+      && REG_P (XEXP (x, 0))
       && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
       && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
-      && GET_CODE (XEXP (x, 1)) == CONST_INT
+      && CONST_INT_P (XEXP (x, 1))
       && reg_offset_p
      && !SPE_VECTOR_MODE (mode)
      && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
-      && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
+      && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
     {
       HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
       HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
       HOST_WIDE_INT high
	 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
 
-      /* Check for 32-bit overflow.  */
-      if (high + low != val)
+      /* Check for 32-bit overflow or quad addresses with one of the
+	 four least significant bits set.  */
+      if (high + low != val
+	  || (quad_offset_p && (low & 0xf)))
 	{
 	  *win = 0;
 	  return x;
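The decomposition in this hunk is how reload rebuilds an out-of-range constant offset as high + low, where low is the sign-extended bottom 16 bits; the new check additionally refuses the split for quad (DQ-form) addresses whenever low has any of its four least significant bits set, since the resulting displacement could not be encoded. A small self-contained illustration of the same arithmetic, a sketch reusing the expressions above rather than GCC code:

#include <stdio.h>

int
main (void)
{
  long vals[] = { 0x12345, 0x12340, 0x7fff, 0x8004 };
  for (unsigned i = 0; i < sizeof vals / sizeof vals[0]; i++)
    {
      long val = vals[i];
      /* Same decomposition as the reload code above: 'low' is the
	 sign-extended bottom 16 bits, 'high' is the remainder.  */
      long low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      long high = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      int overflow = (high + low != val);
      int bad_quad = ((low & 0xf) != 0);   /* would not fit a DQ-form field */
      printf ("val=%#lx  high=%#lx  low=%ld  overflow=%d  quad-unusable=%d\n",
	      val, high, low, overflow, bad_quad);
    }
  return 0;
}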
@@ -8677,6 +8652,7 @@ rs6000_legitimize_reload_address (rtx x, machine_mode mode,
 
   if (GET_CODE (x) == SYMBOL_REF
       && reg_offset_p
+      && !quad_offset_p
       && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
       && !SPE_VECTOR_MODE (mode)
 #if TARGET_MACHO
@@ -8761,6 +8737,7 @@ rs6000_legitimize_reload_address (rtx x, machine_mode mode,
 
   if (TARGET_TOC
       && reg_offset_p
+      && !quad_offset_p
       && GET_CODE (x) == SYMBOL_REF
       && use_toc_relative_ref (x, mode))
     {
@@ -8849,15 +8826,14 @@ rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
       && mode_supports_pre_incdec_p (mode)
       && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
     return 1;
 
-  if (virtual_stack_registers_memory_p (x))
-    return 1;
-
   /* Handle restricted vector d-form offsets in ISA 3.0.  */
   if (quad_offset_p)
     {
-      if (quad_address_p (x, mode, false))
+      if (quad_address_p (x, mode, reg_ok_strict))
 	return 1;
     }
+  else if (virtual_stack_registers_memory_p (x))
+    return 1;
   else if (reg_offset_p)
     {
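A recurring point in this patch is the strict/non-strict distinction now threaded through quad_address_p: before register allocation an address may still use a pseudo register as its base, so non-strict checking accepts it, while strict checking (during and after reload/LRA) accepts only hard registers that are valid base registers. That is why rs6000_legitimate_address_p forwards its reg_ok_strict flag here, and why rs6000_output_move_128bit below passes true, since only hard registers remain once assembly is emitted. A rough sketch of the distinction with invented names, not the actual rs6000 macros:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical constant standing in for the real target definition.  */
#define FIRST_PSEUDO_REG 120   /* assumption: register numbers >= this are pseudos */

/* Is 'regno' usable as the base register of an address?  Non-strict
   checking accepts any pseudo (it will get a hard register later);
   strict checking accepts only hard registers from the base class
   (pretend GPRs 1..31 here, with r0 excluded as on rs6000).  */
static bool
base_reg_ok (unsigned regno, bool strict)
{
  if (regno >= FIRST_PSEUDO_REG)
    return !strict;
  return regno >= 1 && regno <= 31;
}

int
main (void)
{
  printf ("pseudo 200: non-strict=%d strict=%d\n",
	  base_reg_ok (200, false), base_reg_ok (200, true));
  printf ("r9:         non-strict=%d strict=%d\n",
	  base_reg_ok (9, false), base_reg_ok (9, true));
  return 0;
}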
@@ -20463,7 +20439,7 @@ rs6000_output_move_128bit (rtx operands[])
   else if (TARGET_VSX && dest_vsx_p)
     {
       if (mode_supports_vsx_dform_quad (mode)
-	  && quad_address_p (XEXP (src, 0), mode, false))
+	  && quad_address_p (XEXP (src, 0), mode, true))
 	return "lxv %x0,%1";
 
       else if (TARGET_P9_VECTOR)
@@ -20501,7 +20477,7 @@ rs6000_output_move_128bit (rtx operands[])
   else if (TARGET_VSX && src_vsx_p)
     {
       if (mode_supports_vsx_dform_quad (mode)
-	  && quad_address_p (XEXP (dest, 0), mode, false))
+	  && quad_address_p (XEXP (dest, 0), mode, true))
 	return "stxv %x1,%0";
 
       else if (TARGET_P9_VECTOR)
gcc/testsuite/gcc.target/powerpc/pr71656-1.c (new file)

/* Test for reload ICE arising from POWER9 Vector Dform code generation.  */
/* { dg-do compile } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-options "-O1 -mcpu=power9 -mpower9-dform-vector -mno-lra" } */

typedef __attribute__((altivec(vector__))) int type_t;

type_t
func (type_t *src)
{
  asm volatile ("# force the base reg on the load below to be spilled"
		: /* no outputs */
		: /* no inputs */
		: "r0", "r3", "r4", "r5", "r6", "r7",
		  "r8", "r9", "r10", "r11", "r12", "r14", "r15",
		  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
		  "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31");
  return src[1];
}
gcc/testsuite/gcc.target/powerpc/pr71656-2.c (new file)

/* Test for reload ICE arising from POWER9 Vector Dform code generation.  */
/* { dg-do compile } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-options "-O3 -mcpu=power9 -mpower9-dform-vector -mno-lra -funroll-loops -fno-aggressive-loop-optimizations" } */

typedef double vec[3];
struct vec_t
{
  vec x;
  vec y;
};
int a, j, k, l, m, n, o, p, q;
double b, i;
vec c;
double h[6];
void func1 (vec);

void
func2 (double *)
{
  for (; k; k--)
    for (; j <= k;)
      for (; m <= q; m++)
	for (; n <= k; n++)
	  for (; o <= l; o++)
	    {
	      j = p + m + n + o;
	      h[j] = i;
	    }
}

void
func3 (void)
{
  vec_t d;
  func1 (d.y);
  func2 (&b);
  for (; a;)
    {
      double *e = d.y, *g;
      double f;
      c[0] = g[0] + f * e[0];
      c[1] = g[1] + f * e[1];
      func1 (c);
    }
}