Commit 09b22f48 by Jakub Jelinek, committed by Jakub Jelinek

re PR middle-end/59917 (ICE in calc_dfs_tree, at dominance.c:401)

	PR middle-end/59917
	PR tree-optimization/59920
	* tree.c (build_common_builtin_nodes): Remove
	__builtin_setjmp_dispatcher initialization.
	* omp-low.h (make_gimple_omp_edges): Add a new int * argument.
	* profile.c (branch_prob): Use gsi_start_nondebug_after_labels_bb
	instead of gsi_after_labels + manually skipping debug stmts.
	Don't ignore bbs with BUILT_IN_SETJMP_DISPATCHER, instead
	ignore bbs with IFN_ABNORMAL_DISPATCHER.
	* tree-inline.c (copy_edges_for_bb): Remove
	can_make_abnormal_goto argument, instead add abnormal_goto_dest
	argument.  Ignore computed_goto_p stmts.  Don't call
	make_abnormal_goto_edges.  If a call might need abnormal edges
	for non-local gotos, see if it already has an edge to
	IFN_ABNORMAL_DISPATCHER or if it is IFN_ABNORMAL_DISPATCHER
	with true argument, don't do anything then, otherwise add
	EDGE_ABNORMAL from the call's bb to abnormal_goto_dest.
	(copy_cfg_body): Compute abnormal_goto_dest, adjust copy_edges_for_bb
	caller.
	* gimple-low.c (struct lower_data): Remove calls_builtin_setjmp.
	(lower_function_body): Don't emit __builtin_setjmp_dispatcher.
	(lower_stmt): Don't set data->calls_builtin_setjmp.
	(lower_builtin_setjmp): Adjust comment.
	* builtins.def (BUILT_IN_SETJMP_DISPATCHER): Remove.
	* tree-cfg.c (found_computed_goto): Remove.
	(factor_computed_gotos): Remove.
	(make_goto_expr_edges): Return bool, true for computed gotos.
	Don't call make_abnormal_goto_edges.
	(build_gimple_cfg): Don't set found_computed_goto, don't call
	factor_computed_gotos.
	(computed_goto_p): No longer static.
	(make_blocks): Don't set found_computed_goto.
	(get_abnormal_succ_dispatcher, handle_abnormal_edges): New functions.
	(make_edges): If make_goto_expr_edges returns true, push bb
	into ab_edge_goto vector, for stmt_can_make_abnormal_goto calls
	instead of calling make_abnormal_goto_edges push bb into ab_edge_call
	vector.  Record mapping between bbs and OpenMP regions if there
	are any, adjust make_gimple_omp_edges caller.  Call
	handle_abnormal_edges.
	(make_abnormal_goto_edges): Remove.
	* tree-cfg.h (make_abnormal_goto_edges): Remove.
	(computed_goto_p, get_abnormal_succ_dispatcher): New prototypes.
	* internal-fn.c (expand_ABNORMAL_DISPATCHER): New function.
	* builtins.c (expand_builtin): Don't handle
	BUILT_IN_SETJMP_DISPATCHER.
	* internal-fn.def (ABNORMAL_DISPATCHER): New.
	* omp-low.c (make_gimple_omp_edges): Add region_idx argument, when
	filling *region also set *region_idx to (*region)->entry->index.

	* gcc.dg/pr59920-1.c: New test.
	* gcc.dg/pr59920-2.c: New test.
	* gcc.dg/pr59920-3.c: New test.
	* c-c++-common/gomp/pr59917-1.c: New test.
	* c-c++-common/gomp/pr59917-2.c: New test.

From-SVN: r207231
parent 42eb8bd1
2014-01-29 Jakub Jelinek <jakub@redhat.com>
PR middle-end/59917
PR tree-optimization/59920
* tree.c (build_common_builtin_nodes): Remove
__builtin_setjmp_dispatcher initialization.
* omp-low.h (make_gimple_omp_edges): Add a new int * argument.
* profile.c (branch_prob): Use gsi_start_nondebug_after_labels_bb
instead of gsi_after_labels + manually skipping debug stmts.
Don't ignore bbs with BUILT_IN_SETJMP_DISPATCHER, instead
ignore bbs with IFN_ABNORMAL_DISPATCHER.
* tree-inline.c (copy_edges_for_bb): Remove
can_make_abnormal_goto argument, instead add abnormal_goto_dest
argument. Ignore computed_goto_p stmts. Don't call
make_abnormal_goto_edges. If a call might need abnormal edges
for non-local gotos, see if it already has an edge to
IFN_ABNORMAL_DISPATCHER or if it is IFN_ABNORMAL_DISPATCHER
with true argument, don't do anything then, otherwise add
EDGE_ABNORMAL from the call's bb to abnormal_goto_dest.
(copy_cfg_body): Compute abnormal_goto_dest, adjust copy_edges_for_bb
caller.
* gimple-low.c (struct lower_data): Remove calls_builtin_setjmp.
(lower_function_body): Don't emit __builtin_setjmp_dispatcher.
(lower_stmt): Don't set data->calls_builtin_setjmp.
(lower_builtin_setjmp): Adjust comment.
* builtins.def (BUILT_IN_SETJMP_DISPATCHER): Remove.
* tree-cfg.c (found_computed_goto): Remove.
(factor_computed_gotos): Remove.
(make_goto_expr_edges): Return bool, true for computed gotos.
Don't call make_abnormal_goto_edges.
(build_gimple_cfg): Don't set found_computed_goto, don't call
factor_computed_gotos.
(computed_goto_p): No longer static.
(make_blocks): Don't set found_computed_goto.
(get_abnormal_succ_dispatcher, handle_abnormal_edges): New functions.
(make_edges): If make_goto_expr_edges returns true, push bb
into ab_edge_goto vector, for stmt_can_make_abnormal_goto calls
instead of calling make_abnormal_goto_edges push bb into ab_edge_call
vector. Record mapping between bbs and OpenMP regions if there
are any, adjust make_gimple_omp_edges caller. Call
handle_abnormal_edges.
(make_abnormal_goto_edges): Remove.
* tree-cfg.h (make_abnormal_goto_edges): Remove.
(computed_goto_p, get_abnormal_succ_dispatcher): New prototypes.
* internal-fn.c (expand_ABNORMAL_DISPATCHER): New function.
* builtins.c (expand_builtin): Don't handle
BUILT_IN_SETJMP_DISPATCHER.
* internal-fn.def (ABNORMAL_DISPATCHER): New.
* omp-low.c (make_gimple_omp_edges): Add region_idx argument, when
filling *region also set *region_idx to (*region)->entry->index.
PR other/58712
* read-rtl.c (read_rtx_code): Clear all of RTX_CODE_SIZE (code).
For REGs set ORIGINAL_REGNO.
......
......@@ -6205,20 +6205,6 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode,
}
break;
case BUILT_IN_SETJMP_DISPATCHER:
/* __builtin_setjmp_dispatcher is passed the dispatcher label. */
if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
{
tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
rtx label_r = label_rtx (label);
/* Remove the dispatcher label from the list of non-local labels
since the receiver labels have been added to it above. */
remove_node_from_expr_list (label_r, &nonlocal_goto_handler_labels);
return const0_rtx;
}
break;
case BUILT_IN_SETJMP_RECEIVER:
/* __builtin_setjmp_receiver is passed the receiver label. */
if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
......
......@@ -783,7 +783,6 @@ DEF_BUILTIN_STUB (BUILT_IN_NONLOCAL_GOTO, "__builtin_nonlocal_goto")
/* Implementing __builtin_setjmp. */
DEF_BUILTIN_STUB (BUILT_IN_SETJMP_SETUP, "__builtin_setjmp_setup")
DEF_BUILTIN_STUB (BUILT_IN_SETJMP_DISPATCHER, "__builtin_setjmp_dispatcher")
DEF_BUILTIN_STUB (BUILT_IN_SETJMP_RECEIVER, "__builtin_setjmp_receiver")
/* Implementing variable sized local variables. */
......
......@@ -76,9 +76,6 @@ struct lower_data
/* True if the current statement cannot fall through. */
bool cannot_fallthru;
/* True if the function calls __builtin_setjmp. */
bool calls_builtin_setjmp;
};
static void lower_stmt (gimple_stmt_iterator *, struct lower_data *);
......@@ -99,7 +96,6 @@ lower_function_body (void)
gimple_seq lowered_body;
gimple_stmt_iterator i;
gimple bind;
tree t;
gimple x;
/* The gimplifier should've left a body of exactly one statement,
......@@ -146,34 +142,6 @@ lower_function_body (void)
gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING);
}
/* If the function calls __builtin_setjmp, we need to emit the computed
goto that will serve as the unique dispatcher for all the receivers. */
if (data.calls_builtin_setjmp)
{
tree disp_label, disp_var, arg;
/* Build 'DISP_LABEL:' and insert. */
disp_label = create_artificial_label (cfun->function_end_locus);
/* This mark will create forward edges from every call site. */
DECL_NONLOCAL (disp_label) = 1;
cfun->has_nonlocal_label = 1;
x = gimple_build_label (disp_label);
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
/* Build 'DISP_VAR = __builtin_setjmp_dispatcher (DISP_LABEL);'
and insert. */
disp_var = create_tmp_var (ptr_type_node, "setjmpvar");
arg = build_addr (disp_label, current_function_decl);
t = builtin_decl_implicit (BUILT_IN_SETJMP_DISPATCHER);
x = gimple_build_call (t, 1, arg);
gimple_call_set_lhs (x, disp_var);
/* Build 'goto DISP_VAR;' and insert. */
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
x = gimple_build_goto (disp_var);
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
}
/* Once the old body has been lowered, replace it with the new
lowered sequence. */
gimple_set_body (current_function_decl, lowered_body);
......@@ -364,7 +332,6 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
{
lower_builtin_setjmp (gsi);
data->cannot_fallthru = false;
data->calls_builtin_setjmp = true;
return;
}
......@@ -689,15 +656,12 @@ lower_gimple_return (gimple_stmt_iterator *gsi, struct lower_data *data)
all will be used on all machines). It operates similarly to the C
library function of the same name, but is more efficient.
It is lowered into 3 other builtins, namely __builtin_setjmp_setup,
__builtin_setjmp_dispatcher and __builtin_setjmp_receiver, but with
__builtin_setjmp_dispatcher shared among all the instances; that's
why it is only emitted at the end by lower_function_body.
It is lowered into 2 other builtins, namely __builtin_setjmp_setup,
__builtin_setjmp_receiver.
After full lowering, the body of the function should look like:
{
void * setjmpvar.0;
int D.1844;
int D.2844;
......@@ -727,14 +691,13 @@ lower_gimple_return (gimple_stmt_iterator *gsi, struct lower_data *data)
<D3850>:;
return;
<D3853>: [non-local];
setjmpvar.0 = __builtin_setjmp_dispatcher (&<D3853>);
goto setjmpvar.0;
}
The dispatcher block will be both the unique destination of all the
abnormal call edges and the unique source of all the abnormal edges
to the receivers, thus keeping the complexity explosion localized. */
During cfg creation an extra per-function (or per-OpenMP region)
block with ABNORMAL_DISPATCHER internal call will be added, unique
destination of all the abnormal call edges and the unique source of
all the abnormal edges to the receivers, thus keeping the complexity
explosion localized. */
static void
lower_builtin_setjmp (gimple_stmt_iterator *gsi)
......
......@@ -857,6 +857,11 @@ expand_MASK_STORE (gimple stmt)
expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
static void
expand_ABNORMAL_DISPATCHER (gimple)
{
}
/* Routines to expand each internal function, indexed by function number.
Each routine has the prototype:
......
......@@ -51,3 +51,4 @@ DEF_INTERNAL_FN (UBSAN_NULL, ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (UBSAN_CHECK_ADD, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (UBSAN_CHECK_SUB, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (UBSAN_CHECK_MUL, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (ABNORMAL_DISPATCHER, ECF_NORETURN)
......@@ -10449,7 +10449,8 @@ diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
/* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
codes. */
bool
make_gimple_omp_edges (basic_block bb, struct omp_region **region)
make_gimple_omp_edges (basic_block bb, struct omp_region **region,
int *region_idx)
{
gimple last = last_stmt (bb);
enum gimple_code code = gimple_code (last);
......@@ -10556,7 +10557,13 @@ make_gimple_omp_edges (basic_block bb, struct omp_region **region)
}
if (*region != cur_region)
{
*region = cur_region;
if (cur_region)
*region_idx = cur_region->entry->index;
else
*region_idx = 0;
}
return fallthru;
}
......
......@@ -26,6 +26,6 @@ extern tree find_omp_clause (tree, enum omp_clause_code);
extern void omp_expand_local (basic_block);
extern void free_omp_regions (void);
extern tree omp_reduction_init (tree, tree);
extern bool make_gimple_omp_edges (basic_block, struct omp_region **);
extern bool make_gimple_omp_edges (basic_block, struct omp_region **, int *);
#endif /* GCC_OMP_LOW_H */
......@@ -1106,27 +1106,22 @@ branch_prob (void)
gimple first;
tree fndecl;
gsi = gsi_after_labels (bb);
gsi = gsi_start_nondebug_after_labels_bb (bb);
gcc_checking_assert (!gsi_end_p (gsi));
first = gsi_stmt (gsi);
if (is_gimple_debug (first))
{
gsi_next_nondebug (&gsi);
gcc_checking_assert (!gsi_end_p (gsi));
first = gsi_stmt (gsi);
}
/* Don't split the bbs containing __builtin_setjmp_receiver
or __builtin_setjmp_dispatcher calls. These are very
or ABNORMAL_DISPATCHER calls. These are very
special and don't expect anything to be inserted before
them. */
if (is_gimple_call (first)
&& (((fndecl = gimple_call_fndecl (first)) != NULL
&& DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
&& (DECL_FUNCTION_CODE (fndecl)
== BUILT_IN_SETJMP_RECEIVER
|| (DECL_FUNCTION_CODE (fndecl)
== BUILT_IN_SETJMP_DISPATCHER)))
|| gimple_call_flags (first) & ECF_RETURNS_TWICE))
== BUILT_IN_SETJMP_RECEIVER))
|| (gimple_call_flags (first) & ECF_RETURNS_TWICE)
|| (gimple_call_internal_p (first)
&& (gimple_call_internal_fn (first)
== IFN_ABNORMAL_DISPATCHER))))
continue;
if (dump_file)
......
2014-01-29 Jakub Jelinek <jakub@redhat.com>
PR middle-end/59917
PR tree-optimization/59920
* gcc.dg/pr59920-1.c: New test.
* gcc.dg/pr59920-2.c: New test.
* gcc.dg/pr59920-3.c: New test.
* c-c++-common/gomp/pr59917-1.c: New test.
* c-c++-common/gomp/pr59917-2.c: New test.
PR tree-optimization/59594
* gcc.dg/vect/no-vfa-vect-depend-2.c: New test.
* gcc.dg/vect/no-vfa-vect-depend-3.c: New test.
......
/* PR middle-end/59917 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */
struct J { long buf[8]; };
extern int setjmp (struct J[1]);
extern struct J j[1];
void foo (int);
void
bar (void)
{
if (setjmp (j) == 0)
{
int k;
foo (-1);
#pragma omp parallel
for (k = 0; k < 10; ++k)
foo (k);
foo (-2);
}
}
/* PR middle-end/59917 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */
struct J { long buf[8]; };
extern int setjmp (struct J[1]);
void foo (int);
void
bar (void)
{
int k;
foo (-1);
#pragma omp parallel
for (k = 0; k < 10; ++k)
{
struct J j[1];
if (setjmp (j) == 0)
foo (k);
}
foo (-2);
}
/* PR tree-optimization/59920 */
/* { dg-do compile } */
/* { dg-options "-O0" } */
#include <setjmp.h>
int bar (void);
void baz (int);
#define A { int x = bar (); if (setjmp (buf) == 0) baz (x); }
#define B A A A A A A A A A A
#define C B B B B B B B B B B
extern jmp_buf buf;
void
foo (void)
{
C C
}
/* PR tree-optimization/59920 */
/* { dg-do compile } */
/* { dg-options "-O0" } */
void *bar (void **);
void *baz (int, void **);
#define A(n) \
{ __label__ l1_##n, l2_##n, l3_##n; \
static void *a[] = { &&l1_##n, &&l2_##n, &&l3_##n };\
void *b = bar (a); \
goto *b; \
l1_##n: \
b = baz (1, a); \
goto *b; \
l2_##n: \
b = baz (2, a); \
goto *b; \
l3_##n:; \
}
#define B(n) A(n##0) A(n##1) A(n##2) A(n##3) A(n##4) \
A(n##5) A(n##6) A(n##7) A(n##8) A(n##9)
#define C(n) B(n##0) B(n##1) B(n##2) B(n##3) B(n##4) \
B(n##5) B(n##6) B(n##7) B(n##8) B(n##9)
void
foo (void)
{
C(1)
}
/* PR tree-optimization/59920 */
/* { dg-do compile } */
/* { dg-options "-O0" } */
void *bar (void **);
void *baz (int, void **);
#define A(n) __label__ l##n;
#define B(n) A(n##0) A(n##1) A(n##2) A(n##3) A(n##4) \
A(n##5) A(n##6) A(n##7) A(n##8) A(n##9)
#define C(n) B(n##0) B(n##1) B(n##2) B(n##3) B(n##4) \
B(n##5) B(n##6) B(n##7) B(n##8) B(n##9)
#define D C(1)
int
foo (void)
{
D
int bar (int i)
{
switch (i)
{
#undef A
#define A(n) \
case n: goto l##n;
D
}
return i;
}
int w = 0;
#undef A
#define A(n) int w##n = 0;
D
#undef A
#define A(n) \
{ l##n:; \
w##n += bar (10000 + n) - 10000; \
w##n += bar (10001 + n) - 10000; \
bar (n + 1); \
return w##n; \
}
D
#undef A
#define A(n) w += w##n;
D
return w;
}
......@@ -31,7 +31,6 @@ extern void start_recording_case_labels (void);
extern void end_recording_case_labels (void);
extern basic_block label_to_block_fn (struct function *, tree);
#define label_to_block(t) (label_to_block_fn (cfun, t))
extern void make_abnormal_goto_edges (basic_block, bool);
extern void cleanup_dead_labels (void);
extern void group_case_labels_stmt (gimple);
extern void group_case_labels (void);
......@@ -46,7 +45,9 @@ extern void gimple_debug_cfg (int);
extern void gimple_dump_cfg (FILE *, int);
extern void dump_cfg_stats (FILE *);
extern void debug_cfg_stats (void);
extern bool computed_goto_p (gimple);
extern bool stmt_can_make_abnormal_goto (gimple);
extern basic_block get_abnormal_succ_dispatcher (basic_block);
extern bool is_ctrl_stmt (gimple);
extern bool is_ctrl_altering_stmt (gimple);
extern bool simple_goto_p (gimple);
......
......@@ -1967,7 +1967,7 @@ update_ssa_across_abnormal_edges (basic_block bb, basic_block ret_bb,
static bool
copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb,
bool can_make_abnormal_goto)
basic_block abnormal_goto_dest)
{
basic_block new_bb = (basic_block) bb->aux;
edge_iterator ei;
......@@ -2021,7 +2021,9 @@ copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb,
into a COMPONENT_REF which doesn't. If the copy
can throw, the original could also throw. */
can_throw = stmt_can_throw_internal (copy_stmt);
nonlocal_goto = stmt_can_make_abnormal_goto (copy_stmt);
nonlocal_goto
= (stmt_can_make_abnormal_goto (copy_stmt)
&& !computed_goto_p (copy_stmt));
if (can_throw || nonlocal_goto)
{
......@@ -2052,9 +2054,26 @@ copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb,
/* If the call we inline cannot make abnormal goto do not add
additional abnormal edges but only retain those already present
in the original function body. */
nonlocal_goto &= can_make_abnormal_goto;
if (abnormal_goto_dest == NULL)
nonlocal_goto = false;
if (nonlocal_goto)
make_abnormal_goto_edges (gimple_bb (copy_stmt), true);
{
basic_block copy_stmt_bb = gimple_bb (copy_stmt);
if (get_abnormal_succ_dispatcher (copy_stmt_bb))
nonlocal_goto = false;
/* ABNORMAL_DISPATCHER (1) is for longjmp/setjmp or nonlocal gotos
in OpenMP regions which aren't allowed to be left abnormally.
So, no need to add abnormal edge in that case. */
else if (is_gimple_call (copy_stmt)
&& gimple_call_internal_p (copy_stmt)
&& (gimple_call_internal_fn (copy_stmt)
== IFN_ABNORMAL_DISPATCHER)
&& gimple_call_arg (copy_stmt, 0) == boolean_true_node)
nonlocal_goto = false;
else
make_edge (copy_stmt_bb, abnormal_goto_dest, EDGE_ABNORMAL);
}
if ((can_throw || nonlocal_goto)
&& gimple_in_ssa_p (cfun))
......@@ -2493,13 +2512,22 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
last = last_basic_block_for_fn (cfun);
/* Now that we've duplicated the blocks, duplicate their edges. */
bool can_make_abormal_goto
= id->gimple_call && stmt_can_make_abnormal_goto (id->gimple_call);
basic_block abnormal_goto_dest = NULL;
if (id->gimple_call
&& stmt_can_make_abnormal_goto (id->gimple_call))
{
gimple_stmt_iterator gsi = gsi_for_stmt (id->gimple_call);
bb = gimple_bb (id->gimple_call);
gsi_next (&gsi);
if (gsi_end_p (gsi))
abnormal_goto_dest = get_abnormal_succ_dispatcher (bb);
}
FOR_ALL_BB_FN (bb, cfun_to_copy)
if (!id->blocks_to_copy
|| (bb->index > 0 && bitmap_bit_p (id->blocks_to_copy, bb->index)))
need_debug_cleanup |= copy_edges_for_bb (bb, count_scale, exit_block_map,
can_make_abormal_goto);
abnormal_goto_dest);
if (new_entry)
{
......
......@@ -9977,12 +9977,6 @@ build_common_builtin_nodes (void)
BUILT_IN_SETJMP_SETUP,
"__builtin_setjmp_setup", ECF_NOTHROW);
ftype = build_function_type_list (ptr_type_node, ptr_type_node, NULL_TREE);
local_define_builtin ("__builtin_setjmp_dispatcher", ftype,
BUILT_IN_SETJMP_DISPATCHER,
"__builtin_setjmp_dispatcher",
ECF_PURE | ECF_NOTHROW);
ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
local_define_builtin ("__builtin_setjmp_receiver", ftype,
BUILT_IN_SETJMP_RECEIVER,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment