Commit 7855700e by Jakub Jelinek, committed by Jakub Jelinek

gimplify.c (gimplify_scan_omp_clauses): Don't sorry_at on lastprivate conditional on combined for simd.

	* gimplify.c (gimplify_scan_omp_clauses): Don't sorry_at on lastprivate
	conditional on combined for simd.
	* omp-low.c (struct omp_context): Add combined_into_simd_safelen0
	member.
	(lower_rec_input_clauses): For gimple_omp_for_combined_into_p max_vf 1
	constructs, don't remove lastprivate_conditional_map, but instead set
	ctx->combined_into_simd_safelen0 and adjust hash_map, so that it points
	to parent construct temporaries.
	(lower_lastprivate_clauses): Handle ctx->combined_into_simd_safelen0
	like !ctx->lastprivate_conditional_map.
	(lower_omp_1) <case GIMPLE_ASSIGN>: If up->combined_into_simd_safelen0,
	use up->outer context instead of up.
	* omp-expand.c (expand_omp_for_generic): Perform cond_var bump even if
	gimple_omp_for_combined_p.
	(expand_omp_for_static_nochunk): Likewise.
	(expand_omp_for_static_chunk): Add forgotten cond_var bump that was
	probably moved over into expand_omp_for_generic rather than being copied
	there.
gcc/cp/
	* cp-tree.h (CP_OMP_CLAUSE_INFO): Allow for any clauses up to _condvar_
	instead of only up to linear.
gcc/testsuite/
	* c-c++-common/gomp/lastprivate-conditional-2.c (foo): Don't expect
	a sorry_at on any of the clauses.
libgomp/
	* testsuite/libgomp.c-c++-common/lastprivate-conditional-7.c: New test.
	* testsuite/libgomp.c-c++-common/lastprivate-conditional-8.c: New test.
	* testsuite/libgomp.c-c++-common/lastprivate-conditional-9.c: New test.
	* testsuite/libgomp.c-c++-common/lastprivate-conditional-10.c: New test.

From-SVN: r271907
gcc/cp/cp-tree.h:
@@ -4924,7 +4924,7 @@ more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
See semantics.c for details. */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_LINEAR))
OMP_CLAUSE__CONDTEMP_))
/* Nonzero if this transaction expression's body contains statements. */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
......
gcc/gimplify.c:
@@ -8145,31 +8145,7 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0;
}
if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
{
splay_tree_node n = NULL;
if (code == OMP_SIMD
&& outer_ctx
&& outer_ctx->region_type == ORT_WORKSHARE)
{
n = splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl);
if (n == NULL
&& outer_ctx->outer_context
&& (outer_ctx->outer_context->region_type
== ORT_COMBINED_PARALLEL))
n = splay_tree_lookup (outer_ctx->outer_context->variables,
(splay_tree_key) decl);
}
if (n && (n->value & GOVD_LASTPRIVATE_CONDITIONAL) != 0)
{
sorry_at (OMP_CLAUSE_LOCATION (c),
"%<conditional%> modifier on %<lastprivate%> "
"clause not supported yet");
OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0;
}
else
flags |= GOVD_LASTPRIVATE_CONDITIONAL;
}
if (outer_ctx
&& (outer_ctx->region_type == ORT_COMBINED_PARALLEL
|| ((outer_ctx->region_type & ORT_COMBINED_TEAMS)
......
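For reference, a minimal C reproducer of the construct affected by the gimplify.c change above (modeled on the gomp testcase updated later in this commit, not copied verbatim): a conditional lastprivate on a combined worksharing-simd loop. Before this commit gimplify_scan_omp_clauses issued the "not supported yet" sorry for it; now it simply records GOVD_LASTPRIVATE_CONDITIONAL and lets the later passes handle it.

/* Combined construct with a conditional lastprivate clause; previously
   this was diagnosed with "'conditional' modifier on 'lastprivate'
   clause not supported yet".  Compile with -fopenmp.  */
int c;

void
use (int *p)
{
#pragma omp parallel for simd lastprivate (conditional: c)
  for (int i = 0; i < 32; i++)
    if (p[i])
      c = i;
}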
gcc/omp-expand.c:
@@ -3257,19 +3257,6 @@ expand_omp_for_generic (struct omp_region *region,
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, fd->loop.step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (vback)
&& TREE_ADDRESSABLE (vback),
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
@@ -3289,6 +3276,19 @@ expand_omp_for_generic (struct omp_region *region,
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, fd->loop.step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (vback)
&& TREE_ADDRESSABLE (vback),
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE)
{
tree tem;
@@ -3962,19 +3962,6 @@ expand_omp_for_static_nochunk (struct omp_region *region,
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (vback)
&& TREE_ADDRESSABLE (vback),
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
@@ -3992,6 +3979,19 @@ expand_omp_for_static_nochunk (struct omp_region *region,
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (vback)
&& TREE_ADDRESSABLE (vback),
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
t = build2 (fd->loop.cond_code, boolean_type_node,
DECL_P (vback) && TREE_ADDRESSABLE (vback)
? t : vback, e);
@@ -4607,6 +4607,23 @@ expand_omp_for_static_chunk (struct omp_region *region,
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
tree t2;
if (POINTER_TYPE_P (type)
|| TREE_CODE (n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR)
t2 = build_int_cst (itype, 1);
else
t2 = fold_convert (itype, step);
t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2);
t2 = force_gimple_operand_gsi (&gsi, t2, false,
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (cond_var, t2);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
......
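The cond_var bumps that the omp-expand.c changes above make unconditional implement the per-iteration counter behind lastprivate(conditional:). As an illustration of the semantics only (hand-written C, not the compiler-generated code; the struct and function names here are invented for the sketch): each worker keeps a private copy of the variable together with the logical iteration number of its last conditional store, and at the end the copy written at the highest iteration number becomes the value of the original variable, matching sequential execution.

#include <stddef.h>

struct cond_priv
{
  int val;        /* worker-private copy of the list item */
  long last_iter; /* logical iteration of the last conditional store
                     (the role the cond_var counter plays); -1 if the
                     worker never stored */
};

/* Merge the per-worker copies: the store made at the highest logical
   iteration wins; if nobody stored, the original value is kept.  */
static int
merge_conditional_lastprivate (const struct cond_priv *per_worker,
                               size_t nworkers, int orig)
{
  long best = -1;
  int result = orig;
  for (size_t w = 0; w < nworkers; w++)
    if (per_worker[w].last_iter > best)
      {
        best = per_worker[w].last_iter;
        result = per_worker[w].val;
      }
  return result;
}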
gcc/omp-low.c:
@@ -137,6 +137,10 @@ struct omp_context
/* True if this construct can be cancelled. */
bool cancellable;
/* True if lower_omp_1 should look up lastprivate conditional in parent
context. */
bool combined_into_simd_safelen0;
};
static splay_tree all_contexts;
@@ -4816,6 +4820,8 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
void_node);
gimple_seq tseq = NULL;
gimplify_and_add (x, &tseq);
if (ctx->outer)
lower_omp (&tseq, ctx->outer);
gimple_seq_add_seq (&llist[1], tseq);
}
if (y)
@@ -5278,6 +5284,25 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
sctx.is_simt = false;
if (ctx->lastprivate_conditional_map)
{
if (gimple_omp_for_combined_into_p (ctx->stmt))
{
/* Signal to lower_omp_1 that it should use parent context. */
ctx->combined_into_simd_safelen0 = true;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
{
tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
tree *v
= ctx->lastprivate_conditional_map->get (o);
tree po = lookup_decl (OMP_CLAUSE_DECL (c), ctx->outer);
tree *pv
= ctx->outer->lastprivate_conditional_map->get (po);
*v = *pv;
}
}
else
{
/* When not vectorized, treat lastprivate(conditional:) like
normal lastprivate, as there will be just one simd lane
writing the privatized variable. */
@@ -5285,6 +5310,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
ctx->lastprivate_conditional_map = NULL;
}
}
}
if (nonconst_simd_if)
{
@@ -5652,7 +5678,8 @@ lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *body_p,
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
&& ctx->lastprivate_conditional_map)
&& ctx->lastprivate_conditional_map
&& !ctx->combined_into_simd_safelen0)
{
gcc_assert (body_p);
if (simduid)
@@ -10812,6 +10839,8 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (tree *v = up->lastprivate_conditional_map->get (lhs))
{
tree clauses;
if (up->combined_into_simd_safelen0)
up = up->outer;
if (gimple_code (up->stmt) == GIMPLE_OMP_FOR)
clauses = gimple_omp_for_clauses (up->stmt);
else
......
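The combined_into_simd_safelen0 handling added above covers the case where the simd half of a combined construct is reduced to a single lane (max_vf 1), for example through simdlen(1) or if(simd: 0); the conditional-store temporaries then live in the enclosing worksharing construct, which is why the hash_map is redirected to the parent context. A minimal example of that shape, in the spirit of the new libgomp tests added below (compile with -fopenmp and call from inside a parallel region):

int x;

void
f (int *a, int n)
{
  /* simdlen (1) forces a single simd lane, so the lastprivate
     (conditional:) bookkeeping is carried by the enclosing "for".  */
#pragma omp for simd lastprivate (conditional: x) simdlen (1)
  for (int i = 0; i < n; i++)
    if (a[i])
      x = a[i];
}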
gcc/testsuite/c-c++-common/gomp/lastprivate-conditional-2.c:
@@ -13,7 +13,7 @@ foo (int *p)
if (p[i])
b = i;
#pragma omp parallel
#pragma omp for simd lastprivate (conditional: c) /* { dg-message "not supported yet" } */
#pragma omp for simd lastprivate (conditional: c)
for (i = 0; i < 32; i++)
if (p[i])
c = i;
@@ -21,7 +21,7 @@ foo (int *p)
for (i = 0; i < 32; i++)
if (p[i])
d = i;
#pragma omp parallel for simd lastprivate (conditional: e) /* { dg-message "not supported yet" } */
#pragma omp parallel for simd lastprivate (conditional: e)
for (i = 0; i < 32; i++)
if (p[i])
e = i;
......
New libgomp test (one of libgomp/testsuite/libgomp.c-c++-common/lastprivate-conditional-{7,8,9,10}.c):
/* { dg-do run } */
int v, x;
__attribute__((noipa)) int
foo (int *a)
{
#pragma omp parallel for simd lastprivate (conditional: x) schedule(simd: static) if (simd: 0)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i];
return x;
}
__attribute__((noipa)) int
bar (int *a, int *b)
{
#pragma omp parallel
#pragma omp for simd lastprivate (conditional: x, v) schedule(static, 16) simdlen (1)
for (int i = 16; i < 128; ++i)
{
if (a[i])
x = a[i];
if (b[i])
v = b[i] + 10;
}
return x;
}
__attribute__((noipa)) int
baz (int *a)
{
#pragma omp parallel for simd if (simd: 0) lastprivate (conditional: x) schedule(simd: dynamic, 16)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i] + 5;
return x;
}
int
main ()
{
int a[128], b[128], i;
for (i = 0; i < 128; i++)
{
a[i] = ((i % 11) == 2) ? i + 10 : 0;
asm volatile ("" : "+g" (i));
b[i] = ((i % 13) == 5) ? i * 2 : 0;
}
if (foo (a) != 133)
__builtin_abort ();
if (bar (b, a) != 244 || v != 143)
__builtin_abort ();
if (baz (b) != 249)
__builtin_abort ();
return 0;
}
New libgomp test (one of libgomp/testsuite/libgomp.c-c++-common/lastprivate-conditional-{7,8,9,10}.c):
/* { dg-do run } */
/* { dg-additional-options "-O2 -fdump-tree-vect-details" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 3 "vect" { target avx_runtime } } } */
int v, x;
__attribute__((noipa)) void
foo (int *a)
{
#pragma omp for simd lastprivate (conditional: x) schedule(simd: static)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i];
}
__attribute__((noipa)) void
bar (int *a, int *b)
{
#pragma omp for simd lastprivate (conditional: x, v) schedule(static, 16)
for (int i = 16; i < 128; ++i)
{
if (a[i])
x = a[i];
if (b[i])
v = b[i] + 10;
}
}
__attribute__((noipa)) void
baz (int *a)
{
#pragma omp for simd lastprivate (conditional: x) schedule(simd: dynamic, 16)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i] + 5;
}
int
main ()
{
int a[128], b[128], i;
for (i = 0; i < 128; i++)
{
a[i] = ((i % 11) == 2) ? i + 10 : 0;
asm volatile ("" : "+g" (i));
b[i] = ((i % 13) == 5) ? i * 2 : 0;
}
#pragma omp parallel
foo (a);
if (x != 133)
__builtin_abort ();
x = -3;
#pragma omp parallel
bar (b, a);
if (x != 244 || v != 143)
__builtin_abort ();
#pragma omp parallel
baz (b);
if (x != 249)
__builtin_abort ();
return 0;
}
New libgomp test (one of libgomp/testsuite/libgomp.c-c++-common/lastprivate-conditional-{7,8,9,10}.c):
/* { dg-do run } */
/* { dg-additional-options "-O2 -fdump-tree-vect-details" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 3 "vect" { target avx_runtime } } } */
int v, x;
__attribute__((noipa)) int
foo (int *a)
{
#pragma omp parallel for simd lastprivate (conditional: x) schedule(simd: static)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i];
return x;
}
__attribute__((noipa)) int
bar (int *a, int *b)
{
#pragma omp parallel
#pragma omp for simd lastprivate (conditional: x, v) schedule(static, 16)
for (int i = 16; i < 128; ++i)
{
if (a[i])
x = a[i];
if (b[i])
v = b[i] + 10;
}
return x;
}
__attribute__((noipa)) int
baz (int *a)
{
#pragma omp parallel for simd lastprivate (conditional: x) schedule(simd: dynamic, 16)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i] + 5;
return x;
}
int
main ()
{
int a[128], b[128], i;
for (i = 0; i < 128; i++)
{
a[i] = ((i % 11) == 2) ? i + 10 : 0;
asm volatile ("" : "+g" (i));
b[i] = ((i % 13) == 5) ? i * 2 : 0;
}
if (foo (a) != 133)
__builtin_abort ();
if (bar (b, a) != 244 || v != 143)
__builtin_abort ();
if (baz (b) != 249)
__builtin_abort ();
return 0;
}
New libgomp test (one of libgomp/testsuite/libgomp.c-c++-common/lastprivate-conditional-{7,8,9,10}.c):
/* { dg-do run } */
int v, x;
__attribute__((noipa)) void
foo (int *a)
{
#pragma omp for simd lastprivate (conditional: x) schedule(simd: static) if (simd: 0)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i];
}
__attribute__((noipa)) void
bar (int *a, int *b)
{
#pragma omp for simd lastprivate (conditional: x, v) schedule(static, 16) simdlen(1)
for (int i = 16; i < 128; ++i)
{
if (a[i])
x = a[i];
if (b[i])
v = b[i] + 10;
}
}
__attribute__((noipa)) void
baz (int *a)
{
#pragma omp for simd lastprivate (conditional: x) schedule(simd: dynamic, 16) if (0)
for (int i = 0; i < 128; i++)
if (a[i])
x = a[i] + 5;
}
int
main ()
{
int a[128], b[128], i;
for (i = 0; i < 128; i++)
{
a[i] = ((i % 11) == 2) ? i + 10 : 0;
asm volatile ("" : "+g" (i));
b[i] = ((i % 13) == 5) ? i * 2 : 0;
}
#pragma omp parallel
foo (a);
if (x != 133)
__builtin_abort ();
x = -3;
#pragma omp parallel
bar (b, a);
if (x != 244 || v != 143)
__builtin_abort ();
#pragma omp parallel
baz (b);
if (x != 249)
__builtin_abort ();
return 0;
}