Commit 041cb615 by Jan Hubicka

Optimize updating of badness after applying inlining

        * ipa-inline.c (wrapper_heuristics_may_apply): Break out from ...
	(edge_badness): ... here.
	(inline_small_functions): Use monotonicity of badness calculation
	to avoid redundant updates.

From-SVN: r278496
parent 140ee00a
gcc/ChangeLog
2019-11-20  Jan Hubicka  <jh@suse.cz>

	* ipa-inline.c (wrapper_heuristics_may_apply): Break out from ...
	(edge_badness): ... here.
	(inline_small_functions): Use monotonicity of badness calculation
	to avoid redundant updates.

2019-11-20  Richard Biener  <rguenther@suse.de>

	* tree-vect-slp.c (vect_analyze_slp_instance): Dump
	...
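Why monotonicity helps here: inline_small_functions keeps the candidate edges in a fibonacci heap keyed by badness, and when an edge is popped it recomputes the badness and re-queues the edge if the cached key went stale. A stale key is therefore harmless whenever it is a lower bound on the true badness, i.e. whenever badness could only have grown since the key was cached. A rough standalone sketch of that lazy-update pattern, with a plain binary heap and made-up badness numbers in place of GCC's actual data structures:

/* Sketch of the lazy-update pattern (not GCC code): a min-heap keyed by
   cached badness, where stale keys are tolerated as long as the true
   badness only grew, and are refreshed when the element is popped.  */

#include <stdio.h>

#define MAX_EDGES 16

struct edge
{
  const char *name;
  double key;      /* Badness cached when the edge was pushed.  */
  double badness;  /* "True" badness; may have grown since.  */
};

static struct edge *heap[MAX_EDGES];
static int heap_size;

static void
heap_push (struct edge *e)
{
  int i = heap_size++;
  while (i > 0 && heap[(i - 1) / 2]->key > e->key)
    {
      heap[i] = heap[(i - 1) / 2];
      i = (i - 1) / 2;
    }
  heap[i] = e;
}

static struct edge *
heap_pop (void)
{
  struct edge *min = heap[0];
  struct edge *last = heap[--heap_size];
  int i = 0;
  for (;;)
    {
      int c = 2 * i + 1;
      if (c >= heap_size)
        break;
      if (c + 1 < heap_size && heap[c + 1]->key < heap[c]->key)
        c++;
      if (last->key <= heap[c]->key)
        break;
      heap[i] = heap[c];
      i = c;
    }
  heap[i] = last;
  return min;
}

int
main (void)
{
  struct edge a = { "a->b", 1.0, 1.0 };
  struct edge b = { "c->d", 2.0, 2.0 };
  heap_push (&a);
  heap_push (&b);

  /* Simulate an inline that made a->b strictly worse without touching
     its heap key.  Skipping the key update is safe: the stale key is a
     lower bound, so the edge can only surface too early, never too late,
     and the recheck on pop defers it.  */
  a.badness = 3.0;

  while (heap_size)
    {
      struct edge *e = heap_pop ();
      if (e->badness != e->key)
        {
          /* Stale key: refresh and reconsider, the way
             inline_small_functions re-queues edges whose recomputed
             badness differs from the cached one.  */
          e->key = e->badness;
          heap_push (e);
          continue;
        }
      printf ("inline %s (badness %.1f)\n", e->name, e->badness);
    }
  return 0;
}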
gcc/ipa-inline.c
@@ -1097,6 +1097,17 @@ want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
   return true;
 }
 
+/* Return true if WHERE of SIZE is a possible candidate for wrapper heuristics
+   in estimate_edge_badness.  */
+
+static bool
+wrapper_heuristics_may_apply (struct cgraph_node *where, int size)
+{
+  return size < (DECL_DECLARED_INLINE_P (where->decl)
+                 ? inline_insns_single (where, false)
+                 : inline_insns_auto (where, false));
+}
+
 /* A cost model driving the inlining heuristics in a way so the edges with
    smallest badness are inlined first.  After each inlining is performed
    the costs of all caller edges of nodes affected are recomputed so the
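The helper above only compares a size estimate against a per-node limit. A minimal sketch of its shape, using hypothetical threshold values in place of inline_insns_single/inline_insns_auto (which in GCC consult the max-inline-insns-single and max-inline-insns-auto params plus optimization hints):

#include <stdio.h>

/* Hypothetical limits; the real ones come from inline_insns_single ()
   and inline_insns_auto ().  */
#define INSNS_SINGLE 200  /* Caller was explicitly declared inline.  */
#define INSNS_AUTO 15     /* Plain caller considered for auto-inlining.  */

/* Simplified stand-in for the new helper.  */
static int
wrapper_heuristics_may_apply (int declared_inline_p, int size)
{
  return size < (declared_inline_p ? INSNS_SINGLE : INSNS_AUTO);
}

int
main (void)
{
  /* A caller of estimated size 40 qualifies only if declared inline.  */
  printf ("declared inline: %d\n", wrapper_heuristics_may_apply (1, 40));
  printf ("auto:            %d\n", wrapper_heuristics_may_apply (0, 40));
  return 0;
}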
@@ -1227,10 +1238,8 @@ edge_badness (struct cgraph_edge *edge, bool dump)
          and it is not called once.  */
       if (!caller_info->single_caller && overall_growth < caller_growth
           && caller_info->inlinable
-          && ipa_size_summaries->get (caller)->size
-             < (DECL_DECLARED_INLINE_P (caller->decl)
-                ? inline_insns_single (caller, false)
-                : inline_insns_auto (caller, false)))
+          && wrapper_heuristics_may_apply
+               (caller, ipa_size_summaries->get (caller)->size))
         {
           if (dump)
             fprintf (dump_file,
@@ -2158,11 +2167,24 @@ inline_small_functions (void)
             fprintf (dump_file, "   Peeling recursion with depth %i\n", depth);
 
           gcc_checking_assert (!callee->inlined_to);
+
+          int old_size = ipa_size_summaries->get (where)->size;
+          sreal old_time = ipa_fn_summaries->get (where)->time;
+
           inline_call (edge, true, &new_indirect_edges, &overall_size, true);
           reset_edge_caches (edge->callee);
           add_new_edges_to_heap (&edge_heap, new_indirect_edges);
 
-          update_callee_keys (&edge_heap, where, updated_nodes);
+          /* If caller's size and time increased we do not need to update
+             all edges because badness is not going to decrease.  */
+          if (old_size <= ipa_size_summaries->get (where)->size
+              && old_time <= ipa_fn_summaries->get (where)->time
+              /* Wrapper penalty may be non-monotonous in this respect.
+                 Fortunately it only affects small functions.  */
+              && !wrapper_heuristics_may_apply (where, old_size))
+            update_callee_keys (&edge_heap, edge->callee, updated_nodes);
+          else
+            update_callee_keys (&edge_heap, where, updated_nodes);
         }
       where = edge->caller;
       if (where->inlined_to)
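Distilled to its skeleton, the update decision introduced above looks something like the sketch below; the names and plain numeric parameters are illustrative stand-ins for the summary lookups, with the wrapper check acting as the conservative escape hatch for the one known non-monotone case:

#include <stdio.h>

/* Sketch of the new update decision; plain numbers stand in for the
   ipa_size_summaries / ipa_fn_summaries lookups.  Returns nonzero when
   all keys below WHERE must be refreshed, zero when refreshing only the
   freshly inlined callee's edges is enough.  */
static int
need_full_update (int old_size, int new_size,
                  double old_time, double new_time,
                  int wrapper_may_apply_to_old_size)
{
  /* If size and time both grew (or stayed put), the badness of the other
     edges below WHERE cannot have decreased, so their cached keys are
     still valid lower bounds and the pop-time recheck will fix them up,
     unless the wrapper heuristics could apply, which is not monotone
     in size.  */
  if (old_size <= new_size && old_time <= new_time
      && !wrapper_may_apply_to_old_size)
    return 0;
  return 1;
}

int
main (void)
{
  /* Size and time both grew, wrapper heuristics out of reach: cheap path.  */
  printf ("full update: %d\n", need_full_update (100, 120, 5.0, 6.0, 0));
  /* Size shrank, so some badness may have improved: full refresh.  */
  printf ("full update: %d\n", need_full_update (100, 90, 5.0, 6.0, 0));
  return 0;
}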