Commit 40fda55b by Jan Hubicka

ipa-inline.c (reset_edge_caches): New function.

	* ipa-inline.c (reset_edge_caches): New function.
	(update_caller_keys): Add check_inlinablity_for; do not
	reset edge caches; remove now unnecessary loop.
	(update_callee_keys): Add comments; reset
	node_growth_cache of callee.
	(update_all_callee_keys): Likewise.
	(inline_small_functions): Sanity check cache; update code
	recomputing it.

From-SVN: r173399
parent bf9fa1b9
2011-05-04 Jan Hubicka <jh@suse.cz>
* ipa-inline.c (reset_edge_caches): New function.
(update_caller_keys): Add check_inlinablity_for; do not
reset edge caches; remove now unnecessary loop.
(update_callee_keys): Add comments; reset
node_growth_cache of callee.
(update_all_callee_keys): Likewise.
(inline_small_functions): Sanity check cache; update code
recomputing it.
2011-05-04 Bernd Schmidt <bernds@codesourcery.com>
PR rtl-optimization/47612
......
@@ -850,11 +850,64 @@ update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
     }
 }
 
-/* Recompute heap nodes for each of caller edge.  */
+/* NODE was inlined.
+   All caller edges needs to be resetted because
+   size estimates change. Similarly callees needs reset
+   because better context may be known.  */
+
+static void
+reset_edge_caches (struct cgraph_node *node)
+{
+  struct cgraph_edge *edge;
+  struct cgraph_edge *e = node->callees;
+  struct cgraph_node *where = node;
+
+  if (where->global.inlined_to)
+    where = where->global.inlined_to;
+
+  /* WHERE body size has changed, the cached growth is invalid.  */
+  reset_node_growth_cache (where);
+
+  for (edge = where->callers; edge; edge = edge->next_caller)
+    if (edge->inline_failed)
+      reset_edge_growth_cache (edge);
+  if (!e)
+    return;
+
+  while (true)
+    if (!e->inline_failed && e->callee->callees)
+      e = e->callee->callees;
+    else
+      {
+        if (e->inline_failed)
+          reset_edge_growth_cache (e);
+        if (e->next_callee)
+          e = e->next_callee;
+        else
+          {
+            do
+              {
+                if (e->caller == node)
+                  return;
+                e = e->caller->callers;
+              }
+            while (!e->next_callee);
+            e = e->next_callee;
+          }
+      }
+}
+
+/* Recompute HEAP nodes for each of caller of NODE.
+   UPDATED_NODES track nodes we already visited, to avoid redundant work.
+   When CHECK_INLINABLITY_FOR is set, re-check for specified edge that
+   it is inlinable. Otherwise check all edges.  */
 
 static void
 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
-                    bitmap updated_nodes)
+                    bitmap updated_nodes,
+                    struct cgraph_edge *check_inlinablity_for)
 {
   struct cgraph_edge *edge;
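The new reset_edge_caches above visits every call edge in the inline tree of NODE without recursion: it descends into already-inlined callees, steps through sibling edges, and climbs back up through the single caller edge of each inlined body. Below is a minimal standalone sketch of that traversal pattern in C, using simplified stand-in structs; walk_inlined_callees and its fields are illustrative names only, not the real cgraph API.

#include <stdbool.h>

/* Toy stand-ins for the cgraph edge/node lists used by ipa-inline.c.  */
struct edge;
struct node
{
  struct edge *callees;     /* first outgoing call edge */
  struct edge *callers;     /* first incoming call edge */
};
struct edge
{
  struct node *caller;
  struct node *callee;
  struct edge *next_callee; /* next call made by the same caller */
  bool inline_failed;       /* true: still a real call; false: inlined */
};

/* Visit every not-yet-inlined edge reachable through NODE and its inlined
   callees, following the same descend/advance/backtrack scheme as
   reset_edge_caches.  */
void
walk_inlined_callees (struct node *node, void (*visit) (struct edge *))
{
  struct edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;           /* descend into an inlined body */
    else
      {
        if (e->inline_failed)
          visit (e);                    /* a real call site */
        if (e->next_callee)
          e = e->next_callee;           /* next sibling call */
        else
          {
            do
              {
                if (e->caller == node)
                  return;               /* back at the root: done */
                e = e->caller->callers; /* climb one level up */
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

The climb via e->caller->callers relies on the invariant that a body inlined somewhere has exactly one caller edge in that clone, which is why the walk cannot wander into unrelated callers.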
@@ -864,19 +917,13 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
     return;
   if (!bitmap_set_bit (updated_nodes, node->uid))
     return;
-  reset_node_growth_cache (node);
 
-  /* See if there is something to do.  */
   for (edge = node->callers; edge; edge = edge->next_caller)
     if (edge->inline_failed)
-      break;
-  if (!edge)
-    return;
-
-  for (; edge; edge = edge->next_caller)
-    if (edge->inline_failed)
       {
-        reset_edge_growth_cache (edge);
+        if (!check_inlinablity_for
+            || check_inlinablity_for == edge)
+          {
         if (can_inline_edge_p (edge, false)
             && want_inline_small_function_p (edge, false))
           update_edge_key (heap, edge);
@@ -887,9 +934,12 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
             edge->aux = NULL;
           }
       }
+        else if (edge->aux)
+          update_edge_key (heap, edge);
+      }
 }
 
-/* Recompute heap nodes for each uninlined call.
+/* Recompute HEAP nodes for each uninlined call in NODE.
    This is used when we know that edge badnesses are going only to increase
    (we introduced new call site) and thus all we need is to insert newly
    created edges into heap.  */
@@ -900,8 +950,6 @@ update_callee_keys (fibheap_t heap, struct cgraph_node *node,
 {
   struct cgraph_edge *e = node->callees;
 
-  reset_node_growth_cache (node);
-
   if (!e)
     return;
   while (true)
@@ -909,14 +957,23 @@ update_callee_keys (fibheap_t heap, struct cgraph_node *node,
       e = e->callee->callees;
     else
       {
-        reset_edge_growth_cache (e);
+        /* We inlined and thus callees might have different number of calls.
+           Reset their caches  */
+        reset_node_growth_cache (e->callee);
         if (e->inline_failed
             && inline_summary (e->callee)->inlinable
             && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
             && !bitmap_bit_p (updated_nodes, e->callee->uid))
           {
-            reset_node_growth_cache (node);
-            update_edge_key (heap, e);
+            if (can_inline_edge_p (e, false)
+                && want_inline_small_function_p (e, false))
+              update_edge_key (heap, e);
+            else if (e->aux)
+              {
+                report_inline_failed_reason (e);
+                fibheap_delete_node (heap, (fibnode_t) e->aux);
+                e->aux = NULL;
+              }
           }
         if (e->next_callee)
           e = e->next_callee;
@@ -943,8 +1000,6 @@ update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
 {
   struct cgraph_edge *e = node->callees;
 
-  reset_node_growth_cache (node);
-
   if (!e)
     return;
   while (true)
@@ -952,8 +1007,11 @@ update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
       e = e->callee->callees;
     else
       {
+        /* We inlined and thus callees might have different number of calls.
+           Reset their caches  */
+        reset_node_growth_cache (e->callee);
         if (e->inline_failed)
-          update_caller_keys (heap, e->callee, updated_nodes);
+          update_caller_keys (heap, e->callee, updated_nodes, e);
         if (e->next_callee)
           e = e->next_callee;
         else
@@ -1234,6 +1292,12 @@ inline_small_functions (void)
       if (!edge->inline_failed)
         continue;
 
+      /* Be sure that caches are maintained consistent.  */
+#ifdef ENABLE_CHECKING
+      reset_edge_growth_cache (edge);
+      reset_node_growth_cache (edge->callee);
+#endif
+
       /* When updating the edge costs, we only decrease badness in the keys.
          Increases of badness are handled lazilly; when we see key with out
          of date value on it, we re-insert it now.  */
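The ENABLE_CHECKING block above throws away the cached growth data for the edge and its callee just before the heap key is re-examined, so the consistency check runs against freshly computed values rather than possibly stale ones. The invariant being protected is that a cached growth value is only valid while the underlying body is unchanged; whoever changes a body (for example by inlining into it) must reset the cache. A small sketch of that discipline follows; growth_cache_entry, compute_growth and cached_growth are illustrative toys, not GCC's inline-summary or growth-cache API.

#include <assert.h>

struct growth_cache_entry
{
  int value;
  int valid;   /* 0: must be recomputed */
};

/* Stand-in for the real size/growth estimation.  */
int
compute_growth (int body_size, int ncalls)
{
  return body_size * ncalls - body_size;
}

/* Whenever a body changes (e.g. an edge was inlined into it), its cached
   growth must be invalidated, as reset_node_growth_cache does.  */
void
reset_growth_cache (struct growth_cache_entry *cache)
{
  cache->valid = 0;
}

int
cached_growth (struct growth_cache_entry *cache, int body_size, int ncalls)
{
  if (!cache->valid)
    {
      cache->value = compute_growth (body_size, ncalls);
      cache->valid = 1;
    }
#ifdef ENABLE_CHECKING
  /* In a checking build, verify no caller forgot a reset: a stale entry
     would disagree with a fresh recomputation.  */
  assert (cache->value == compute_growth (body_size, ncalls));
#endif
  return cache->value;
}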
@@ -1302,6 +1366,7 @@ inline_small_functions (void)
               edge->inline_failed = CIF_RECURSIVE_INLINING;
               continue;
             }
+          reset_edge_caches (where);
           /* Recursive inliner inlines all recursive calls of the function
              at once. Consequently we need to update all callee keys.  */
           if (flag_indirect_inlining)
@@ -1344,6 +1409,9 @@ inline_small_functions (void)
           if (flag_indirect_inlining)
             add_new_edges_to_heap (heap, new_indirect_edges);
 
+          reset_edge_caches (edge->callee);
+          reset_node_growth_cache (callee);
+
           /* We inlined last offline copy to the body.  This might lead
              to callees of function having fewer call sites and thus they
              may need updating.  */
@@ -1362,12 +1430,12 @@ inline_small_functions (void)
          inlined into (since it's body size changed) and for the functions
          called by function we inlined (since number of it inlinable callers
          might change).  */
-      update_caller_keys (heap, where, updated_nodes);
+      update_caller_keys (heap, where, updated_nodes, NULL);
 
       /* We removed one call of the function we just inlined.  If offline
          copy is still needed, be sure to update the keys.  */
       if (callee != where && !callee->global.inlined_to)
-        update_caller_keys (heap, callee, updated_nodes);
+        update_caller_keys (heap, callee, updated_nodes, NULL);
       bitmap_clear (updated_nodes);
 
       if (dump_file)
......