Commit 1d405d7b by Jan Hubicka, committed by Jan Hubicka

Revert the following patch until testsuite fallout is fixed:

	* cgraph.c (dump_cgraph_node): Dump size/time/benefit.
	* cgraph.h (struct inline_summary): New fields self_size,
	size_inlining_benefit, self_time and time_inlining_benefit.
	(struct cgraph_global_info): Replace insns by time and size fields.
	* ipa-cp.c (ipcp_cloning_candidate_p): Base estimate on size.
	(ipcp_estimate_growth, ipcp_insert_stage): Likewise.
	(ipcp_update_callgraph): Do not touch function bodies.
	* ipa-inline.c: Include except.h.
	(MAX_TIME): New constant.
	(overall_insns): Remove.
	(overall_size, max_benefit): New static variables.
	(cgraph_estimate_time_after_inlining): New function.
	(cgraph_estimate_size_after_inlining): Rewrite using benefits.
	(cgraph_clone_inlined_nodes): Update size.
	(cgraph_mark_inline_edge): Update size.
	(cgraph_estimate_growth): Use size info.
	(cgraph_check_inline_limits): Check size.
	(cgraph_default_inline_p): Likewise.
	(cgraph_edge_badness): Compute badness based on benefit and size cost.
	(cgraph_decide_recursive_inlining): Check size.
	(cgraph_decide_inlining_of_small_function): Update size; dump sizes and times.
	(cgraph_decide_inlining): Likewise.
	(cgraph_decide_inlining_incrementally): Likewise; honor PARAM_EARLY_INLINING_INSNS.
	(likely_eliminated_by_inlining_p): New predicate.
	(estimate_function_body_sizes): New function.
	(compute_inline_parameters): Use it.
	* except.c (must_not_throw_labels): New function.
	* except.h (must_not_throw_labels): Declare.
	* tree-inline.c (init_inline_once): Kill inlining_weights.
	* tree-ssa-structalias.c: Avoid uninitialized warning.
	* params.def (PARAM_MAX_INLINE_INSNS_SINGLE): Reduce to 300.
	(PARAM_MAX_INLINE_INSNS_AUTO): Reduce to 60.
	(PARAM_INLINE_CALL_COST): Remove.
	(PARAM_EARLY_INLINING_INSNS): New.

From-SVN: r147575
parent 7ffa47ca
cgraph.c:
@@ -1393,18 +1393,11 @@ dump_cgraph_node (FILE *f, struct cgraph_node *node)
   if (node->count)
     fprintf (f, " executed "HOST_WIDEST_INT_PRINT_DEC"x",
              (HOST_WIDEST_INT)node->count);
-  if (node->local.inline_summary.self_time)
-    fprintf (f, " %i time, %i benefit", node->local.inline_summary.self_time,
-             node->local.inline_summary.time_inlining_benefit);
-  if (node->global.time && node->global.time
-      != node->local.inline_summary.self_time)
-    fprintf (f, " (%i after inlining)", node->global.time);
-  if (node->local.inline_summary.self_size)
-    fprintf (f, " %i size, %i benefit", node->local.inline_summary.self_size,
-             node->local.inline_summary.size_inlining_benefit);
-  if (node->global.size && node->global.size
-      != node->local.inline_summary.self_size)
-    fprintf (f, " (%i after inlining)", node->global.size);
+  if (node->local.inline_summary.self_insns)
+    fprintf (f, " %i insns", node->local.inline_summary.self_insns);
+  if (node->global.insns && node->global.insns
+      != node->local.inline_summary.self_insns)
+    fprintf (f, " (%i after inlining)", node->global.insns);
   if (node->local.inline_summary.estimated_self_stack_size)
     fprintf (f, " %i bytes stack usage", (int)node->local.inline_summary.estimated_self_stack_size);
   if (node->global.estimated_stack_size != node->local.inline_summary.estimated_self_stack_size)
cgraph.h:
@@ -55,14 +55,8 @@ struct GTY(()) inline_summary
   /* Estimated stack frame consumption by the function.  */
   HOST_WIDE_INT estimated_self_stack_size;
-  /* Size of the function body.  */
-  int self_size;
-  /* How many instructions are likely going to disappear after inlining.  */
-  int size_inlining_benefit;
-  /* Estimated time spent executing the function body.  */
-  int self_time;
-  /* How much time is going to be saved by inlining.  */
-  int time_inlining_benefit;
+  /* Size of the function before inlining.  */
+  int self_insns;
 };

 /* Information about the function collected locally.
@@ -114,8 +108,7 @@ struct GTY(()) cgraph_global_info {
   struct cgraph_node *inlined_to;
   /* Estimated size of the function after inlining.  */
-  int time;
-  int size;
+  int insns;
   /* Estimated growth after inlining.  INT_MIN if not computed.  */
   int estimated_growth;
except.c:
@@ -1039,43 +1039,6 @@ get_next_region_sharing_label (int region)
   return r->next_region_sharing_label->region_number;
 }

-/* Return bitmap of all labels that are handlers of must not throw regions.  */
-
-bitmap
-must_not_throw_labels (void)
-{
-  struct eh_region *i;
-  bitmap labels = BITMAP_ALLOC (NULL);
-
-  i = cfun->eh->region_tree;
-  if (! i)
-    return labels;
-
-  while (1)
-    {
-      if (i->type == ERT_MUST_NOT_THROW && i->tree_label
-          && LABEL_DECL_UID (i->tree_label) >= 0)
-        bitmap_set_bit (labels, LABEL_DECL_UID (i->tree_label));
-
-      /* If there are sub-regions, process them.  */
-      if (i->inner)
-        i = i->inner;
-      /* If there are peers, process them.  */
-      else if (i->next_peer)
-        i = i->next_peer;
-      /* Otherwise, step back up the tree to the next peer.  */
-      else
-        {
-          do {
-            i = i->outer;
-            if (i == NULL)
-              return labels;
-          } while (i->next_peer == NULL);
-          i = i->next_peer;
-        }
-    }
-}
-
 /* Set up EH labels for RTL.  */

 void
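
The removed must_not_throw_labels walks the whole EH region tree iteratively through the inner (first child), next_peer (next sibling) and outer (parent) links rather than recursing. A minimal standalone sketch of the same traversal pattern, using a hypothetical node type instead of GCC's eh_region, looks like this:

#include <stddef.h>

/* Hypothetical tree node mirroring eh_region's inner/next_peer/outer links.  */
struct node
{
  struct node *inner;      /* first child */
  struct node *next_peer;  /* next sibling */
  struct node *outer;      /* parent */
  int data;
};

/* Visit every node in preorder without recursion, the same way the removed
   function walks cfun->eh->region_tree.  */
void
visit_all (struct node *root, void (*visit) (struct node *))
{
  struct node *i = root;

  if (!i)
    return;
  while (1)
    {
      visit (i);
      if (i->inner)                 /* descend into the first child */
        i = i->inner;
      else if (i->next_peer)        /* otherwise move to the next sibling */
        i = i->next_peer;
      else                          /* otherwise climb until a sibling exists */
        {
          do
            {
              i = i->outer;
              if (i == NULL)
                return;
            }
          while (i->next_peer == NULL);
          i = i->next_peer;
        }
    }
}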
except.h:
@@ -274,6 +274,5 @@ extern void set_eh_throw_stmt_table (struct function *, struct htab *);
 extern void remove_unreachable_regions (sbitmap, sbitmap);
 extern VEC(int,heap) * label_to_region_map (void);
 extern int num_eh_regions (void);
-extern bitmap must_not_throw_labels (void);
 extern struct eh_region *redirect_eh_edge_to_label (struct edge_def *, tree, bool, bool, int);
 extern int get_next_region_sharing_label (int);
ipa-cp.c:
@@ -396,7 +396,7 @@ ipcp_cloning_candidate_p (struct cgraph_node *node)
                 cgraph_node_name (node));
       return false;
     }
-  if (node->local.inline_summary.self_size < n_calls)
+  if (node->local.inline_summary.self_insns < n_calls)
     {
       if (dump_file)
         fprintf (dump_file, "Considering %s for cloning; code would shrink.\n",
@@ -837,7 +837,10 @@ ipcp_update_callgraph (void)
        {
          next = cs->next_caller;
          if (!ipcp_node_is_clone (cs->caller) && ipcp_need_redirect_p (cs))
+            {
              cgraph_redirect_edge_callee (cs, orig_node);
+              gimple_call_set_fndecl (cs->call_stmt, orig_node->decl);
+            }
        }
    }
 }
@@ -913,7 +916,7 @@ ipcp_estimate_growth (struct cgraph_node *node)
     call site.  Precise cost is dificult to get, as our size metric counts
     constants and moves as free.  Generally we are looking for cases that
     small function is called very many times.  */
-  growth = node->local.inline_summary.self_size
+  growth = node->local.inline_summary.self_insns
           - removable_args * redirectable_node_callers;
  if (growth < 0)
    return 0;
@@ -953,7 +956,7 @@ ipcp_estimate_cloning_cost (struct cgraph_node *node)
  cost /= freq_sum * 1000 / REG_BR_PROB_BASE + 1;
  if (dump_file)
    fprintf (dump_file, "Cost of versioning %s is %i, (size: %i, freq: %i)\n",
-            cgraph_node_name (node), cost, node->local.inline_summary.self_size,
+            cgraph_node_name (node), cost, node->local.inline_summary.self_insns,
             freq_sum);
  return cost + 1;
 }
@@ -1009,7 +1012,7 @@ ipcp_insert_stage (void)
    {
      if (node->count > max_count)
        max_count = node->count;
-      overall_size += node->local.inline_summary.self_size;
+      overall_size += node->local.inline_summary.self_insns;
    }
  max_new_size = overall_size;
ipa-inline.c:
@@ -138,9 +138,6 @@ along with GCC; see the file COPYING3.  If not see
 #include "tree-flow.h"
 #include "rtl.h"
 #include "ipa-prop.h"
-#include "except.h"
-
-#define MAX_TIME 1000000000

 /* Mode incremental inliner operate on:
@@ -166,8 +163,8 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
 /* Statistics we collect about inlining algorithm.  */
 static int ncalls_inlined;
 static int nfunctions_inlined;
-static int overall_size;
-static gcov_type max_count, max_benefit;
+static int overall_insns;
+static gcov_type max_count;

 /* Holders of ipa cgraph hooks: */
 static struct cgraph_node_hook_list *function_insertion_hook_holder;
@@ -178,30 +175,19 @@ inline_summary (struct cgraph_node *node)
   return &node->local.inline_summary;
 }

-/* Estimate self time of the function after inlining WHAT into TO.  */
-
-static int
-cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
-                                     struct cgraph_node *what)
-{
-  gcov_type time = (((gcov_type)what->global.time - inline_summary
-                     (what)->time_inlining_benefit)
-                    * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
-                    + to->global.time;
-  if (time < 0)
-    time = 0;
-  if (time > MAX_TIME)
-    time = MAX_TIME;
-  return time;
-}
-
-/* Estimate self time of the function after inlining WHAT into TO.  */
+/* Estimate size of the function after inlining WHAT into TO.  */

 static int
 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                      struct cgraph_node *what)
 {
-  int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
+  int size;
+  tree fndecl = what->decl, arg;
+  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
+
+  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
+    call_insns += estimate_move_cost (TREE_TYPE (arg));
+  size = (what->global.insns - call_insns) * times + to->global.insns;
   gcc_assert (size >= 0);
   return size;
 }
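
The two estimates above differ in what they subtract from the callee's size: the reverted patch subtracted the measured size_inlining_benefit, while the restored code subtracts a fixed call overhead (PARAM_INLINE_CALL_COST, restored to 12 in the params.def hunk below) plus a per-argument move cost. A standalone sketch with made-up numbers (all values hypothetical) shows the difference:

#include <stdio.h>

int
main (void)
{
  int times = 1;          /* call sites being inlined */
  int caller_insns = 100; /* to->global.insns */
  int callee_insns = 40;  /* what->global.insns */
  int nargs = 2;          /* arguments of the callee */

  /* Restored estimate: subtract one call overhead plus the argument moves
     (assuming a move cost of 1 per argument).  */
  int call_insns = 12 + nargs * 1;
  int restored = (callee_insns - call_insns) * times + caller_insns;

  /* Reverted estimate: subtract the statements expected to disappear,
     as measured by estimate_function_body_sizes (hypothetical value).  */
  int size_inlining_benefit = 10;
  int reverted = (callee_insns - size_inlining_benefit) * times + caller_insns;

  printf ("restored: %d, reverted: %d\n", restored, reverted);
  return 0;
}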
@@ -227,10 +213,7 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed)
-            {
-              overall_size -= e->callee->global.size;
-              nfunctions_inlined++;
-            }
+            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
@@ -270,7 +253,7 @@ static bool
 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
 {
-  int old_size = 0, new_size = 0;
+  int old_insns = 0, new_insns = 0;
   struct cgraph_node *to = NULL, *what;
   struct cgraph_edge *curr = e;
@@ -291,15 +274,16 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
   /* Now update size of caller and all functions caller is inlined into.  */
   for (;e && !e->inline_failed; e = e->caller->callers)
     {
+      old_insns = e->caller->global.insns;
+      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
+                                                       what);
+      gcc_assert (new_insns >= 0);
       to = e->caller;
-      old_size = e->caller->global.size;
-      new_size = cgraph_estimate_size_after_inlining (1, to, what);
-      to->global.size = new_size;
-      to->global.time = cgraph_estimate_time_after_inlining (e->frequency, to, what);
+      to->global.insns = new_insns;
     }
   gcc_assert (what->global.inlined_to == to);
-  if (new_size > old_size)
-    overall_size += new_size - old_size;
+  if (new_insns > old_insns)
+    overall_insns += new_insns - old_insns;
   ncalls_inlined++;

   if (flag_indirect_inlining)
@@ -354,7 +338,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
        self_recursive = true;
      if (e->inline_failed)
        growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
-                  - e->caller->global.size);
+                  - e->caller->global.insns);
    }

  /* ??? Wrong for non-trivially self recursive functions or cases where
@@ -362,7 +346,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
     as in that case we will keep the body around, but we will also avoid
     some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
-    growth -= node->global.size;
+    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
@@ -397,17 +381,17 @@ cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,

  /* When inlining large function body called once into small function,
     take the inlined function as base for limiting the growth.  */
-  if (inline_summary (to)->self_size > inline_summary(what)->self_size)
-    limit = inline_summary (to)->self_size;
+  if (inline_summary (to)->self_insns > inline_summary(what)->self_insns)
+    limit = inline_summary (to)->self_insns;
  else
-    limit = inline_summary (what)->self_size;
+    limit = inline_summary (what)->self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
-  if (newsize >= to->global.size
+  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
@@ -458,7 +442,7 @@ cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)

  if (DECL_DECLARED_INLINE_P (decl))
    {
-      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
+      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
@@ -467,7 +451,7 @@ cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
    }
  else
    {
-      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
+      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
@@ -513,7 +497,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

-      growth -= edge->caller->global.size;
+      growth -= edge->caller->global.insns;

      /* Always prefer inlining saving code size.  */
      if (growth <= 0)
@@ -522,8 +506,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
  /* When profiling is available, base priorities -(#calls / growth).
     So we optimize for overall number of "executed" inlined calls.  */
  else if (max_count)
-    badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
-               * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
+    badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;

  /* When function local profile is available, base priorities on
     growth / frequency, so we optimize for overall frequency of inlined
@@ -536,11 +519,11 @@ cgraph_edge_badness (struct cgraph_edge *edge)
     of the same size gets priority).  */
  else if (flag_guess_branch_prob)
    {
-      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
+      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
+      int growth =
+        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+      growth -= edge->caller->global.insns;

      badness = growth * 256;
-      div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
-                  / (edge->callee->global.time + 1) + 1, 100);

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
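
Both badness formulas above map hotter, smaller edges to more negative values so that fibheap_extract_min picks them first; the reverted patch additionally scaled the profile-based branch by the callee's estimated time benefit. A standalone sketch with made-up counts (all values hypothetical) mirrors the two expressions:

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  long long edge_count = 500;   /* profile count of this call edge */
  long long max_count = 1000;   /* hottest edge count in the unit */
  int growth = 20;              /* estimated caller growth in insns */
  int max_benefit = 10;         /* largest time_inlining_benefit seen (reverted patch) */
  int callee_benefit = 5;       /* this callee's time_inlining_benefit (reverted patch) */

  /* Restored heuristic: more executions per unit of growth => more negative.  */
  int badness_restored
    = ((int) ((double) edge_count * INT_MIN / max_count)) / growth;

  /* Reverted heuristic: additionally weight by the callee's time benefit.  */
  int badness_reverted
    = ((int) ((double) edge_count * INT_MIN / max_count / (max_benefit + 1))
       * (callee_benefit + 1)) / growth;

  printf ("restored: %d, reverted: %d\n", badness_restored, badness_reverted);
  return 0;
}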
@@ -783,9 +766,8 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
-            "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
-            master_clone->global.size, node->global.size,
-            master_clone->global.time, node->global.time);
+            "\n Inlined %i times, body grown from %i to %i insns\n", n,
+            master_clone->global.insns, node->global.insns);

  /* Remove master clone we used for inlining.  We rely that clones inlined
     into master clone gets queued just before master clone so we don't
@@ -863,7 +845,7 @@ cgraph_decide_inlining_of_small_functions (void)
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
-  int min_size, max_size;
+  int min_insns, max_insns;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
@@ -897,26 +879,26 @@ cgraph_decide_inlining_of_small_functions (void)
        }
    }

-  max_size = compute_max_insns (overall_size);
-  min_size = overall_size;
+  max_insns = compute_max_insns (overall_insns);
+  min_insns = overall_insns;

-  while (overall_size <= max_size
+  while (overall_insns <= max_insns
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
-      int old_size = overall_size;
+      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      cgraph_inline_failed_t not_good = CIF_OK;

-      growth -= edge->caller->global.size;
+      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
-                   "\nConsidering %s with %i size\n",
+                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
-                   edge->callee->global.size);
+                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
@@ -1058,20 +1040,19 @@ cgraph_decide_inlining_of_small_functions (void)
      if (dump_file)
        {
          fprintf (dump_file,
-                   " Inlined into %s which now has sie %i and self time %i,"
-                   "net change of %+i.\n",
+                   " Inlined into %s which now has %i insns,"
+                   "net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
-                   edge->caller->global.time,
-                   edge->caller->global.size,
-                   overall_size - old_size);
+                   edge->caller->global.insns,
+                   overall_insns - old_insns);
        }
-      if (min_size > overall_size)
+      if (min_insns > overall_insns)
        {
-          min_size = overall_size;
-          max_size = compute_max_insns (min_size);
+          min_insns = overall_insns;
+          max_insns = compute_max_insns (min_insns);

          if (dump_file)
-            fprintf (dump_file, "New minimal size reached: %i\n", min_size);
+            fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
        }
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
@@ -1100,38 +1081,34 @@ cgraph_decide_inlining (void)
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
-  int old_size = 0;
+  int old_insns = 0;
  int i;
+  int initial_insns = 0;
  bool redo_always_inline = true;
-  int initial_size = 0;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);

  max_count = 0;
-  max_benefit = 0;
  for (node = cgraph_nodes; node; node = node->next)
-    if (node->analyzed)
+    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

-        gcc_assert (inline_summary (node)->self_size == node->global.size);
-        gcc_assert (node->needed || node->reachable);
-        initial_size += node->global.size;
+        initial_insns += inline_summary (node)->self_insns;
+        gcc_assert (inline_summary (node)->self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
-        if (max_benefit < inline_summary (node)->time_inlining_benefit)
-          max_benefit = inline_summary (node)->time_inlining_benefit;
      }
+  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
-  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
-             "\nDeciding on inlining.  Starting with size %i.\n",
-             initial_size);
+             "\nDeciding on inlining.  Starting with %i insns.\n",
+             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;
@@ -1165,9 +1142,9 @@ cgraph_decide_inlining (void)
            continue;
          if (dump_file)
            fprintf (dump_file,
-                     "\nConsidering %s size:%i (always inline)\n",
-                     cgraph_node_name (node), node->global.size);
-          old_size = overall_size;
+                     "\nConsidering %s %i insns (always inline)\n",
+                     cgraph_node_name (node), node->global.insns);
+          old_insns = overall_insns;
          for (e = node->callers; e; e = next)
            {
              next = e->next_caller;
@@ -1186,9 +1163,9 @@ cgraph_decide_inlining (void)
                redo_always_inline = true;
              if (dump_file)
                fprintf (dump_file,
-                         " Inlined into %s which now has size %i.\n",
+                         " Inlined into %s which now has %i insns.\n",
                         cgraph_node_name (e->caller),
-                         e->caller->global.size);
+                         e->caller->global.insns);
            }
          /* Inlining self recursive function might introduce new calls to
             themselves we didn't see in the loop above.  Fill in the proper
@@ -1198,8 +1175,8 @@ cgraph_decide_inlining (void)
              e->inline_failed = CIF_RECURSIVE_INLINING;
          if (dump_file)
            fprintf (dump_file,
-                     " Inlined for a net change of %+i size.\n",
-                     overall_size - old_size);
+                     " Inlined for a net change of %+i insns.\n",
+                     overall_insns - old_insns);
        }
    }
@@ -1227,25 +1204,27 @@ cgraph_decide_inlining (void)
          if (dump_file)
            {
              fprintf (dump_file,
-                       "\nConsidering %s size %i.\n",
-                       cgraph_node_name (node), node->global.size);
+                       "\nConsidering %s %i insns.\n",
+                       cgraph_node_name (node), node->global.insns);
              fprintf (dump_file,
                       " Called once from %s %i insns.\n",
                       cgraph_node_name (node->callers->caller),
-                       node->callers->caller->global.size);
+                       node->callers->caller->global.insns);
            }

+          old_insns = overall_insns;
          if (cgraph_check_inline_limits (node->callers->caller, node,
                                          NULL, false))
            {
              cgraph_mark_inline (node->callers);
              if (dump_file)
                fprintf (dump_file,
-                         " Inlined into %s which now has %i size"
-                         " for a net change of %+i size.\n",
+                         " Inlined into %s which now has %i insns"
+                         " for a net change of %+i insns.\n",
                         cgraph_node_name (node->callers->caller),
-                         node->callers->caller->global.size,
-                         overall_size - old_size);
+                         node->callers->caller->global.insns,
+                         overall_insns - old_insns);
            }
          else
            {
@@ -1264,9 +1243,9 @@ cgraph_decide_inlining (void)
  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
-             "size %i turned to %i size.\n\n",
-             ncalls_inlined, nfunctions_inlined, initial_size,
-             overall_size);
+             "%i insns turned to %i insns.\n\n",
+             ncalls_inlined, nfunctions_inlined, initial_insns,
+             overall_insns);
  free (order);
  return 0;
 }
@@ -1451,7 +1430,6 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
-        int allowed_growth = 0;
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
@@ -1478,10 +1456,6 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
              }
            continue;
          }
-        if (cgraph_maybe_hot_edge_p (e))
-          allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
-
        /* When the function body would grow and inlining the function won't
           eliminate the need for offline copy of the function, don't inline.
         */
@@ -1489,17 +1463,17 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
            || (!flag_inline_functions
                && !DECL_DECLARED_INLINE_P (e->callee->decl)))
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
-                >= e->caller->global.size + allowed_growth)
-            && cgraph_estimate_growth (e->callee) >= allowed_growth)
+                > e->caller->global.insns)
+            && cgraph_estimate_growth (e->callee) > 0)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
-                         "Not inlining: code size would grow by %i.\n",
+                         "Not inlining: code size would grow by %i insns.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
-                         - e->caller->global.size);
+                         - e->caller->global.insns);
              }
            continue;
          }
@@ -1626,177 +1600,6 @@ struct simple_ipa_opt_pass pass_ipa_early_inline =
  }
 };

-/* See if statement might disappear after inlining.  We are not terribly
-   sophisficated, basically looking for simple abstraction penalty wrappers.  */
-
-static bool
-likely_eliminated_by_inlining_p (gimple stmt)
-{
-  enum gimple_code code = gimple_code (stmt);
-  switch (code)
-    {
-      case GIMPLE_RETURN:
-        return true;
-      case GIMPLE_ASSIGN:
-        if (gimple_num_ops (stmt) != 2)
-          return false;
-
-        /* Casts of parameters, loads from parameters passed by reference
-           and stores to return value or parameters are probably free after
-           inlining.  */
-        if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
-            || gimple_assign_rhs_code (stmt) == NOP_EXPR
-            || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
-            || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
-          {
-            tree rhs = gimple_assign_rhs1 (stmt);
-            tree lhs = gimple_assign_lhs (stmt);
-            tree inner_rhs = rhs;
-            tree inner_lhs = lhs;
-            bool rhs_free = false;
-            bool lhs_free = false;
-
-            while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
-              inner_lhs = TREE_OPERAND (inner_lhs, 0);
-            while (handled_component_p (inner_rhs)
-                   || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
-              inner_rhs = TREE_OPERAND (inner_rhs, 0);
-
-            if (TREE_CODE (inner_rhs) == PARM_DECL
-                || (TREE_CODE (inner_rhs) == SSA_NAME
-                    && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
-                    && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
-              rhs_free = true;
-            if (rhs_free && is_gimple_reg (lhs))
-              lhs_free = true;
-            if (((TREE_CODE (inner_lhs) == PARM_DECL
-                  || (TREE_CODE (inner_lhs) == SSA_NAME
-                      && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
-                      && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
-                 && inner_lhs != lhs)
-                || TREE_CODE (inner_lhs) == RESULT_DECL
-                || (TREE_CODE (inner_lhs) == SSA_NAME
-                    && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
-              lhs_free = true;
-            if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
-              rhs_free = true;
-            if (lhs_free && rhs_free)
-              return true;
-          }
-        return false;
-      default:
-        return false;
-    }
-}
-
-/* Compute function body size parameters for NODE.  */
-
-static void
-estimate_function_body_sizes (struct cgraph_node *node)
-{
-  gcov_type time = 0;
-  gcov_type time_inlining_benefit = 0;
-  int size = 0;
-  int size_inlining_benefit = 0;
-  basic_block bb;
-  gimple_stmt_iterator bsi;
-  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
-  tree arg;
-  int freq;
-  tree funtype = TREE_TYPE (node->decl);
-  bitmap must_not_throw = must_not_throw_labels ();
-
-  if (dump_file)
-    {
-      fprintf (dump_file, "Analyzing function body size: %s\n", cgraph_node_name (node));
-    }
-
-  gcc_assert (my_function && my_function->cfg);
-  FOR_EACH_BB_FN (bb, my_function)
-    {
-      freq = compute_call_stmt_bb_frequency (node->decl, bb);
-      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
-        {
-          int this_size = estimate_num_insns (gsi_stmt (bsi), &eni_size_weights);
-          int this_time = estimate_num_insns (gsi_stmt (bsi), &eni_time_weights);
-
-          /* MUST_NOT_THROW is usually handled by runtime calling terminate and stopping
-             stacking unwinding.  However when there is local cleanup that can resume
-             to MUST_NOT_THROW then we generate explicit handler containing
-             std::terminate () call.
-
-             Because inlining of function can introduce new cleanup region, prior
-             inlining we keep std::terinate () calls for every MUST_NOT_THROW containing
-             function call.  Wast majority of these will be eliminated after inlining
-             and crossjumping will inify possible duplicated calls.  So ignore
-             the handlers for function body estimates.  */
-          if (gimple_code (gsi_stmt (bsi)) == GIMPLE_LABEL
-              && bitmap_bit_p (must_not_throw,
-                               LABEL_DECL_UID (gimple_label_label (gsi_stmt (bsi)))))
-            {
-              if (dump_file)
-                fprintf (dump_file, "  MUST_NOT_THROW landing pad.  Ignoring whole BB.\n");
-            }
-          if (dump_file)
-            {
-              fprintf (dump_file, "  freq:%6i size:%3i time:%3i ", freq, this_size, this_time);
-              print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
-            }
-          this_time *= freq;
-          time += this_time;
-          size += this_size;
-          if (likely_eliminated_by_inlining_p (gsi_stmt (bsi)))
-            {
-              size_inlining_benefit += this_size;
-              time_inlining_benefit += this_time;
-              if (dump_file)
-                fprintf (dump_file, "    Likely eliminated\n");
-            }
-          gcc_assert (time >= 0);
-          gcc_assert (size >= 0);
-        }
-    }
-  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
-  time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
-                           / CGRAPH_FREQ_BASE);
-  if (dump_file)
-    {
-      fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
-               (int)time, (int)time_inlining_benefit,
-               size, size_inlining_benefit);
-    }
-  time_inlining_benefit += eni_time_weights.call_cost;
-  size_inlining_benefit += eni_size_weights.call_cost;
-  if (!VOID_TYPE_P (TREE_TYPE (funtype)))
-    {
-      int cost = estimate_move_cost (TREE_TYPE (funtype));
-      time_inlining_benefit += cost;
-      size_inlining_benefit += cost;
-    }
-  for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
-    {
-      int cost = estimate_move_cost (TREE_TYPE (arg));
-      time_inlining_benefit += cost;
-      size_inlining_benefit += cost;
-    }
-  if (time_inlining_benefit > MAX_TIME)
-    time_inlining_benefit = MAX_TIME;
-  if (time > MAX_TIME)
-    time = MAX_TIME;
-  inline_summary (node)->self_time = time;
-  inline_summary (node)->self_size = size;
-  if (dump_file)
-    {
-      fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
-               (int)time, (int)time_inlining_benefit,
-               size, size_inlining_benefit);
-    }
-  inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
-  inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
-  BITMAP_FREE (must_not_throw);
-}
-
 /* Compute parameters of functions used by inliner.  */

 unsigned int
 compute_inline_parameters (struct cgraph_node *node)
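
The removed likely_eliminated_by_inlining_p targets abstraction-penalty wrappers: parameter loads, copies into the return value and the return itself usually vanish once the body is inlined, so estimate_function_body_sizes counted them as size_inlining_benefit and time_inlining_benefit. A hypothetical example of the pattern it looks for (illustration only, not part of this commit):

struct point { int x; int y; };

/* Load from a parameter passed by reference, store to the return value,
   then return: every statement here is "likely eliminated" after inlining.  */
static int
get_x (const struct point *p)
{
  return p->x;
}

int
use (const struct point *p)
{
  /* After inlining get_x, only the addition remains in the caller.  */
  return get_x (p) + 1;
}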
@@ -1814,13 +1617,19 @@ compute_inline_parameters (struct cgraph_node *node)
  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);

+  /* Estimate the number of instructions for this function.
+     ??? At -O0 we don't use this information except for the dumps, and
+     even then only for always_inline functions.  But disabling this
+     causes ICEs in the inline heuristics...  */
+  inline_summary (node)->self_insns
+    = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
-  estimate_function_body_sizes (node);
  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
-  node->global.time = inline_summary (node)->self_time;
-  node->global.size = inline_summary (node)->self_size;
+  node->global.insns = inline_summary (node)->self_insns;
  return 0;
 }
@@ -1838,7 +1647,7 @@ struct gimple_opt_pass pass_inline_parameters =
 {
 {
  GIMPLE_PASS,
-  "inline_param",                       /* name */
+  NULL,                                 /* name */
  NULL,                                  /* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,                                  /* sub */
params.def:
@@ -100,7 +100,7 @@ DEFPARAM (PARAM_PREDICTABLE_BRANCH_OUTCOME,
 DEFPARAM (PARAM_MAX_INLINE_INSNS_SINGLE,
          "max-inline-insns-single",
          "The maximum number of instructions in a single function eligible for inlining",
-         300, 0, 0)
+         450, 0, 0)

 /* The single function inlining limit for functions that are
    inlined by virtue of -finline-functions (-O3).
@@ -112,7 +112,7 @@ DEFPARAM (PARAM_MAX_INLINE_INSNS_SINGLE,
 DEFPARAM (PARAM_MAX_INLINE_INSNS_AUTO,
          "max-inline-insns-auto",
          "The maximum number of instructions when automatically inlining",
-         60, 0, 0)
+         90, 0, 0)

 DEFPARAM (PARAM_MAX_INLINE_INSNS_RECURSIVE,
          "max-inline-insns-recursive",
@@ -204,9 +204,9 @@ DEFPARAM(PARAM_IPCP_UNIT_GROWTH,
         "ipcp-unit-growth",
         "how much can given compilation unit grow because of the interprocedural constant propagation (in percent)",
         10, 0, 0)
-DEFPARAM(PARAM_EARLY_INLINING_INSNS,
-        "early-inlining-insns",
-        "maximal estimated growth of function body caused by early inlining of single call",
+DEFPARAM(PARAM_INLINE_CALL_COST,
+        "inline-call-cost",
+        "expense of call operation relative to ordinary arithmetic operations",
         12, 0, 0)
 DEFPARAM(PARAM_LARGE_STACK_FRAME,
         "large-stack-frame",
tree-inline.c:
@@ -3156,6 +3156,12 @@ estimate_num_insns_fn (tree fndecl, eni_weights *weights)
 void
 init_inline_once (void)
 {
+  eni_inlining_weights.call_cost = PARAM_VALUE (PARAM_INLINE_CALL_COST);
+  eni_inlining_weights.target_builtin_call_cost = 1;
+  eni_inlining_weights.div_mod_cost = 10;
+  eni_inlining_weights.omp_cost = 40;
+  eni_inlining_weights.time_based = true;
+
   eni_size_weights.call_cost = 1;
   eni_size_weights.target_builtin_call_cost = 1;
   eni_size_weights.div_mod_cost = 1;
tree-ssa-structalias.c:
@@ -3425,7 +3425,7 @@ handle_lhs_call (tree lhs, int flags, VEC(ce_s, heap) *rhsc)
 static void
 handle_const_call (gimple stmt, VEC(ce_s, heap) **results)
 {
-  struct constraint_expr rhsc, tmpc = {SCALAR, 0, 0};
+  struct constraint_expr rhsc, tmpc;
   tree tmpvar = NULL_TREE;
   unsigned int k;