Commit 956d615d by Jakub Jelinek

ipa-fnsummary.c: Fix comment typos.

	* ipa-fnsummary.c: Fix comment typos.
	* ipa-ref.h: Likewise.
	* ipa-predicate.h: Likewise.
	* ipa-split.c: Likewise.
	* ipa-inline-analysis.c: Likewise.
	* ipa-predicate.c: Likewise.
	* ipa-devirt.c: Likewise.
	* ipa-icf.h: Likewise.
	* profile-count.c: Likewise.
	* ipa-icf.c: Likewise.
	(sem_function::equals_wpa): Fix typos in dump messages.
	* ipa-icf-gimple.h: Fix comment typos.
	* ipa-inline-transform.c: Likewise.
	* ipa-polymorphic-call.c: Likewise.
	* ipa-fnsummary.h: Likewise.
	* ipa-inline.c: Likewise.
	(dump_inline_stats): Fix typo in debug dump message.
	* profile-count.h: Fix comment typos.

From-SVN: r278643
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -216,7 +216,7 @@ struct GTY(()) odr_type_d
 bool all_derivations_known;
 /* Did we report ODR violation here? */
 bool odr_violated;
-/* Set when virtual table without RTTI previaled table with. */
+/* Set when virtual table without RTTI prevailed table with. */
 bool rtti_broken;
 /* Set when the canonical type is determined using the type name. */
 bool tbaa_enabled;
@@ -655,7 +655,7 @@ compare_virtual_tables (varpool_node *prevailing, varpool_node *vtable)
 end2 = !vtable->iterate_reference (n2, ref2);
 /* !DECL_VIRTUAL_P means RTTI entry;
-We warn when RTTI is lost because non-RTTI previals; we silently
+We warn when RTTI is lost because non-RTTI prevails; we silently
 accept the other case. */
 while (!end2
 && (end1
@@ -767,7 +767,7 @@ compare_virtual_tables (varpool_node *prevailing, varpool_node *vtable)
 class_type->odr_violated = true;
-/* Complain about size mismatch. Either we have too many virutal
+/* Complain about size mismatch. Either we have too many virtual
 functions or too many virtual table pointers. */
 if (end1 || end2)
 {
@@ -861,7 +861,7 @@ warn_odr (tree t1, tree t2, tree st1, tree st2,
 if (!warn || !TYPE_NAME(TYPE_MAIN_VARIANT (t1)))
 return;
-/* ODR warnings are output druing LTO streaming; we must apply location
+/* ODR warnings are output during LTO streaming; we must apply location
 cache for potential warnings to be output correctly. */
 if (lto_location_cache::current_cache)
 lto_location_cache::current_cache->apply_location_cache ();
@@ -920,7 +920,7 @@ warn_odr (tree t1, tree t2, tree st1, tree st2,
 *warned = true;
 }
-/* Return ture if T1 and T2 are incompatible and we want to recusively
+/* Return true if T1 and T2 are incompatible and we want to recursively
 dive into them from warn_type_mismatch to give sensible answer. */
 static bool
@@ -941,7 +941,7 @@ type_mismatch_p (tree t1, tree t2)
 This is hard to do in general. We basically handle the common cases.
 If LOC1 and LOC2 are meaningful locations, use it in the case the types
-themselves do no thave one.*/
+themselves do not have one. */
 void
 warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
@@ -1006,7 +1006,7 @@ warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
 n1 = DECL_NAME (n1);
 if (n2 && TREE_CODE (n2) == TYPE_DECL)
 n2 = DECL_NAME (n2);
-/* Most of the time, the type names will match, do not be unnecesarily
+/* Most of the time, the type names will match, do not be unnecessarily
 verbose. */
 if (n1 != n2)
 inform (loc_t1,
@@ -1132,7 +1132,7 @@ warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
 if (types_odr_comparable (t1, t2)
 /* We make assign integers mangled names to be able to handle
 signed/unsigned chars. Accepting them here would however lead to
-confussing message like
+confusing message like
 "type ‘const int’ itself violates the C++ One Definition Rule" */
 && TREE_CODE (t1) != INTEGER_TYPE
 && types_same_for_odr (t1, t2))
@@ -1149,7 +1149,7 @@ warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
 inform (loc_t2, "the incompatible type is defined here");
 }
-/* Return true if T should be ignored in TYPE_FIELDS for ODR comparsion. */
+/* Return true if T should be ignored in TYPE_FIELDS for ODR comparison. */
 static bool
 skip_in_fields_list_p (tree t)
@@ -2047,7 +2047,7 @@ odr_type_violation_reported_p (tree type)
 return get_odr_type (type, false)->odr_violated;
 }
-/* Add TYPE od ODR type hash. */
+/* Add TYPE of ODR type hash. */
 void
 register_odr_type (tree type)
@@ -2056,7 +2056,7 @@ register_odr_type (tree type)
 odr_hash = new odr_hash_type (23);
 if (type == TYPE_MAIN_VARIANT (type))
 {
-/* To get ODR warings right, first register all sub-types. */
+/* To get ODR warnings right, first register all sub-types. */
 if (RECORD_OR_UNION_TYPE_P (type)
 && COMPLETE_TYPE_P (type))
 {
@@ -2157,7 +2157,7 @@ dump_type_inheritance_graph (FILE *f)
 continue;
 /* To aid ODR warnings we also mangle integer constants but do
-not consinder duplicates there. */
+not consider duplicates there. */
 if (TREE_CODE (odr_types[i]->type) == INTEGER_TYPE)
 continue;
@@ -2987,7 +2987,7 @@ class final_warning_record *final_warning_records;
 If INCLUDE_BASES is true, walk also base types of OUTER_TYPES containing
 OTR_TYPE and include their virtual method. This is useful for types
 possibly in construction or destruction where the virtual table may
-temporarily change to one of base types. INCLUDE_DERIVER_TYPES make
+temporarily change to one of base types. INCLUDE_DERIVED_TYPES make
 us to walk the inheritance graph for all derivations.
 If COMPLETEP is non-NULL, store true if the list is complete.
@@ -3672,7 +3672,7 @@ ipa_devirt (void)
 itself.
 This may need to be revisited once we add further ways to use
-the may edges, but it is a resonable thing to do right now. */
+the may edges, but it is a reasonable thing to do right now. */
 if ((e->indirect_info->param_index == -1
 || (!opt_for_fn (n->decl, flag_devirtualize_speculatively)
--- a/gcc/ipa-fnsummary.c
+++ b/gcc/ipa-fnsummary.c
@@ -145,7 +145,7 @@ ipa_dump_hints (FILE *f, ipa_hints hints)
 /* Record SIZE and TIME to SUMMARY.
 The accounted code will be executed when EXEC_PRED is true.
-When NONCONST_PRED is false the code will evaulate to constant and
+When NONCONST_PRED is false the code will evaluate to constant and
 will get optimized out in specialized clones of the function.
 If CALL is true account to call_size_time_table rather than
 size_time_table. */
@@ -171,12 +171,12 @@ ipa_fn_summary::account_size_time (int size, sreal time,
 if (nonconst_pred == false)
 return;
-/* We need to create initial empty unconitional clause, but otherwie
+/* We need to create initial empty unconditional clause, but otherwise
 we don't need to account empty times and sizes. */
 if (!size && time == 0 && table)
 return;
-/* Only for calls we are unaccounting what we previously recoreded. */
+/* Only for calls we are unaccounting what we previously recorded. */
 gcc_checking_assert (time >= 0 || call);
 for (i = 0; vec_safe_iterate (table, i, &e); i++)
@@ -234,7 +234,7 @@ ipa_fn_summary::account_size_time (int size, sreal time,
 }
 }
-/* We proved E to be unreachable, redirect it to __bultin_unreachable. */
+/* We proved E to be unreachable, redirect it to __builtin_unreachable. */
 static struct cgraph_edge *
 redirect_to_unreachable (struct cgraph_edge *e)
@@ -309,9 +309,9 @@ set_hint_predicate (predicate **p, predicate new_predicate)
 }
-/* Compute what conditions may or may not hold given invormation about
+/* Compute what conditions may or may not hold given information about
 parameters. RET_CLAUSE returns truths that may hold in a specialized copy,
-whie RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
+while RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
 copy when called in a given context. It is a bitmask of conditions. Bit
 0 means that condition is known to be false, while bit 1 means that condition
 may or may not be true. These differs - for example NOT_INLINED condition
@@ -319,7 +319,7 @@ set_hint_predicate (predicate **p, predicate new_predicate)
 the fact that parameter is indeed a constant.
 KNOWN_VALS is partial mapping of parameters of NODE to constant values.
-KNOWN_AGGS is a vector of aggreggate known offset/value set for each
+KNOWN_AGGS is a vector of aggregate known offset/value set for each
 parameter. Return clause of possible truths. When INLINE_P is true, assume
 that we are inlining.
@@ -506,12 +506,12 @@ evaluate_conditions_for_known_args (struct cgraph_node *node,
 /* Work out what conditions might be true at invocation of E.
 Compute costs for inlined edge if INLINE_P is true.
-Return in CLAUSE_PTR the evaluated condistions and in NONSPEC_CLAUSE_PTR
+Return in CLAUSE_PTR the evaluated conditions and in NONSPEC_CLAUSE_PTR
 (if non-NULL) conditions evaluated for nonspecialized clone called
 in a given context.
 KNOWN_VALS_PTR and KNOWN_AGGS_PTR must be non-NULL and will be filled by
-known canstant and aggregate values of parameters.
+known constant and aggregate values of parameters.
 KNOWN_CONTEXT_PTR, if non-NULL, will be filled by polymorphic call contexts
 of parameter used by a polymorphic call. */
@@ -784,7 +784,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
 info->account_size_time (0, 0, true_pred, true_pred);
 /* Remap size_time vectors.
-Simplify the predicate by prunning out alternatives that are known
+Simplify the predicate by pruning out alternatives that are known
 to be false.
 TODO: as on optimization, we can also eliminate conditions known
 to be true. */
@@ -822,7 +822,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
 edge_set_predicate (edge, &new_predicate);
 }
-/* Remap indirect edge predicates with the same simplificaiton as above.
+/* Remap indirect edge predicates with the same simplification as above.
 Also copy constantness arrays. */
 for (edge = dst->indirect_calls; edge; edge = next)
 {
@@ -847,7 +847,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
 /* If inliner or someone after inliner will ever start producing
 non-trivial clones, we will get trouble with lack of information
 about updating self sizes, because size vectors already contains
-sizes of the calees. */
+sizes of the callees. */
 gcc_assert (!inlined_to_p || !optimized_out_size);
 }
 else
@@ -1202,7 +1202,7 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
 /* Casts of parameters, loads from parameters passed by reference
 and stores to return value or parameters are often free after
-inlining dua to SRA and further combining.
+inlining due to SRA and further combining.
 Assume that half of statements goes away. */
 if (CONVERT_EXPR_CODE_P (rhs_code)
 || rhs_code == VIEW_CONVERT_EXPR
@@ -1256,12 +1256,12 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
 lhs_free = true;
 /* Writes to parameters, parameters passed by value and return value
-(either dirrectly or passed via invisible reference) are free.
+(either directly or passed via invisible reference) are free.
 TODO: We ought to handle testcase like
 struct a {int a,b;};
 struct a
-retrurnsturct (void)
+returnstruct (void)
 {
 struct a a ={1,2};
 return a;
@@ -1269,7 +1269,7 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
 This translate into:
-retrurnsturct ()
+returnstruct ()
 {
 int a$b;
 int a$a;
@@ -1467,7 +1467,7 @@ set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
 ? code : inverted_code);
 /* invert_tree_comparison will return ERROR_MARK on FP
-comparsions that are not EQ/NE instead of returning proper
+comparisons that are not EQ/NE instead of returning proper
 unordered one. Be sure it is not confused with NON_CONSTANT.
 And if the edge's target is the final block of diamond CFG graph
@@ -1498,7 +1498,7 @@ set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
 Here we can predicate nonconstant_code. We can't
 really handle constant_code since we have no predicate
 for this and also the constant code is not known to be
-optimized away when inliner doen't see operand is constant.
+optimized away when inliner doesn't see operand is constant.
 Other optimizers might think otherwise. */
 if (gimple_cond_code (last) != NE_EXPR
 || !integer_zerop (gimple_cond_rhs (last)))
@@ -1921,7 +1921,7 @@ will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
 int base_index;
 struct agg_position_info aggpos;
-/* What statments might be optimized away
+/* What statements might be optimized away
 when their arguments are constant. */
 if (gimple_code (stmt) != GIMPLE_ASSIGN
 && gimple_code (stmt) != GIMPLE_COND
@@ -2004,7 +2004,7 @@ struct record_modified_bb_info
 gimple *stmt;
 };
-/* Value is initialized in INIT_BB and used in USE_BB. We want to copute
+/* Value is initialized in INIT_BB and used in USE_BB. We want to compute
 probability how often it changes between USE_BB.
 INIT_BB->count/USE_BB->count is an estimate, but if INIT_BB
 is in different loop nest, we can do better.
@@ -2333,7 +2333,7 @@ find_foldable_builtin_expect (basic_block bb)
 presence of EH and will be optimized out by optimize_clobbers later in the
 game.
-NEED_EH is used to recurse in case the clobber has non-EH predecestors
+NEED_EH is used to recurse in case the clobber has non-EH predecessors
 that can be clobber only, too.. When it is false, the RESX is not necessary
 on the end of basic block. */
@@ -2367,7 +2367,7 @@ clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
 return false;
 }
-/* See if all predecestors are either throws or clobber only BBs. */
+/* See if all predecessors are either throws or clobber only BBs. */
 FOR_EACH_EDGE (e, ei, bb->preds)
 if (!(e->flags & EDGE_EH)
 && !clobber_only_eh_bb_p (e->src, false))
@@ -2543,7 +2543,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
 predicate will_be_nonconstant;
 /* This relation stmt should be folded after we remove
-buildin_expect call. Adjust the cost here. */
+__builtin_expect call. Adjust the cost here. */
 if (stmt == fix_builtin_expect_stmt)
 {
 this_size--;
@@ -2609,7 +2609,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
 }
 }
-/* TODO: When conditional jump or swithc is known to be constant, but
+/* TODO: When conditional jump or switch is known to be constant, but
 we did not translate it into the predicates, we really can account
 just maximum of the possible paths. */
 if (fbi.info)
@@ -3066,7 +3066,7 @@ estimate_calls_size_and_time_1 (struct cgraph_node *node, int *size,
 || es->predicate->evaluate (possible_truths))
 {
 /* Predicates of calls shall not use NOT_CHANGED codes,
-sowe do not need to compute probabilities. */
+so we do not need to compute probabilities. */
 estimate_edge_size_and_time (e, size,
 es->predicate ? NULL : min_size,
 time,
@@ -3239,7 +3239,7 @@ estimate_calls_size_and_time (struct cgraph_node *node, int *size,
 }
 /* Default constructor for ipa call context.
-Memory alloction of known_vals, known_contexts
+Memory allocation of known_vals, known_contexts
 and known_aggs vectors is owned by the caller, but can
 be release by ipa_call_context::release.
@@ -3334,7 +3334,7 @@ ipa_call_context::duplicate_from (const ipa_call_context &ctx)
 /* Release memory used by known_vals/contexts/aggs vectors.
 If ALL is true release also inline_param_summary.
-This happens when context was previously duplciated to be stored
+This happens when context was previously duplicated to be stored
 into cache. */
 void
@@ -3471,7 +3471,7 @@ ipa_call_context::equal_to (const ipa_call_context &ctx)
 }
 /* Estimate size and time needed to execute call in the given context.
-Additionally detemine hints determined by the context. Finally compute
+Additionally determine hints determined by the context. Finally compute
 minimal size needed for the call that is independent on the call context and
 can be used for fast estimates. Return the values in RET_SIZE,
 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
@@ -3575,7 +3575,7 @@ ipa_call_context::estimate_size_and_time (int *ret_size,
 gcc_checking_assert ((nonspecialized_time - time * 99 / 100) >= -1);
 /* Roundoff issues may make specialized time bigger than nonspecialized
-time. We do not really want that to happen because some heurstics
+time. We do not really want that to happen because some heuristics
 may get confused by seeing negative speedups. */
 if (time > nonspecialized_time)
 time = nonspecialized_time;
@@ -3684,7 +3684,7 @@ inline_update_callee_summaries (struct cgraph_node *node, int depth)
 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
 When function A is inlined in B and A calls C with parameter that
-changes with probability PROB1 and C is known to be passthroug
+changes with probability PROB1 and C is known to be passthrough
 of argument if B that change with probability PROB2, the probability
 of change is now PROB1*PROB2. */
--- a/gcc/ipa-fnsummary.h
+++ b/gcc/ipa-fnsummary.h
@@ -26,13 +26,13 @@ along with GCC; see the file COPYING3. If not see
 /* Hints are reasons why IPA heuristics should prefer specializing given
-function. They are represtented as bitmap of the following values. */
+function. They are represented as bitmap of the following values. */
 enum ipa_hints_vals {
 /* When specialization turns indirect call into a direct call,
 it is good idea to do so. */
 INLINE_HINT_indirect_call = 1,
 /* Inlining may make loop iterations or loop stride known. It is good idea
-to do so because it enables loop optimizatoins. */
+to do so because it enables loop optimizations. */
 INLINE_HINT_loop_iterations = 2,
 INLINE_HINT_loop_stride = 4,
 /* Inlining within same strongly connected component of callgraph is often
@@ -162,7 +162,7 @@ public:
 /* Conditional size/time information. The summaries are being
 merged during inlining. */
 conditions conds;
-/* Normal code is acocunted in size_time_table, while calls are
+/* Normal code is accounted in size_time_table, while calls are
 accounted in call_size_time_table. This is because calls
 are often adjusted by IPA optimizations and thus this summary
 is generated from call summary information when needed. */
@@ -292,7 +292,7 @@ public:
 /* This object describe a context of call. That is a summary of known
 information about its parameters. Main purpose of this context is
-to give more realistic esitmations of function runtime, size and
+to give more realistic estimations of function runtime, size and
 inline hints. */
 class ipa_call_context
 {
@@ -323,7 +323,7 @@ private:
 /* Called function. */
 cgraph_node *m_node;
 /* Clause describing what predicate conditionals can be satisfied
-in this context if function is inlined/specialised. */
+in this context if function is inlined/specialized. */
 clause_t m_possible_truths;
 /* Clause describing what predicate conditionals can be satisfied
 in this context if function is kept offline. */
--- a/gcc/ipa-icf-gimple.h
+++ b/gcc/ipa-icf-gimple.h
@@ -19,13 +19,13 @@ You should have received a copy of the GNU General Public License
 along with GCC; see the file COPYING3. If not see
 <http://www.gnu.org/licenses/>. */
-/* Gimple identical code folding (class func_checker) is an infastructure
+/* Gimple identical code folding (class func_checker) is an infrastructure
 capable of comparing two given functions. The class compares every
 gimple statement and uses many dictionaries to map source and target
 SSA_NAMEs, declarations and other components.
-To use the infrastructure, create an instanse of func_checker and call
-a comparsion function based on type of gimple statement. */
+To use the infrastructure, create an instance of func_checker and call
+a comparison function based on type of gimple statement. */
 /* Prints string STRING to a FILE with a given number of SPACE_COUNT. */
 #define FPUTS_SPACES(file, space_count, string) \
@@ -220,7 +220,7 @@ public:
 bool compare_loops (basic_block bb1, basic_block bb2);
 /* Return true if types are compatible for polymorphic call analysis.
-COMPARE_PTR indicates if polymorphic type comparsion should be
+COMPARE_PTR indicates if polymorphic type comparison should be
 done for pointers, too. */
 static bool compatible_polymorphic_types_p (tree t1, tree t2,
 bool compare_ptr);
--- a/gcc/ipa-icf.c
+++ b/gcc/ipa-icf.c
@@ -300,7 +300,7 @@ sem_function::get_hash (void)
 /* Compare properties of symbols N1 and N2 that does not affect semantics of
 symbol itself but affects semantics of its references from USED_BY (which
-may be NULL if it is unknown). If comparsion is false, symbols
+may be NULL if it is unknown). If comparison is false, symbols
 can still be merged but any symbols referring them can't.
 If ADDRESS is true, do extra checking needed for IPA_REF_ADDR.
@@ -550,7 +550,7 @@ sem_function::equals_wpa (sem_item *item,
 if (DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl)
 != DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (item->decl))
-return return_false_with_msg ("intrument function entry exit "
+return return_false_with_msg ("instrument function entry exit "
 "attributes are different");
 if (DECL_NO_LIMIT_STACK (decl) != DECL_NO_LIMIT_STACK (item->decl))
@@ -576,7 +576,7 @@ sem_function::equals_wpa (sem_item *item,
 && TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
 {
 if (TREE_CODE (TREE_TYPE (item->decl)) != METHOD_TYPE)
-return return_false_with_msg ("DECL_CXX_CONSTURCTOR type mismatch");
+return return_false_with_msg ("DECL_CXX_CONSTRUCTOR type mismatch");
 else if (!func_checker::compatible_polymorphic_types_p
 (TYPE_METHOD_BASETYPE (TREE_TYPE (decl)),
 TYPE_METHOD_BASETYPE (TREE_TYPE (item->decl)), false))
@@ -726,7 +726,7 @@ sem_function::equals_wpa (sem_item *item,
 }
 /* Update hash by address sensitive references. We iterate over all
-sensitive references (address_matters_p) and we hash ultime alias
+sensitive references (address_matters_p) and we hash ultimate alias
 target of these nodes, which can improve a semantic item hash.
 Also hash in referenced symbols properties. This can be done at any time
@@ -1114,7 +1114,7 @@ sem_function::merge (sem_item *alias_item)
 }
 /* Do not turn function in one comdat group into wrapper to another
 comdat group. Other compiler producing the body of the
-another comdat group may make opossite decision and with unfortunate
+another comdat group may make opposite decision and with unfortunate
 linker choices this may close a loop. */
 else if (DECL_COMDAT_GROUP (original->decl)
 && DECL_COMDAT_GROUP (alias->decl)
@@ -1160,7 +1160,7 @@ sem_function::merge (sem_item *alias_item)
 else
 create_wrapper = true;
-/* We can redirect local calls in the case both alias and orignal
+/* We can redirect local calls in the case both alias and original
 are not interposable. */
 redirect_callers
 = alias->get_availability () > AVAIL_INTERPOSABLE
@@ -1989,7 +1989,7 @@ sem_variable::merge (sem_item *alias_item)
 return false;
 }
-/* We cannot merge if address comparsion metters. */
+/* We cannot merge if address comparison matters. */
 if (alias_address_matters && flag_merge_constants < 2)
 {
 if (dump_enabled_p ())
@@ -3420,7 +3420,7 @@ sem_item_optimizer::fixup_points_to_sets (void)
 fixup_pt_set (&SSA_NAME_PTR_INFO (name)->pt);
 fixup_pt_set (&fn->gimple_df->escaped);
-/* The above get's us to 99% I guess, at least catching the
+/* The above gets us to 99% I guess, at least catching the
 address compares. Below also gets us aliasing correct
 but as said we're giving leeway to the situation with
 readonly vars anyway, so ... */
@@ -3505,7 +3505,7 @@ ipa_icf_read_summary (void)
 optimizer->register_hooks ();
 }
-/* Semantic equality exection function. */
+/* Semantic equality execution function. */
 static unsigned int
 ipa_icf_driver (void)
--- a/gcc/ipa-icf.h
+++ b/gcc/ipa-icf.h
@@ -24,7 +24,7 @@ class sem_item;
 /* Congruence class encompasses a collection of either functions or
 read-only variables. These items are considered to be equivalent
-if not proved the oposite. */
+if not proved the opposite. */
 class congruence_class
 {
 public:
@@ -200,7 +200,7 @@ public:
 virtual bool equals_wpa (sem_item *item,
 hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
-/* Returns true if the item equals to ITEM given as arguemnt. */
+/* Returns true if the item equals to ITEM given as argument. */
 virtual bool equals (sem_item *item,
 hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
@@ -350,7 +350,7 @@ public:
 ipa_icf_gimple::func_checker *checker);
 /* Perform additional checks needed to match types of used function
-paramters. */
+parameters. */
 bool compatible_parm_types_p (tree, tree);
 /* Exception handling region tree. */
@@ -606,7 +606,7 @@ private:
 static bool release_split_map (congruence_class * const &cls, bitmap const &b,
 traverse_split_pair *pair);
-/* Process split operation for a cognruence class CLS,
+/* Process split operation for a congruence class CLS,
 where bitmap B splits congruence class members. DATA is used
 as argument of split pair. */
 static bool traverse_congruence_split (congruence_class * const &cls,
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -148,7 +148,7 @@ free_growth_caches (void)
 node_context_cache_clear = 0;
 }
-/* Return hints derrived from EDGE. */
+/* Return hints derived from EDGE. */
 int
 simple_edge_hints (struct cgraph_edge *edge)
--- a/gcc/ipa-inline-transform.c
+++ b/gcc/ipa-inline-transform.c
@@ -105,7 +105,7 @@ can_remove_node_now_p_1 (struct cgraph_node *node, struct cgraph_edge *e)
 && (!DECL_VIRTUAL_P (node->decl)
 || !opt_for_fn (node->decl, flag_devirtualize))
 /* During early inlining some unanalyzed cgraph nodes might be in the
-callgraph and they might reffer the function in question. */
+callgraph and they might refer the function in question. */
 && !cgraph_new_nodes.exists ());
 }
@@ -176,7 +176,7 @@ clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
 {
 /* We may eliminate the need for out-of-line copy to be output.
 In that case just go ahead and re-use it. This is not just an
-memory optimization. Making offline copy of fuction disappear
+memory optimization. Making offline copy of function disappear
 from the program will improve future decisions on inlining. */
 if (!e->callee->callers->next_caller
 /* Recursive inlining never wants the master clone to
@@ -192,7 +192,7 @@ clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
 need small function inlining to register edge removal hook to
 maintain the priority queue.
-For now we keep the ohter functions in the group in program until
+For now we keep the other functions in the group in program until
 cgraph_remove_unreachable_functions gets rid of them. */
 gcc_assert (!e->callee->inlined_to);
 e->callee->remove_from_same_comdat_group ();
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -517,7 +517,7 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
 && DECL_FUNCTION_PERSONALITY (callee->decl))
 || (check_maybe_up (flag_exceptions)
 && DECL_FUNCTION_PERSONALITY (callee->decl))
-/* When devirtualization is diabled for callee, it is not safe
+/* When devirtualization is disabled for callee, it is not safe
 to inline it as we possibly mangled the type info.
 Allow early inlining of always inlines. */
 || (!early && check_maybe_down (flag_devirtualize)))
@@ -547,7 +547,7 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
 ;
 /* If mismatch is caused by merging two LTO units with different
-optimizationflags we want to be bit nicer. However never inline
+optimization flags we want to be bit nicer. However never inline
 if one of functions is not optimized at all. */
 else if (!opt_for_fn (callee->decl, optimize)
 || !opt_for_fn (caller->decl, optimize))
@@ -783,8 +783,8 @@ compute_inlined_call_time (struct cgraph_edge *edge,
 return time;
 }
-/* Determine time saved by inlininig EDGE of frequency FREQ
-where callee's runtime w/o inlineing is UNINLINED_TYPE
+/* Determine time saved by inlining EDGE of frequency FREQ
+where callee's runtime w/o inlining is UNINLINED_TYPE
 and with inlined is INLINED_TYPE. */
 inline sreal
@@ -1222,7 +1222,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
 if (need_more_work)
 noninline_callee ();
 }
-Withhout penalizing this case, we usually inline noninline_callee
+Without penalizing this case, we usually inline noninline_callee
 into the inline_caller because overall_growth is small preventing
 further inlining of inline_caller.
@@ -1297,7 +1297,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
 }
 }
 /* When function local profile is not available or it does not give
-useful information (ie frequency is zero), base the cost on
+useful information (i.e. frequency is zero), base the cost on
 loop nest and overall size growth, so we optimize for overall number
 of functions fully inlined in program. */
 else
@@ -1349,7 +1349,7 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
 gcc_checking_assert (n->get_data () == edge);
 /* fibonacci_heap::replace_key does busy updating of the
-heap that is unnecesarily expensive.
+heap that is unnecessarily expensive.
 We do lazy increases: after extracting minimum if the key
 turns out to be out of date, it is re-inserted into heap
 with correct value. */
@@ -1383,7 +1383,7 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
 /* NODE was inlined.
-All caller edges needs to be resetted because
+All caller edges needs to be reset because
 size estimates change. Similarly callees needs reset
 because better context may be known. */
@@ -1520,7 +1520,7 @@ update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
 update_edge_key (heap, e);
 }
 /* We do not reset callee growth cache here. Since we added a new call,
-growth chould have just increased and consequentely badness metric
+growth should have just increased and consequently badness metric
 don't need updating. */
 else if (e->inline_failed
 && (callee = e->callee->ultimate_alias_target (&avail,
@@ -2082,7 +2082,7 @@ inline_small_functions (void)
 edge_growth_cache->get (edge)->hints = old_hints_est + 1;
 /* When updating the edge costs, we only decrease badness in the keys.
-Increases of badness are handled lazilly; when we see key with out
+Increases of badness are handled lazily; when we see key with out
 of date value on it, we re-insert it now. */
 current_badness = edge_badness (edge, false);
 gcc_assert (cached_badness == current_badness);
@@ -2225,7 +2225,7 @@ inline_small_functions (void)
 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
 /* If caller's size and time increased we do not need to update
-all edges becuase badness is not going to decrease. */
+all edges because badness is not going to decrease. */
 if (old_size <= ipa_size_summaries->get (where)->size
 && old_time <= ipa_fn_summaries->get (where)->time
 /* Wrapper penalty may be non-monotonous in this respect.
@@ -2569,7 +2569,7 @@ dump_inline_stats (void)
 "%" PRId64 " + previously indirect "
 "%" PRId64 " + virtual "
 "%" PRId64 " + virtual and previously indirect "
-"%" PRId64 " + stil indirect "
+"%" PRId64 " + still indirect "
 "%" PRId64 " + still indirect polymorphic "
 "%" PRId64 "\n", inlined_cnt,
 inlined_speculative, inlined_speculative_ply,
@@ -2725,7 +2725,7 @@ ipa_inline (void)
 into callee often leads to better optimization of callee due to
 increased context for optimization.
 For example if main() function calls a function that outputs help
-and then function that does the main optmization, we should inline
+and then function that does the main optimization, we should inline
 the second with priority even if both calls are cold by themselves.
 We probably want to implement new predicate replacing our use of
@@ -2850,7 +2850,7 @@ early_inline_small_functions (struct cgraph_node *node)
 {
 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
-/* We can enounter not-yet-analyzed function during
+/* We can encounter not-yet-analyzed function during
 early inlining on callgraphs with strongly
 connected components. */
 ipa_fn_summary *s = ipa_fn_summaries->get (callee);
...@@ -69,7 +69,7 @@ contains_polymorphic_type_p (const_tree type) ...@@ -69,7 +69,7 @@ contains_polymorphic_type_p (const_tree type)
} }
/* Return true if it seems valid to use placement new to build EXPECTED_TYPE /* Return true if it seems valid to use placement new to build EXPECTED_TYPE
at possition CUR_OFFSET within TYPE. at position CUR_OFFSET within TYPE.
POD can be changed to an instance of a polymorphic type by POD can be changed to an instance of a polymorphic type by
placement new. Here we play safe and assume that any placement new. Here we play safe and assume that any
...@@ -99,7 +99,7 @@ possible_placement_new (tree type, tree expected_type, ...@@ -99,7 +99,7 @@ possible_placement_new (tree type, tree expected_type,
to represent it. to represent it.
If OTR_TYPE is NULL, just find outermost polymorphic type with If OTR_TYPE is NULL, just find outermost polymorphic type with
virtual table present at possition OFFSET. virtual table present at position OFFSET.
For example when THIS represents type For example when THIS represents type
class A class A
...@@ -113,7 +113,7 @@ possible_placement_new (tree type, tree expected_type, ...@@ -113,7 +113,7 @@ possible_placement_new (tree type, tree expected_type,
If we cannot find corresponding class, give up by setting If we cannot find corresponding class, give up by setting
THIS->OUTER_TYPE to OTR_TYPE and THIS->OFFSET to NULL. THIS->OUTER_TYPE to OTR_TYPE and THIS->OFFSET to NULL.
Return true when lookup was sucesful. Return true when lookup was successful.
When CONSIDER_PLACEMENT_NEW is false, reject contexts that may be made When CONSIDER_PLACEMENT_NEW is false, reject contexts that may be made
valid only via allocation of new polymorphic type inside by means valid only via allocation of new polymorphic type inside by means
...@@ -147,7 +147,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type, ...@@ -147,7 +147,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type,
Because the instance type may contain field whose type is of OUTER_TYPE, Because the instance type may contain field whose type is of OUTER_TYPE,
we cannot derive any effective information about it. we cannot derive any effective information about it.
TODO: In the case we know all derrived types, we can definitely do better TODO: In the case we know all derived types, we can definitely do better
here. */ here. */
else if (TYPE_SIZE (outer_type) else if (TYPE_SIZE (outer_type)
&& tree_fits_shwi_p (TYPE_SIZE (outer_type)) && tree_fits_shwi_p (TYPE_SIZE (outer_type))
...@@ -240,7 +240,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type, ...@@ -240,7 +240,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type,
if (cur_offset != 0) if (cur_offset != 0)
goto no_useful_type_info; goto no_useful_type_info;
/* If we determined type precisely or we have no clue on /* If we determined type precisely or we have no clue on
speuclation, we are done. */ speculation, we are done. */
if (!maybe_derived_type || !speculative_outer_type if (!maybe_derived_type || !speculative_outer_type
|| !speculation_consistent_p (speculative_outer_type, || !speculation_consistent_p (speculative_outer_type,
speculative_offset, speculative_offset,
...@@ -317,7 +317,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type, ...@@ -317,7 +317,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type,
{ {
outer_type = type; outer_type = type;
offset = cur_offset; offset = cur_offset;
/* As soon as we se an field containing the type, /* As soon as we see an field containing the type,
we know we are not looking for derivations. */ we know we are not looking for derivations. */
maybe_derived_type = false; maybe_derived_type = false;
} }
@@ -395,7 +395,7 @@ no_useful_type_info:
else
return true;
}
-/* We found no way to embedd EXPECTED_TYPE in TYPE.
+/* We found no way to embed EXPECTED_TYPE in TYPE.
We still permit two special cases - placement new and
the case of variadic types containing themselves. */
if (!speculative
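The placement-new special case permitted above is the standard C++ mechanism by which raw storage acquires a new dynamic type; a minimal standalone illustration, not GCC code:

    #include <new>

    struct Poly { virtual int f () { return 1; } virtual ~Poly () {} };

    int
    main ()
    {
      /* Plain POD storage: no dynamic type yet.  */
      alignas (Poly) unsigned char buf[sizeof (Poly)];
      /* Placement new constructs a polymorphic object in BUF; from this
         point the storage carries a vtable pointer and a dynamic type.  */
      Poly *p = new (buf) Poly;
      int r = p->f ();
      p->~Poly ();  /* Manual destruction; no deallocation needed.  */
      return r == 1 ? 0 : 1;
    }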
@@ -552,7 +552,7 @@ decl_maybe_in_construction_p (tree base, tree outer_type,
return true;
/* Pure functions cannot do any changes on the dynamic type;
-that require writting to memory. */
+that require writing to memory. */
if ((!base || !auto_var_in_fn_p (base, function))
&& flags_from_decl_or_type (function) & (ECF_PURE | ECF_CONST))
return false;
@@ -721,7 +721,7 @@ ipa_polymorphic_call_context::stream_in (class lto_input_block *ib,
}
}
-/* Proudce polymorphic call context for call method of instance
+/* Produce polymorphic call context for call method of instance
that is located within BASE (that is assumed to be a decl) at offset OFF. */
void
@@ -915,7 +915,7 @@ ipa_polymorphic_call_context::ipa_polymorphic_call_context (tree fndecl,
if (TREE_CODE (base) == MEM_REF || DECL_P (base))
{
/* We found dereference of a pointer. Type of the pointer
-and MEM_REF is meaningless, but we can look futher. */
+and MEM_REF is meaningless, but we can look further. */
offset_int mem_offset;
if (TREE_CODE (base) == MEM_REF
&& mem_ref_offset (base).is_constant (&mem_offset))
@@ -1004,14 +1004,14 @@ ipa_polymorphic_call_context::ipa_polymorphic_call_context (tree fndecl,
0-thunk.fixed_offset. It starts with code that adds
think.fixed_offset to the pointer to compensate for this.
-Because we walked all the way to the begining of thunk, we now
+Because we walked all the way to the beginning of thunk, we now
see pointer &bar-thunk.fixed_offset and need to compensate
for it. */
if (node->thunk.fixed_offset)
offset -= node->thunk.fixed_offset * BITS_PER_UNIT;
/* Dynamic casting has possibly upcasted the type
-in the hiearchy. In this case outer type is less
+in the hierarchy. In this case outer type is less
informative than inner type and we should forget
about it. */
if ((otr_type
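The fixed-offset compensation above is easiest to picture with a multiple-inheritance thunk. The following standalone example (an illustration of typical Itanium C++ ABI behaviour, not GCC internals) is the classic case where the override is entered through a thunk that adjusts the incoming this pointer by a constant:

    struct A { virtual int f () { return 1; } virtual ~A () {} };
    struct B { virtual int g () { return 2; } virtual ~B () {} };

    /* The B subobject of C sits at a nonzero offset, so a virtual call to
       g() through a B* is typically dispatched through a thunk that first
       subtracts that fixed offset from `this' before entering C::g.  */
    struct C : A, B { int g () override { return 3; } };

    int
    main ()
    {
      C c;
      B *b = &c;  /* The conversion already adjusts to the B subobject.  */
      return b->g () == 3 ? 0 : 1;
    }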
@@ -1113,7 +1113,7 @@ ipa_polymorphic_call_context::ipa_polymorphic_call_context (tree fndecl,
offset,
true, NULL /* Do not change type here */);
/* TODO: There are multiple ways to derive a type. For instance
-if BASE_POINTER is passed to an constructor call prior our refernece.
+if BASE_POINTER is passed to an constructor call prior our reference.
We do not make this type of flow sensitive analysis yet. */
if (instance)
*instance = base_pointer;
@@ -1323,7 +1323,7 @@ extr_type_from_vtbl_ptr_store (gimple *stmt, struct type_change_info *tci,
{
if (dump_file)
fprintf (dump_file, " Construction vtable used\n");
-/* FIXME: We should suport construction contexts. */
+/* FIXME: We should support construction contexts. */
return NULL;
}
@@ -1557,7 +1557,7 @@ check_stmt_for_type_change (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
AA_WALK_BUDGET_P, if not NULL, is how statements we should allow
walk_aliased_vdefs to examine. The value should be decremented by the
-number of stetements we examined or set to zero if exhausted. */
+number of statements we examined or set to zero if exhausted. */
bool
ipa_polymorphic_call_context::get_dynamic_type (tree instance,
@@ -1583,7 +1583,7 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
otr_type = TYPE_MAIN_VARIANT (otr_type);
/* Walk into inner type. This may clear maybe_derived_type and save us
-from useless work. It also makes later comparsions with static type
+from useless work. It also makes later comparisons with static type
easier. */
if (outer_type && otr_type)
{
@@ -1599,7 +1599,7 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
if (TREE_CODE (instance) == MEM_REF)
return false;
-/* We need to obtain refernce to virtual table pointer. It is better
+/* We need to obtain reference to virtual table pointer. It is better
to look it up in the code rather than build our own. This require bit
of pattern matching, but we end up verifying that what we found is
correct.
@@ -1778,7 +1778,7 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
Therefore if the static outer type was found (outer_type)
we can safely ignore tci.speculative that is set on calls and give up
-only if there was dyanmic type store that may affect given variable
+only if there was dynamic type store that may affect given variable
(seen_unanalyzed_store) */
if (walked < 0)
@@ -1915,7 +1915,7 @@ ipa_polymorphic_call_context::combine_speculation_with
return false;
/* restrict_to_inner_class may eliminate wrong speculation making our job
-easeier. */
+easier. */
if (otr_type)
restrict_to_inner_class (otr_type);
@@ -1963,7 +1963,7 @@ ipa_polymorphic_call_context::combine_speculation_with
}
/* Choose type that contains the other. This one either contains the outer
as a field (thus giving exactly one target) or is deeper in the type
-hiearchy. */
+hierarchy. */
else if (speculative_outer_type
&& speculative_maybe_derived_type
&& (new_offset > speculative_offset
@@ -2015,7 +2015,7 @@ ipa_polymorphic_call_context::meet_speculation_with
}
/* restrict_to_inner_class may eliminate wrong speculation making our job
-easeier. */
+easier. */
if (otr_type)
restrict_to_inner_class (otr_type);
@@ -2095,8 +2095,8 @@ ipa_polymorphic_call_context::meet_speculation_with
}
}
-/* Assume that both THIS and a given context is valid and strenghten THIS
-if possible. Return true if any strenghtening was made.
+/* Assume that both THIS and a given context is valid and strengthen THIS
+if possible. Return true if any strengthening was made.
If actual type the context is being used in is known, OTR_TYPE should be
set accordingly. This improves quality of combined result. */
@@ -2261,7 +2261,7 @@ ipa_polymorphic_call_context::combine_with (ipa_polymorphic_call_context ctx,
goto invalidate;
}
}
-/* Pick variant deeper in the hiearchy. */
+/* Pick variant deeper in the hierarchy. */
else
{
outer_type = ctx.outer_type;
@@ -2299,7 +2299,7 @@ ipa_polymorphic_call_context::combine_with (ipa_polymorphic_call_context ctx,
}
}
}
-/* TODO handle merging using hiearchy. */
+/* TODO handle merging using hierarchy. */
else if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Giving up on merge\n");
@@ -2587,7 +2587,7 @@ ipa_polymorphic_call_context::meet_with (ipa_polymorphic_call_context ctx,
if (!dynamic && ctx.dynamic)
dynamic = true;
}
-/* TODO handle merging using hiearchy. */
+/* TODO handle merging using hierarchy. */
else
{
if (dump_file && (dump_flags & TDF_DETAILS))
...
@@ -444,8 +444,8 @@ dump_clause (FILE *f, conditions conds, clause_t clause)
}
-/* Dump THIS to F. CONDS a vector of conditions used when evauating
-predicats. When NL is true new line is output at the end of dump. */
+/* Dump THIS to F. CONDS a vector of conditions used when evaluating
+predicates. When NL is true new line is output at the end of dump. */
void
predicate::dump (FILE *f, conditions conds, bool nl) const
@@ -495,7 +495,7 @@ predicate::remap_after_duplication (clause_t possible_truths)
INFO is ipa_fn_summary of function we are adding predicate into, CALLEE_INFO
is summary of function predicate P is from. OPERAND_MAP is array giving
-callee formal IDs the caller formal IDs. POSSSIBLE_TRUTHS is clausule of all
+callee formal IDs the caller formal IDs. POSSSIBLE_TRUTHS is clause of all
callee conditions that may be true in caller context. TOPLEV_PREDICATE is
predicate under which callee is executed. OFFSET_MAP is an array of of
offsets that need to be added to conditions, negative offset means that
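As a rough model of the OPERAND_MAP described above (the names and types below are invented for illustration; they are not the actual GCC data structures), it is an array translating callee parameter indices into caller parameter indices, with a negative entry when no caller parameter corresponds:

    #include <vector>

    /* Hypothetical sketch: remap a condition on callee parameter
       CALLEE_PARM into the caller's parameter numbering; -1 means the
       condition cannot be expressed in terms of a caller parameter.  */
    static int
    remap_parm (const std::vector<int> &operand_map, int callee_parm)
    {
      if (callee_parm < 0 || callee_parm >= (int) operand_map.size ())
        return -1;
      return operand_map[callee_parm];
    }

    int
    main ()
    {
      /* Callee parm 0 is caller parm 2; callee parm 1 has no caller
         equivalent (e.g. a constant was passed at the call site).  */
      std::vector<int> operand_map = { 2, -1 };
      return remap_parm (operand_map, 0) == 2 ? 0 : 1;
    }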
...
@@ -62,7 +62,7 @@ struct GTY(()) condition
passed by reference and by value. */
unsigned by_ref : 1;
/* A set of sequential operations on the parameter, which can be seen as
-a mathmatical function on the parameter. */
+a mathematical function on the parameter. */
expr_eval_ops param_ops;
};
@@ -89,7 +89,7 @@ struct inline_param_summary
typedef vec<condition, va_gc> *conditions;
-/* Predicates are used to repesent function parameters (such as runtime)
+/* Predicates are used to represent function parameters (such as runtime)
which depend on a context function is called in.
Predicates are logical formulas in conjunctive-disjunctive form consisting
@@ -117,7 +117,7 @@ public:
first_dynamic_condition = 2
};
-/* Maximal number of conditions predicate can reffer to. This is limited
+/* Maximal number of conditions predicate can refer to. This is limited
by using clause_t to be 32bit. */
static const int num_conditions = 32;
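Because clause_t is a 32-bit mask, a predicate in the conjunctive-disjunctive form described above can be modelled as a small set of bitmasks: each clause is a disjunction whose set bits name conditions, and the predicate holds when every clause contains at least one true condition. A simplified sketch, not the actual GCC types:

    #include <cstdint>
    #include <vector>

    typedef uint32_t clause_t;  /* Bit I stands for condition I, hence
                                   the limit of 32 conditions.  */

    /* Predicate = conjunction of clauses; clause = disjunction of
       conditions.  TRUE_CONDITIONS has bit I set when condition I is
       known to hold.  */
    static bool
    evaluate (const std::vector<clause_t> &clauses, clause_t true_conditions)
    {
      for (clause_t c : clauses)
        if (!(c & true_conditions))
          return false;  /* Some clause has no true condition.  */
      return true;
    }

    int
    main ()
    {
      /* (cond0 | cond2) & cond1: holds when conditions 0 and 1 hold.  */
      std::vector<clause_t> p = { (1u << 0) | (1u << 2), 1u << 1 };
      return evaluate (p, (1u << 0) | (1u << 1)) ? 0 : 1;
    }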
...
@@ -46,7 +46,7 @@ public:
function. */
bool cannot_lead_to_return ();
-/* Return true if refernece may be used in address compare. */
+/* Return true if reference may be used in address compare. */
bool address_matters_p ();
/* Return reference list this reference is in. */
...
@@ -168,7 +168,7 @@ test_nonssa_use (gimple *, tree t, tree, void *data)
|| (VAR_P (t)
&& auto_var_in_fn_p (t, current_function_decl))
|| TREE_CODE (t) == RESULT_DECL
-/* Normal labels are part of CFG and will be handled gratefuly.
+/* Normal labels are part of CFG and will be handled gratefully.
Forced labels however can be used directly by statements and
need to stay in one partition along with their uses. */
|| (TREE_CODE (t) == LABEL_DECL
@@ -455,7 +455,7 @@ consider_split (class split_point *current, bitmap non_ssa_vars,
(param_partial_inlining_entry_probability, 100))))
{
/* When profile is guessed, we cannot expect it to give us
-realistic estimate on likelyness of function taking the
+realistic estimate on likeliness of function taking the
complex path. As a special case, when tail of the function is
a loop, enable splitting since inlining code skipping the loop
is likely noticeable win. */
...
@@ -105,7 +105,7 @@ profile_count::debug () const
fprintf (stderr, "\n");
}
-/* Return true if THIS differs from OTHER; tolerate small diferences. */
+/* Return true if THIS differs from OTHER; tolerate small differences. */
bool
profile_count::differs_from_p (profile_count other) const
@@ -186,7 +186,7 @@ profile_probability::debug () const
fprintf (stderr, "\n");
}
-/* Return true if THIS differs from OTHER; tolerate small diferences. */
+/* Return true if THIS differs from OTHER; tolerate small differences. */
bool
profile_probability::differs_from_p (profile_probability other) const
@@ -388,7 +388,7 @@ profile_count::from_gcov_type (gcov_type v, profile_quality quality)
}
/* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
-happens with COUNT2 probablity. Return probablity that either *THIS or
+happens with COUNT2 probability. Return probability that either *THIS or
OTHER happens. */
profile_probability
@@ -398,7 +398,7 @@ profile_probability::combine_with_count (profile_count count1,
{
/* If probabilities are same, we are done.
If counts are nonzero we can distribute accordingly. In remaining
-cases just avreage the values and hope for the best. */
+cases just average the values and hope for the best. */
if (*this == other || count1 == count2
|| (count2 == profile_count::zero ()
&& !(count1 == profile_count::zero ())))
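Numerically, the combination this hunk implements is a count-weighted average, with a plain average as the last-resort fallback. A sketch in doubles (the real code works on the fixed-point profile_probability representation):

    #include <cstdio>

    /* P1 was observed COUNT1 times and P2 COUNT2 times; the combined
       probability of either event is the count-weighted average, or the
       plain average when no counts are available.  */
    static double
    combine_with_count (double p1, double count1, double p2, double count2)
    {
      if (count1 + count2 == 0)
        return (p1 + p2) / 2;
      return (p1 * count1 + p2 * count2) / (count1 + count2);
    }

    int
    main ()
    {
      /* 0.9 seen 100 times combined with 0.1 seen 300 times gives 0.3.  */
      std::printf ("%f\n", combine_with_count (0.9, 100, 0.1, 300));
      return 0;
    }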
...
@@ -37,7 +37,7 @@ enum profile_quality {
GUESSED_LOCAL,
/* Profile was read by feedback and was 0, we used local heuristics to guess
-better. This is the case of functions not run in profile fedback.
+better. This is the case of functions not run in profile feedback.
Never used by probabilities. */
GUESSED_GLOBAL0,
@@ -48,7 +48,7 @@ enum profile_quality {
not reflect the reality but it can be compared interprocedurally
(for example, we inlined function w/o profile feedback into function
with feedback and propagated from that).
-Never used by probablities. */
+Never used by probabilities. */
GUESSED,
/* Profile was determined by autofdo. */
@@ -111,7 +111,7 @@ safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
In addition to actual value the quality of profile is tracked and propagated
through all operations. Special value UNINITIALIZED_PROFILE is used for probabilities
-that has not been determined yet (for example bacause of
+that has not been determined yet (for example because of
-fno-guess-branch-probability)
Typically probabilities are derived from profile feedback (via
@@ -122,7 +122,7 @@ safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
- never (0 probability)
- guessed_never
- very_unlikely (1/2000 probability)
-- unlikely (1/5 probablity)
+- unlikely (1/5 probability)
- even (1/2 probability)
- likely (4/5 probability)
- very_likely (1999/2000 probability)
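For quick reference, the named probabilities above as plain fractions (the class stores them as fixed-point integers; always, which appears in the code further below, is assumed to complete the list):

    #include <cstdio>

    int
    main ()
    {
      struct { const char *name; double p; } vals[] = {
        { "never",         0.0 },
        { "very_unlikely", 1.0 / 2000 },
        { "unlikely",      1.0 / 5 },
        { "even",          1.0 / 2 },
        { "likely",        4.0 / 5 },
        { "very_likely",   1999.0 / 2000 },
        { "always",        1.0 },
      };
      for (auto &v : vals)
        std::printf ("%-14s %f\n", v.name, v.p);
      return 0;
    }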
@@ -479,7 +479,7 @@ public:
/* The following is equivalent to:
*this = cprob.invert () * *this / ret.invert ();
Avoid scaling when overall outcome is supposed to be always.
-Without knowing that one is inverse of toher, the result would be
+Without knowing that one is inverse of other, the result would be
conservative. */
if (!(*this == always ()))
*this = (*this - ret) / ret.invert ();
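The equivalence asserted in this comment is plain algebra: assuming ret was computed as cprob applied to *this (i.e. ret = cprob * p, as the surrounding code suggests) and invert (x) = 1 - x, then p - ret = p - cprob * p = cprob.invert () * p, so both forms divide the same numerator by ret.invert (). A double-precision sanity check of that assumption:

    #include <cmath>
    #include <cstdio>

    int
    main ()
    {
      double p = 0.9, cprob = 0.5;
      double ret = cprob * p;                    /* assumed relation */
      double a = (1 - cprob) * p / (1 - ret);    /* form in the comment */
      double b = (p - ret) / (1 - ret);          /* form in the code */
      std::printf ("%f %f\n", a, b);
      return std::fabs (a - b) < 1e-12 ? 0 : 1;
    }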
@@ -532,7 +532,7 @@ public:
/* Return true when the probability of edge is reliable.
-The profile guessing code is good at predicting branch outcome (ie.
+The profile guessing code is good at predicting branch outcome (i.e.
taken/not taken), that is predicted right slightly over 75% of time.
It is however notoriously poor on predicting the probability itself.
In general the profile appear a lot flatter (with probabilities closer
@@ -567,7 +567,7 @@ public:
return m_val <= max_probability;
}
-/* Comparsions are three-state and conservative. False is returned if
+/* Comparisons are three-state and conservative. False is returned if
the inequality cannot be decided. */
bool operator< (const profile_probability &other) const
{
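The three-state convention means a < b and a >= b are both false whenever either operand is undecidable, so operator< is not the negation of operator>= (a point the profile_count comments below repeat). A sketch of the idea with an invented optional-like wrapper, not the GCC implementation:

    #include <cstdio>

    struct maybe_int
    {
      bool initialized;
      int val;

      /* Conservative three-state comparisons: false when undecidable.  */
      bool operator< (const maybe_int &o) const
      { return initialized && o.initialized && val < o.val; }
      bool operator>= (const maybe_int &o) const
      { return initialized && o.initialized && val >= o.val; }
    };

    int
    main ()
    {
      maybe_int a = { false, 0 }, b = { true, 7 };
      /* a < b prints 0 while !(a >= b) prints 1: one is not the
         negation of the other.  */
      std::printf ("a<b=%d  !(a>=b)=%d\n", (int) (a < b), (int) !(a >= b));
      return 0;
    }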
@@ -608,7 +608,7 @@ public:
bool differs_lot_from_p (profile_probability other) const;
/* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
-happens with COUNT2 probablity. Return probablity that either *THIS or
+happens with COUNT2 probability. Return probability that either *THIS or
OTHER happens. */
profile_probability combine_with_count (profile_count count1,
profile_probability other,
@@ -631,7 +631,7 @@ public:
estimation.
2) ipa counters which are result of profile feedback or special case
of static profile estimation (such as in function main).
-3) counters which counts as 0 inter-procedurally (beause given function
+3) counters which counts as 0 inter-procedurally (because given function
was never run in train feedback) but they hold local static profile
estimate.
@@ -641,7 +641,7 @@ public:
well defined.
To take local counter and use it inter-procedurally use ipa member function
-which strips information irelevant at the inter-procedural level.
+which strips information irrelevant at the inter-procedural level.
Counters are 61bit integers representing number of executions during the
train run or normalized frequency within the function.
@@ -660,7 +660,7 @@ public:
and they do end up in uninitialized scale if any of the parameters is
uninitialized.
-All comparsions that are three state and handling of probabilities. Thus
+All comparisons that are three state and handling of probabilities. Thus
a < b is not equal to !(a >= b).
The following pre-defined counts are available:
@@ -770,7 +770,7 @@ public:
return m_quality >= ADJUSTED;
}
-/* Return true if vlaue can be operated inter-procedurally. */
+/* Return true if value can be operated inter-procedurally. */
bool ipa_p () const
{
return !initialized_p () || m_quality >= GUESSED_GLOBAL0;
@@ -890,7 +890,7 @@ public:
return m_val != uninitialized_count || m_quality == GUESSED_LOCAL;
}
-/* Comparsions are three-state and conservative. False is returned if
+/* Comparisons are three-state and conservative. False is returned if
the inequality cannot be decided. */
bool operator< (const profile_count &other) const
{
@@ -976,7 +976,7 @@ public:
return initialized_p () && m_val != 0;
}
-/* Make counter forcingly nonzero. */
+/* Make counter forcibly nonzero. */
profile_count force_nonzero () const
{
if (!initialized_p ())
@@ -1119,8 +1119,8 @@ public:
return ret;
}
-/* Return variant of profile counte which is always safe to compare
-acorss functions. */
+/* Return variant of profile count which is always safe to compare
+across functions. */
profile_count ipa () const
{
if (m_quality > GUESSED_GLOBAL0_ADJUSTED)