summaryrefslogtreecommitdiff
path: root/gcc/ipa-inline.c
diff options
context:
space:
mode:
authorhubicka <hubicka@138bc75d-0d04-0410-961f-82ee72b054a4>2010-04-13 18:22:35 +0000
committerhubicka <hubicka@138bc75d-0d04-0410-961f-82ee72b054a4>2010-04-13 18:22:35 +0000
commit022b3380cc939bd06e310f17578c670e5365050c (patch)
tree781e2dec9feabff28d0f9be27d6fc88b6cc7426e /gcc/ipa-inline.c
parente8a2efc1b53e5c0e2d067f2e394f1438140854cc (diff)
downloadgcc-022b3380cc939bd06e310f17578c670e5365050c.tar.gz
* ipa-inline.c (cgraph_mark_inline_edge): Avoid double accounting
of optimized out static functions. (cgraph_edge_badness): Add DUMP parameter and dump reasons for the cost computation. Also sanity check for overflows. (update_caller_keys): Update cgraph_edge_badness call; properly update fibheap and sanity check that it is up to date. (add_new_edges_to_heap): Update cgraph_edge_badness. (cgraph_decide_inlining_of_small_functions): Likewise; add sanity checking that badness in heap is up to date; improve dumping of reason; Update badness of calls to the offline copy of function currently inlined; dump badness of functions not inlined because of unit growth limits. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@158278 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/ipa-inline.c')
-rw-r--r--gcc/ipa-inline.c161
1 file changed, 125 insertions, 36 deletions
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index e9ba04b371c..601695a3fda 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -306,8 +306,6 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
struct cgraph_node *to = NULL, *what;
struct cgraph_edge *curr = e;
int freq;
- bool duplicate = false;
- int orig_size = e->callee->global.size;
gcc_assert (e->inline_failed);
e->inline_failed = CIF_OK;
@@ -316,10 +314,6 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
DECL_POSSIBLY_INLINED (e->callee->decl) = true;
e->callee->global.inlined = true;
- if (e->callee->callers->next_caller
- || !cgraph_can_remove_if_no_direct_calls_p (e->callee)
- || e->callee->same_comdat_group)
- duplicate = true;
cgraph_clone_inlined_nodes (e, true, update_original);
what = e->callee;
@@ -337,8 +331,6 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
gcc_assert (what->global.inlined_to == to);
if (new_size > old_size)
overall_size += new_size - old_size;
- if (!duplicate)
- overall_size -= orig_size;
ncalls_inlined++;
if (flag_indirect_inlining)
@@ -544,23 +536,54 @@ cgraph_recursive_inlining_p (struct cgraph_node *to,
of the function or function body size. */
static int
-cgraph_edge_badness (struct cgraph_edge *edge)
+cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
{
gcov_type badness;
int growth =
- cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+ (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
+ - edge->caller->global.size);
- growth -= edge->caller->global.size;
+ if (dump)
+ {
+ fprintf (dump_file, " Badness calculcation for %s -> %s\n",
+ cgraph_node_name (edge->caller),
+ cgraph_node_name (edge->callee));
+ fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
+ growth,
+ edge->callee->global.time,
+ inline_summary (edge->callee)->time_inlining_benefit,
+ edge->callee->global.size,
+ inline_summary (edge->callee)->size_inlining_benefit);
+ }
/* Always prefer inlining saving code size. */
if (growth <= 0)
- badness = INT_MIN - growth;
+ {
+ badness = INT_MIN - growth;
+ if (dump)
+ fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
+ growth);
+ }
/* When profiling is available, base priorities -(#calls / growth).
So we optimize for overall number of "executed" inlined calls. */
else if (max_count)
- badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
- * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
+ {
+ badness =
+ ((int)
+ ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
+ (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
+ if (dump)
+ {
+ fprintf (dump_file,
+ " %i (relative %f): profile info. Relative count %f"
+ " * Relative benefit %f\n",
+ (int) badness, (double) badness / INT_MIN,
+ (double) edge->count / max_count,
+ (double) (inline_summary (edge->callee)->
+ time_inlining_benefit + 1) / (max_benefit + 1));
+ }
+ }
/* When function local profile is available, base priorities on
growth / frequency, so we optimize for overall frequency of inlined
@@ -574,9 +597,13 @@ cgraph_edge_badness (struct cgraph_edge *edge)
else if (flag_guess_branch_prob)
{
int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
+ int benefitperc;
+ int growth_for_all;
badness = growth * 10000;
- div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
- / (edge->callee->global.time + 1) + 1, 100);
+ benefitperc =
+ MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
+ (edge->callee->global.time + 1) +1, 100);
+ div *= benefitperc;
/* Decrease badness if call is nested. */
@@ -587,9 +614,17 @@ cgraph_edge_badness (struct cgraph_edge *edge)
div = 1;
if (badness > 0)
badness /= div;
- badness += cgraph_estimate_growth (edge->callee);
+ growth_for_all = cgraph_estimate_growth (edge->callee);
+ badness += growth_for_all;
if (badness > INT_MAX)
- badness = INT_MAX;
+ badness = INT_MAX;
+ if (dump)
+ {
+ fprintf (dump_file,
+ " %i: guessed profile. frequency %i, overall growth %i,"
+ " benefit %i%%, divisor %i\n",
+ (int) badness, edge->frequency, growth_for_all, benefitperc, div);
+ }
}
/* When function local profile is not available or it does not give
useful information (ie frequency is zero), base the cost on
@@ -604,10 +639,17 @@ cgraph_edge_badness (struct cgraph_edge *edge)
if (badness > 0)
badness >>= nest;
else
- {
+ {
badness <<= nest;
- }
+ }
+ if (dump)
+ fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
+ nest);
}
+
+ /* Ensure that we did not overflow in all the fixed point math above. */
+ gcc_assert (badness >= INT_MIN);
+ gcc_assert (badness <= INT_MAX - 1);
/* Make recursive inlining happen always after other inlining is done. */
if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
return badness + 1;
@@ -651,7 +693,7 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
for (edge = node->callers; edge; edge = edge->next_caller)
if (edge->inline_failed)
{
- int badness = cgraph_edge_badness (edge);
+ int badness = cgraph_edge_badness (edge, false);
if (edge->aux)
{
fibnode_t n = (fibnode_t) edge->aux;
@@ -660,8 +702,12 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
continue;
/* fibheap_replace_key only increase the keys. */
- if (fibheap_replace_key (heap, n, badness))
- continue;
+ if (badness < n->key)
+ {
+ fibheap_replace_key (heap, n, badness);
+ gcc_assert (n->key == badness);
+ continue;
+ }
fibheap_delete_node (heap, (fibnode_t) edge->aux);
}
edge->aux = fibheap_insert (heap, badness, edge);
@@ -889,7 +935,7 @@ add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
gcc_assert (!edge->aux);
- edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
+ edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
}
}
@@ -938,7 +984,7 @@ cgraph_decide_inlining_of_small_functions (void)
if (edge->inline_failed)
{
gcc_assert (!edge->aux);
- edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
+ edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
}
}
@@ -946,15 +992,26 @@ cgraph_decide_inlining_of_small_functions (void)
min_size = overall_size;
while (overall_size <= max_size
- && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
+ && !fibheap_empty (heap))
{
int old_size = overall_size;
- struct cgraph_node *where;
- int growth =
- cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+ struct cgraph_node *where, *callee;
+ int badness = fibheap_min_key (heap);
+ int growth;
cgraph_inline_failed_t not_good = CIF_OK;
- growth -= edge->caller->global.size;
+ edge = (struct cgraph_edge *) fibheap_extract_min (heap);
+ gcc_assert (edge->aux);
+ edge->aux = NULL;
+ if (!edge->inline_failed)
+ continue;
+#ifdef ENABLE_CHECKING
+ gcc_assert (cgraph_edge_badness (edge, false) == badness);
+#endif
+ callee = edge->callee;
+
+ growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
+ - edge->caller->global.size);
if (dump_file)
{
@@ -970,15 +1027,13 @@ cgraph_decide_inlining_of_small_functions (void)
gimple_filename ((const_gimple) edge->call_stmt),
gimple_lineno ((const_gimple) edge->call_stmt),
cgraph_estimate_growth (edge->callee),
- cgraph_edge_badness (edge),
+ badness,
edge->frequency / (double)CGRAPH_FREQ_BASE);
if (edge->count)
fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
+ if (dump_flags & TDF_DETAILS)
+ cgraph_edge_badness (edge, true);
}
- gcc_assert (edge->aux);
- edge->aux = NULL;
- if (!edge->inline_failed)
- continue;
/* When not having profile info ready we don't weight by any way the
position of call in procedure itself. This means if call of
@@ -1096,6 +1151,11 @@ cgraph_decide_inlining_of_small_functions (void)
called by function we inlined (since number of it inlinable callers
might change). */
update_caller_keys (heap, where, updated_nodes);
+
+ /* We removed one call of the function we just inlined. If offline
+ copy is still needed, be sure to update the keys. */
+ if (callee != where && !callee->global.inlined_to)
+ update_caller_keys (heap, callee, updated_nodes);
bitmap_clear (updated_nodes);
if (dump_file)
@@ -1117,10 +1177,39 @@ cgraph_decide_inlining_of_small_functions (void)
fprintf (dump_file, "New minimal size reached: %i\n", min_size);
}
}
- while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
+ while (!fibheap_empty (heap))
{
+ int badness = fibheap_min_key (heap);
+
+ edge = (struct cgraph_edge *) fibheap_extract_min (heap);
gcc_assert (edge->aux);
edge->aux = NULL;
+ if (!edge->inline_failed)
+ continue;
+#ifdef ENABLE_CHECKING
+ gcc_assert (cgraph_edge_badness (edge, false) == badness);
+#endif
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "\nSkipping %s with %i size\n",
+ cgraph_node_name (edge->callee),
+ edge->callee->global.size);
+ fprintf (dump_file,
+ " called by %s in %s:%i\n"
+ " Estimated growth after inlined into all callees is %+i insns.\n"
+ " Estimated badness is %i, frequency %.2f.\n",
+ cgraph_node_name (edge->caller),
+ gimple_filename ((const_gimple) edge->call_stmt),
+ gimple_lineno ((const_gimple) edge->call_stmt),
+ cgraph_estimate_growth (edge->callee),
+ badness,
+ edge->frequency / (double)CGRAPH_FREQ_BASE);
+ if (edge->count)
+ fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
+ if (dump_flags & TDF_DETAILS)
+ cgraph_edge_badness (edge, true);
+ }
if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
&& !cgraph_recursive_inlining_p (edge->caller, edge->callee,
&edge->inline_failed))