path: root/gcc/ipa-inline.c
author     hjl <hjl@138bc75d-0d04-0410-961f-82ee72b054a4>  2010-07-23 19:37:40 +0000
committer  hjl <hjl@138bc75d-0d04-0410-961f-82ee72b054a4>  2010-07-23 19:37:40 +0000
commit     10ada81fea4490f94ba2eb5923bf5baa367a38bd (patch)
tree       437dca120093cc7b1f6debf6f6b31779526c7192 /gcc/ipa-inline.c
parent     95a236de8aa10bf009e9368dfd28f95a980e5570 (diff)
parent     3bd7a983695352a99f7dd597725eb5b839d4b4cf (diff)
Merged with trunk at revision 162480.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/ifunc@162483 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/ipa-inline.c')
-rw-r--r--  gcc/ipa-inline.c  128
1 file changed, 94 insertions, 34 deletions
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index f9e4cf3cfa7..b5063bf8c22 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -334,7 +334,9 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
overall_size += new_size - old_size;
ncalls_inlined++;
- if (flag_indirect_inlining)
+ /* FIXME: We should remove the optimize check after we ensure we never run
+ IPA passes when not optimizing. */
+ if (flag_indirect_inlining && optimize)
return ipa_propagate_indirect_call_infos (curr, new_edges);
else
return false;
@@ -389,7 +391,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
we decide to not inline for different reasons, but it is not big deal
as in that case we will keep the body around, but we will also avoid
some inlining. */
- if (cgraph_only_called_directly_p (node)
+ if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
&& !DECL_EXTERNAL (node->decl) && !self_recursive)
growth -= node->global.size;
@@ -661,6 +663,30 @@ cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
return badness;
}
+/* Recompute badness of EDGE and update its key in HEAP if needed. */
+static void
+update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
+{
+ int badness = cgraph_edge_badness (edge, false);
+ if (edge->aux)
+ {
+ fibnode_t n = (fibnode_t) edge->aux;
+ gcc_checking_assert (n->data == edge);
+
+ /* fibheap_replace_key only decreases the keys.
+ When we increase the key we do not update the heap
+ and instead re-insert the element once it becomes
+ a minimum of the heap. */
+ if (badness < n->key)
+ {
+ fibheap_replace_key (heap, n, badness);
+ gcc_checking_assert (n->key == badness);
+ }
+ }
+ else
+ edge->aux = fibheap_insert (heap, badness, edge);
+}
+
/* Recompute heap nodes for each of caller edge. */
static void
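
The new update_edge_key above relies on a lazy priority-queue discipline: the fibonacci heap is only ever asked to decrease keys, so an edge whose badness grew keeps its stale key until it surfaces as the heap minimum, at which point the consumer is expected to re-check the value and re-insert the edge. Below is a minimal self-contained sketch of that pattern (not GCC code; a hypothetical array-based min-heap stands in for libiberty's fibheap, and the badness array plays the role of cgraph_edge_badness):

/* Minimal sketch (not GCC code) of the lazy key-update discipline used by
   update_edge_key: the heap only gets cheap decrease-key operations, and a
   key that grew is left stale; the element is simply re-inserted when it
   surfaces as the minimum.  */
#include <stdio.h>

#define MAX_ENTRIES 16

struct entry { int key; int id; };

static struct entry heap[MAX_ENTRIES];
static int heap_size;

/* Current "true" badness of each element; may drift after insertion.  */
static int badness[4] = { 10, 20, 30, 40 };

static void
swap_entries (int i, int j)
{
  struct entry t = heap[i];
  heap[i] = heap[j];
  heap[j] = t;
}

static void
sift_up (int i)
{
  while (i > 0 && heap[(i - 1) / 2].key > heap[i].key)
    {
      swap_entries (i, (i - 1) / 2);
      i = (i - 1) / 2;
    }
}

static void
sift_down (int i)
{
  for (;;)
    {
      int l = 2 * i + 1, r = 2 * i + 2, m = i;
      if (l < heap_size && heap[l].key < heap[m].key)
        m = l;
      if (r < heap_size && heap[r].key < heap[m].key)
        m = r;
      if (m == i)
        return;
      swap_entries (i, m);
      i = m;
    }
}

static void
push (int key, int id)
{
  heap[heap_size].key = key;
  heap[heap_size].id = id;
  sift_up (heap_size++);
}

int
main (void)
{
  int i;
  for (i = 0; i < 4; i++)
    push (badness[i], i);

  /* Badness of element 0 increases after it was queued.  Like
     update_edge_key, we do not touch the heap for an increase.  */
  badness[0] = 35;

  while (heap_size)
    {
      struct entry top = heap[0];
      heap[0] = heap[--heap_size];
      sift_down (0);
      /* Stale key: re-insert with the current value instead of
         processing the element now.  */
      if (top.key < badness[top.id])
        {
          push (badness[top.id], top.id);
          continue;
        }
      printf ("process %d (badness %d)\n", top.id, badness[top.id]);
    }
  return 0;
}

Skipping the increase keeps each update cheap (decrease-key is O(1) amortized on a fibonacci heap); the price is an occasional extra pop and re-insert for stale entries, which only happens when the element would have reached the top anyway.
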
@@ -678,8 +704,6 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
bitmap_set_bit (updated_nodes, node->uid);
node->global.estimated_growth = INT_MIN;
- if (!node->local.inlinable)
- return;
/* See if there is something to do. */
for (edge = node->callers; edge; edge = edge->next_caller)
if (edge->inline_failed)
@@ -702,28 +726,53 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
for (; edge; edge = edge->next_caller)
if (edge->inline_failed)
+ update_edge_key (heap, edge);
+}
+
+/* Recompute heap nodes for each uninlined call.
+ This is used when we know that edge badnesses are only going to increase
+ (we introduced a new call site) and thus all we need is to insert newly
+ created edges into the heap. */
+
+static void
+update_callee_keys (fibheap_t heap, struct cgraph_node *node,
+ bitmap updated_nodes)
+{
+ struct cgraph_edge *e = node->callees;
+ node->global.estimated_growth = INT_MIN;
+
+ if (!e)
+ return;
+ while (true)
+ if (!e->inline_failed && e->callee->callees)
+ e = e->callee->callees;
+ else
{
- int badness = cgraph_edge_badness (edge, false);
- if (edge->aux)
+ if (e->inline_failed
+ && e->callee->local.inlinable
+ && !bitmap_bit_p (updated_nodes, e->callee->uid))
{
- fibnode_t n = (fibnode_t) edge->aux;
- gcc_assert (n->data == edge);
- if (n->key == badness)
- continue;
-
- /* fibheap_replace_key only decrease the keys.
- When we increase the key we do not update heap
- and instead re-insert the element once it becomes
- a minium of heap. */
- if (badness < n->key)
+ node->global.estimated_growth = INT_MIN;
+ /* If function becomes uninlinable, we need to remove it from the heap. */
+ if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
+ update_caller_keys (heap, e->callee, updated_nodes);
+ else
+ /* Otherwise update just edge E. */
+ update_edge_key (heap, e);
+ }
+ if (e->next_callee)
+ e = e->next_callee;
+ else
+ {
+ do
{
- fibheap_replace_key (heap, n, badness);
- gcc_assert (n->key == badness);
- continue;
+ if (e->caller == node)
+ return;
+ e = e->caller->callers;
}
+ while (!e->next_callee);
+ e = e->next_callee;
}
- else
- edge->aux = fibheap_insert (heap, badness, edge);
}
}
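
The new update_callee_keys above walks, without recursion, the whole body that NODE acquired through inlining: edges whose call was inlined are descended into, the remaining (uninlined) calls get their keys refreshed, and the caller links are used to climb back up once a subtree is exhausted. The following self-contained sketch shows the same traversal shape; the node and edge structs are hypothetical stand-ins for struct cgraph_node and struct cgraph_edge, and the printf marks where update_edge_key would run:

/* Sketch (not GCC code) of the recursion-free walk over inlined callees.  */
#include <stdio.h>

struct node;

struct edge
{
  struct node *caller;
  struct node *callee;
  struct edge *next_callee;   /* next call site in the same caller */
  int inline_failed;          /* nonzero: still a real (uninlined) call */
  const char *name;
};

struct node
{
  struct edge *callees;       /* first outgoing call site */
  struct edge *callers;       /* edge through which this inline copy was called */
};

static void
walk_uninlined_calls (struct node *root)
{
  struct edge *e = root->callees;
  if (!e)
    return;
  while (1)
    if (!e->inline_failed && e->callee->callees)
      /* The call was inlined, so its callee's call sites now belong to
         ROOT's body: descend into them.  */
      e = e->callee->callees;
    else
      {
        if (e->inline_failed)
          printf ("visit %s\n", e->name);   /* update_edge_key would run here */
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            /* No sibling left: climb towards ROOT until one exists.  */
            do
              {
                if (e->caller == root)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

int
main (void)
{
  struct node r = { 0, 0 }, a = { 0, 0 }, b = { 0, 0 }, c = { 0, 0 };
  struct edge r_a = { &r, &a, 0, 0, "R->A (inlined)" };
  struct edge r_b = { &r, &b, 0, 1, "R->B" };
  struct edge a_c = { &a, &c, 0, 1, "A->C" };

  r_a.next_callee = &r_b;
  r.callees = &r_a;
  a.callees = &a_c;
  a.callers = &r_a;
  b.callers = &r_b;
  c.callers = &a_c;

  walk_uninlined_calls (&r);   /* prints: visit A->C, then visit R->B */
  return 0;
}

With the example call tree built in main, the walk visits A->C (a call site inherited from the inlined R->A edge), then R->B, and stops when the climb reaches the root again.
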
@@ -731,8 +780,8 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
Walk recursively into all inline clones. */
static void
-update_callee_keys (fibheap_t heap, struct cgraph_node *node,
- bitmap updated_nodes)
+update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
+ bitmap updated_nodes)
{
struct cgraph_edge *e = node->callees;
node->global.estimated_growth = INT_MIN;
@@ -1166,7 +1215,7 @@ cgraph_decide_inlining_of_small_functions (void)
continue;
if (flag_indirect_inlining)
add_new_edges_to_heap (heap, new_indirect_edges);
- update_callee_keys (heap, where, updated_nodes);
+ update_all_callee_keys (heap, where, updated_nodes);
}
else
{
@@ -1182,11 +1231,18 @@ cgraph_decide_inlining_of_small_functions (void)
continue;
}
callee = edge->callee;
+ gcc_checking_assert (!callee->global.inlined_to);
cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
if (flag_indirect_inlining)
add_new_edges_to_heap (heap, new_indirect_edges);
- update_callee_keys (heap, callee, updated_nodes);
+ /* We inlined the last offline copy into the body. This might lead
+ to callees of the function having fewer call sites and thus they
+ may need updating. */
+ if (callee->global.inlined_to)
+ update_all_callee_keys (heap, callee, updated_nodes);
+ else
+ update_callee_keys (heap, edge->callee, updated_nodes);
}
where = edge->caller;
if (where->global.inlined_to)
@@ -1442,14 +1498,13 @@ cgraph_decide_inlining (void)
if (node->callers
&& !node->callers->next_caller
- && cgraph_only_called_directly_p (node)
+ && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
&& node->local.inlinable
&& node->callers->inline_failed
&& node->callers->caller != node
&& node->callers->caller->global.inlined_to != node
&& !node->callers->call_stmt_cannot_inline_p
- && !DECL_EXTERNAL (node->decl)
- && !DECL_COMDAT (node->decl))
+ && !DECL_EXTERNAL (node->decl))
{
cgraph_inline_failed_t reason;
old_size = overall_size;
@@ -1832,10 +1887,12 @@ likely_eliminated_by_inlining_p (gimple stmt)
bool rhs_free = false;
bool lhs_free = false;
- while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
+ while (handled_component_p (inner_lhs)
+ || TREE_CODE (inner_lhs) == MEM_REF)
inner_lhs = TREE_OPERAND (inner_lhs, 0);
while (handled_component_p (inner_rhs)
- || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
+ || TREE_CODE (inner_rhs) == ADDR_EXPR
+ || TREE_CODE (inner_rhs) == MEM_REF)
inner_rhs = TREE_OPERAND (inner_rhs, 0);
@@ -1855,7 +1912,8 @@ likely_eliminated_by_inlining_p (gimple stmt)
|| (TREE_CODE (inner_lhs) == SSA_NAME
&& TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
lhs_free = true;
- if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
+ if (lhs_free
+ && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
rhs_free = true;
if (lhs_free && rhs_free)
return true;
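
For intuition, the statements this predicate tries to discount are loads and stores whose base, after stripping handled components and (now) MEM_REF, is a parameter or the result: once the body is inlined such copies collapse into plain register moves and disappear. A hand-written C illustration of the idea (not part of the patch):

/* The copy through the parameter and through the return value in get_x
   are exactly the statements that should cost nothing once it is
   inlined into use.  */
struct point { int x, y; };

static int
get_x (struct point p)
{
  return p.x;
}

int
use (struct point *p)
{
  return get_x (*p) + 1;   /* after inlining: effectively p->x + 1 */
}
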
@@ -1931,7 +1989,7 @@ estimate_function_body_sizes (struct cgraph_node *node)
time_inlining_benefit += cost;
size_inlining_benefit += cost;
}
- for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
+ for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
if (!VOID_TYPE_P (TREE_TYPE (arg)))
{
int cost = estimate_move_cost (TREE_TYPE (arg));
@@ -1962,7 +2020,7 @@ compute_inline_parameters (struct cgraph_node *node)
/* Estimate the stack size for the function. But not at -O0
because estimated_stack_frame_size is a quadratic problem. */
- self_stack_size = optimize ? estimated_stack_frame_size () : 0;
+ self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
inline_summary (node)->estimated_self_stack_size = self_stack_size;
node->global.estimated_stack_size = self_stack_size;
node->global.stack_frame_offset = 0;
@@ -2029,7 +2087,9 @@ analyze_function (struct cgraph_node *node)
current_function_decl = node->decl;
compute_inline_parameters (node);
- if (flag_indirect_inlining)
+ /* FIXME: We should remove the optimize check after we ensure we never run
+ IPA passes when not optimizing. */
+ if (flag_indirect_inlining && optimize)
inline_indirect_intraprocedural_analysis (node);
current_function_decl = NULL;