author     abel <abel@138bc75d-0d04-0410-961f-82ee72b054a4>   2009-11-13 14:27:13 +0000
committer  abel <abel@138bc75d-0d04-0410-961f-82ee72b054a4>   2009-11-13 14:27:13 +0000
commit     d9ab20389238e942494df234f54a473bb02cec96 (patch)
tree       33d70889d193e4a1474e119c509e0e1fcd50b507 /gcc/sel-sched-ir.c
parent     375866ee59eebb08077a51c31b8e04251ac7248e (diff)
2009-11-13 Andrey Belevantsev <abel@ispras.ru>
	* sched-deps.c (init_deps): New parameter lazy_reg_last.  Don't
	allocate reg_last when lazy_reg_last is true.
	(init_deps_reg_last): New.
	(free_deps): When max_reg is 0, this context is already freed.
	* sched-int.h (init_deps_reg_last): Export.
	(init_deps): Update prototype.
	* sched-ebb.c (schedule_ebb): Update the call to init_deps.
	* sched-rgn.c (sched_rgn_compute_dependencies): Likewise.
	* ddg.c (build_intra_loop_deps): Likewise.
	* sel-sched-ir.c (copy_deps_context, create_deps_context,
	reset_deps_context, deps_init_id): Likewise.
	(init_first_time_insn_data): Lazily allocate INSN_DEPS_CONTEXT.
	(free_data_for_scheduled_insn): New, broken out from ...
	(free_first_time_insn_data): ... here.
	(has_dependence_p): Allocate reg_last now, when it is needed.
	(extend_insn_data): When the maximal LUID is big enough, allocate
	per-insn data in smaller chunks.
	* sel-sched-ir.h (free_data_for_scheduled_insn): Export.
	* sel-sched.c (update_seqnos_and_stage): Free INSN_DEPS_CONTEXT
	in scheduled insns.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@154147 138bc75d-0d04-0410-961f-82ee72b054a4
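The core of the patch is deferring the per-register reg_last allocation of a
dependence context until a query actually needs it, so contexts created for
insns that are never queried stay cheap to set up and to throw away.  The
sketch below restates that pattern in isolation; struct deps_ctx,
reg_last_entry and the ctx_* functions are simplified stand-ins for
illustration, not the actual declarations from sched-deps.c.

    #include <stdlib.h>

    /* Illustrative per-register dependence record; the real one in
       sched-deps.c tracks uses, sets, clobbers and more.  */
    struct reg_last_entry { void *uses, *sets, *clobbers; };

    struct deps_ctx
    {
      int n_regs;                      /* Number of tracked registers.  */
      struct reg_last_entry *reg_last; /* Lazily allocated array.  */
    };

    /* Allocate the per-register array; called on first real use.  */
    static void
    ctx_init_reg_last (struct deps_ctx *dc)
    {
      dc->reg_last = (struct reg_last_entry *)
        calloc (dc->n_regs, sizeof (struct reg_last_entry));
    }

    /* With lazy_reg_last set, initialization is O(1) instead of O(n_regs).  */
    static void
    ctx_init (struct deps_ctx *dc, int n_regs, int lazy_reg_last)
    {
      dc->n_regs = n_regs;
      dc->reg_last = NULL;
      if (!lazy_reg_last)
        ctx_init_reg_last (dc);
    }

    /* Mirrors the free_deps change: a context with n_regs == 0 is treated
       as already freed, so a second call is harmless.  */
    static void
    ctx_free (struct deps_ctx *dc)
    {
      if (dc->n_regs == 0)
        return;
      free (dc->reg_last);
      dc->reg_last = NULL;
      dc->n_regs = 0;
    }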
Diffstat (limited to 'gcc/sel-sched-ir.c')
-rw-r--r--  gcc/sel-sched-ir.c |  65
1 file changed, 49 insertions(+), 16 deletions(-)
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 9a61ed84dca..10750746660 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -433,7 +433,7 @@ reset_target_context (tc_t tc, bool clean_p)
static void
copy_deps_context (deps_t to, deps_t from)
{
- init_deps (to);
+ init_deps (to, false);
deps_join (to, from);
}
@@ -450,7 +450,7 @@ create_deps_context (void)
{
deps_t dc = alloc_deps_context ();
- init_deps (dc);
+ init_deps (dc, false);
return dc;
}
@@ -484,7 +484,7 @@ static void
reset_deps_context (deps_t dc)
{
clear_deps_context (dc);
- init_deps (dc);
+ init_deps (dc, false);
}
/* This structure describes the dependence analysis hooks for advancing
@@ -2674,7 +2674,7 @@ deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
deps_init_id_data.force_unique_p = force_unique_p;
deps_init_id_data.force_use_p = false;
- init_deps (dc);
+ init_deps (dc, false);
memcpy (&deps_init_id_sched_deps_info,
&const_deps_init_id_sched_deps_info,
@@ -2746,7 +2746,7 @@ init_first_time_insn_data (insn_t insn)
/* These are needed for nops too. */
INSN_LIVE (insn) = get_regset_from_pool ();
INSN_LIVE_VALID_P (insn) = false;
-
+
if (!INSN_NOP_P (insn))
{
INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
@@ -2754,27 +2754,46 @@ init_first_time_insn_data (insn_t insn)
INSN_TRANSFORMED_INSNS (insn)
= htab_create (16, hash_transformed_insns,
eq_transformed_insns, free_transformed_insns);
- init_deps (&INSN_DEPS_CONTEXT (insn));
+ init_deps (&INSN_DEPS_CONTEXT (insn), true);
}
}
-/* Free the same data as above for INSN. */
-static void
-free_first_time_insn_data (insn_t insn)
+/* Free almost all above data for INSN that is scheduled already.
+ Used for extra-large basic blocks. */
+void
+free_data_for_scheduled_insn (insn_t insn)
{
gcc_assert (! first_time_insn_init (insn));
-
+
+ if (! INSN_ANALYZED_DEPS (insn))
+ return;
+
BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
BITMAP_FREE (INSN_FOUND_DEPS (insn));
htab_delete (INSN_TRANSFORMED_INSNS (insn));
- return_regset_to_pool (INSN_LIVE (insn));
- INSN_LIVE (insn) = NULL;
- INSN_LIVE_VALID_P (insn) = false;
-
+
/* This is allocated only for bookkeeping insns. */
if (INSN_ORIGINATORS (insn))
BITMAP_FREE (INSN_ORIGINATORS (insn));
free_deps (&INSN_DEPS_CONTEXT (insn));
+
+ INSN_ANALYZED_DEPS (insn) = NULL;
+
+ /* Clear the readonly flag so we would ICE when trying to recalculate
+ the deps context (as we believe that it should not happen). */
+ (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
+}
+
+/* Free the same data as above for INSN. */
+static void
+free_first_time_insn_data (insn_t insn)
+{
+ gcc_assert (! first_time_insn_init (insn));
+
+ free_data_for_scheduled_insn (insn);
+ return_regset_to_pool (INSN_LIVE (insn));
+ INSN_LIVE (insn) = NULL;
+ INSN_LIVE_VALID_P (insn) = false;
}
/* Initialize region-scope data structures for basic blocks. */
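The split between free_data_for_scheduled_insn and free_first_time_insn_data
in the hunk above is an early-release pattern: once an insn has been
scheduled, its dependence-analysis data can be dropped right away, while the
live-register data is kept until the usual end-of-region cleanup.  A minimal
sketch of that idempotent two-stage teardown; the insn_info structure and
field names are hypothetical stand-ins for the INSN_* macros used by the
selective scheduler.

    #include <stdlib.h>

    /* Hypothetical per-insn record; the real data lives behind
       INSN_ANALYZED_DEPS, INSN_LIVE and friends.  */
    struct insn_info
    {
      void *analyzed_deps;   /* Heavy dependence-analysis data.  */
      void *live;            /* Live-register set, kept until region cleanup.  */
    };

    /* Stage 1: called right after the insn is scheduled.  Safe to call
       more than once because of the NULL guard, mirroring the
       INSN_ANALYZED_DEPS check in the patch.  */
    static void
    release_sched_data (struct insn_info *info)
    {
      if (info->analyzed_deps == NULL)
        return;
      free (info->analyzed_deps);
      info->analyzed_deps = NULL;
    }

    /* Stage 2: full cleanup at the end of the region reuses stage 1 and
       then drops what stage 1 deliberately kept.  */
    static void
    release_all_data (struct insn_info *info)
    {
      release_sched_data (info);
      free (info->live);
      info->live = NULL;
    }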
@@ -3211,6 +3230,11 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
return false;
dc = &INSN_DEPS_CONTEXT (pred);
+
+ /* We init this field lazily. */
+ if (dc->reg_last == NULL)
+ init_deps_reg_last (dc);
+
if (!dc->readonly)
{
has_dependence_data.pro = NULL;
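The hunk above is where the laziness pays off: the O(max_reg) table is built
only when has_dependence_p first consults an insn's context.  In terms of the
simplified deps_ctx sketch after the commit message, the query side looks
roughly like this (illustrative only, not the real has_dependence_p):

    /* The per-register table is created the first time a dependence
       query touches this context; contexts never queried never pay.  */
    static int
    query_dependence (struct deps_ctx *dc, int regno)
    {
      if (dc->reg_last == NULL)   /* First query against this insn.  */
        ctx_init_reg_last (dc);
      return dc->reg_last[regno].sets != NULL;
    }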
@@ -3847,8 +3871,17 @@ extend_insn_data (void)
- VEC_length (sel_insn_data_def, s_i_d));
if (reserve > 0
&& ! VEC_space (sel_insn_data_def, s_i_d, reserve))
- VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d,
- 3 * sched_max_luid / 2);
+ {
+ int size;
+
+ if (sched_max_luid / 2 > 1024)
+ size = sched_max_luid + 1024;
+ else
+ size = 3 * sched_max_luid / 2;
+
+
+ VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
+ }
}
/* Finalize data structures for insns from current region. */
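For reference, the sizing rule introduced in the extend_insn_data hunk can be
stated on its own: grow the per-insn vector by 50% while the function is
small, but switch to a fixed 1024-entry increment once half of sched_max_luid
exceeds 1024, so very large functions do not overshoot by tens of thousands
of entries.  The helper name below is hypothetical; it only restates the
arithmetic from the hunk.

    /* Next size for the per-insn data vector, given the current
       maximal LUID.  */
    static int
    next_insn_data_size (int sched_max_luid)
    {
      if (sched_max_luid / 2 > 1024)
        return sched_max_luid + 1024;   /* Large region: fixed increment.  */
      return 3 * sched_max_luid / 2;    /* Small region: grow by 50%.  */
    }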