summaryrefslogtreecommitdiff
path: root/gcc/sched-rgn.c
diff options
context:
space:
mode:
authorrakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>2002-05-27 13:45:44 +0000
committerrakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>2002-05-27 13:45:44 +0000
commitf20183e647ff9f2e7cb27580758622df761da4e5 (patch)
tree5e75f1f3015434e7c5c88a788dd95d9066e0d55f /gcc/sched-rgn.c
parentfa3cb24d9a3ddaa8ef0f813a3c431ec5a595374d (diff)
downloadgcc-f20183e647ff9f2e7cb27580758622df761da4e5.tar.gz
* basic-block.h (last_basic_block): Defined as synonym for
n_basic_blocks. * cfganal.c (mark_dfs_back_edges, flow_reverse_top_sort_order_compute, flow_depth_first_order_compute, flow_preorder_transversal_compute, flow_dfs_compute_reverse_init): Replaced relevant occurrences of n_basic_blocks with last_basic_block. * cfgbuild.c (make_edges): Likewise. * cfgloop.c (flow_loop_scan, flow_loops_find): Likewise. * cfgrtl.c (verify_flow_info, purge_all_dead_edges): Likewise. * combine.c (combine_instructions): Likewise. * df.c (df_alloc, df_analyse_1, df_analyse, iterative_dataflow_sbitmap, iterative_dataflow_bitmap): Likewise. * dominance.c (init_dom_info, calc_dfs_tree_nonrec, calc_dfs_tree, calc_idoms, idoms_to_doms): Likewise. * flow.c (update_life_info_in_dirty_blocks, free_basic_block_vars): Likewise. * gcse.c (gcse_main, alloc_gcse_mem, compute_local_properties, compute_hash_table, expr_reaches_here_p, one_classic_gcse_pass, one_cprop_pass, compute_pre_data, pre_expr_reaches_here_p, one_pre_gcse_pass, compute_transpout, delete_null_pointer_checks_1, delete_null_pointer_checks, compute_code_hoist_vbeinout, hoist_expr_reaches_here_p, hoist_code, one_code_hoisting_pass, compute_store_table, build_store_vectors): Likewise. * haifa-sched.c (sched_init): Likewise. * ifcvt.c (if_convert): Likewise. * lcm.c (compute_antinout_edge, compute_laterin, compute_insert_delete, pre_edge_lcm, compute_available, compute_nearerout, compute_rev_insert_delete, pre_edge_rev_lcm, optimize_mode_switching): Likewise. * predict.c (estimate_probability, process_note_prediction, note_prediction_to_br_prob): Likewise. * profile.c (GCOV_INDEX_TO_BB, BB_TO_GCOV_INDEX): Likewise. * recog.c (split_all_insns, peephole2_optimize): Likewise. * regrename.c (copyprop_hardreg_forward): Likewise. * resource.c (init_resource_info): Likewise. * sched-rgn.c (build_control_flow, find_rgns, compute_trg_info, init_regions, schedule_insns): Likewise. * ssa-ccp.c (ssa_const_prop): Likewise. * ssa-dce.c (ssa_eliminate_dead_code): Likewise. 
* ssa.c (compute_dominance_frontiers, compute_iterated_dominance_frontiers, convert_to_ssa): Likewise. * df.c (df_refs_unlink): Fix FOR_EACH_BB usage (in #if 0'ed code) * gcse.c (alloc_rd_mem, alloc_avail_expr_mem): Use n_blocks for vector sizes consistently. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@53924 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/sched-rgn.c')
-rw-r--r--gcc/sched-rgn.c32
1 files changed, 16 insertions, 16 deletions
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 9f88dcc459b..dc9c3041c84 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -403,8 +403,8 @@ build_control_flow (edge_list)
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
- out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
+ out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
@@ -661,23 +661,23 @@ find_rgns (edge_list, dom)
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
- dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
+ dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
stack = (int *) xmalloc (nr_edges * sizeof (int));
- inner = sbitmap_alloc (n_basic_blocks);
+ inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
- header = sbitmap_alloc (n_basic_blocks);
+ header = sbitmap_alloc (last_basic_block);
sbitmap_zero (header);
passed = sbitmap_alloc (nr_edges);
sbitmap_zero (passed);
- in_queue = sbitmap_alloc (n_basic_blocks);
+ in_queue = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_queue);
- in_stack = sbitmap_alloc (n_basic_blocks);
+ in_stack = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_stack);
for (i = 0; i < n_basic_blocks; i++)
@@ -1197,8 +1197,8 @@ compute_trg_info (trg)
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (n_basic_blocks);
- memset (update_blocks, 0, n_basic_blocks);
+ update_blocks = (char *) alloca (last_basic_block);
+ memset (update_blocks, 0, last_basic_block);
update_idx = 0;
for (j = 0; j < el.nr_members; j++)
@@ -2890,8 +2890,8 @@ init_regions ()
nr_regions = 0;
rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
+ block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
@@ -2912,7 +2912,7 @@ init_regions ()
sbitmap *dom;
struct edge_list *edge_list;
- dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ dom = sbitmap_vector_alloc (last_basic_block, last_basic_block);
/* The scheduler runs after flow; therefore, we can't blindly call
back into find_basic_blocks since doing so could invalidate the
@@ -2953,7 +2953,7 @@ init_regions ()
if (CHECK_DEAD_NOTES)
{
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
@@ -3021,12 +3021,12 @@ schedule_insns (dump_file)
compute_bb_for_insn (get_max_uid ());
any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (n_basic_blocks);
+ large_region_blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (large_region_blocks);
FOR_EACH_BB (bb)
SET_BIT (large_region_blocks, bb->index);
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
/* Update life information. For regions consisting of multiple blocks