diff options
author | zadeck <zadeck@138bc75d-0d04-0410-961f-82ee72b054a4> | 2005-12-17 13:40:27 +0000 |
---|---|---|
committer | zadeck <zadeck@138bc75d-0d04-0410-961f-82ee72b054a4> | 2005-12-17 13:40:27 +0000 |
commit | 4d2e5d528864f897011ae82014889955d1558c97 (patch) | |
tree | 9df368d8d09a4cf8e6f2a79bb6e4864ceb6a4907 /gcc | |
parent | f7553d0a989fa4467612016b437d8734c65a4c50 (diff) | |
download | gcc-4d2e5d528864f897011ae82014889955d1558c97.tar.gz |
2005-12-17 Danny Berlin <dberlin@dberlin.org>
Kenneth Zadeck <zadeck@naturalbridge.com>
* basic-block.h: Changed basic block numbering so that the entry
block is 0 and the exit block is 1. Changed insn iterators so
that they are tolerant of blocks with no insns.
* regrename.c (copyprop_hardreg_forward): Changed basic block
numbering so that the entry block is 0 and the exit block is 1.
* sched-ebb.c (schedule_ebbs): Ditto.
* tracer.c (branch_ratio_cutoff): Ditto.
* cfgloopmanip.c (fix_loop_structure): Ditto.
* cfghooks.c (verify_flow_info): Ditto.
* cfg.c (compact_blocks): Ditto.
* reorg.c (dbr_schedule): Ditto.
* flow.c (calculate_global_regs_live, libcall_dead_p): Ditto.
* dominance.c (calc_dfs_tree_nonrec, calc_dfs_tree,
calculate_dominance_info): Ditto.
* cfganal.c (create_edge_list, print_edge_list,
flow_depth_first_order_compute, flow_dfs_compute_reverse_init,
flow_dfs_compute_reverse_add_bb, flow_dfs_compute_reverse_execute,
dfs_enumerate_from): Ditto.
* global.c (global_alloc, set_up_bb_rts_numbers): Ditto.
* ifcvt.c (find_if_case_2): Ditto.
* cfgbuild.c (control_flow_insn_p, count_basic_blocks,
find_basic_blocks): Ditto.
* predict.c (predict_loops, tree_bb_level_predictions,
predict_paths_leading_to, propagate_freq): Ditto.
* lcm.c (compute_antinout_edge, compute_laterin,
compute_available): Ditto.
* function.c (thread_prologue_and_epilogue_insns): Ditto.
* gcse.c (gcse_main, bypass_jumps): Ditto.
* profile.c (compute_branch_probabilities,
compute_value_histograms, branch_prob): Ditto.
* tree-flow-inline.h (bsi_start, bsi_after_labels,
bsi_last): Ditto.
* tree-ssa-phiopt.c (tree_ssa_phiopt,
blocks_in_phiopt_order): Ditto.
* bt-load.c (compute_defs_uses_and_gen, compute_kill,
compute_out, link_btr_uses, migrate_btr_defs): Ditto.
* tree-dfa.c (collect_dfa_stats): Ditto.
* cfgcleanup.c (try_forward_edges, try_optimize_cfg): Ditto.
* cfglayout.c (fixup_reorder_chain): Ditto.
* bb-reorder.c (reorder_basic_blocks, duplicate_computed_gotos,
partition_hot_cold_basic_blocks): Ditto.
* var-tracking.c (vt_find_locations): Ditto.
* cfgloop.c (flow_loops_cfg_dump, flow_loops_find, get_loop_body): Ditto.
* sched-rgn.c (compute_trg_info, init_regions, schedule_insns): Ditto.
* tree-cfg.c (init_empty_tree_cfg, build_tree_cfg, make_edges,
label_to_block_fn, print_loop_ir, tree_flow_call_edges_add): Ditto.
* tree-ssa-reassoc.c (init_reassoc): Ditto.
* cfgrtl.c (entry_of_function, rtl_verify_flow_info,
rtl_flow_call_edges_add, rtl_flow_call_edges_add): Ditto.
* df.c (df_analyze_1, hybrid_search, iterative_dataflow): Ditto
and removed unused reverse orders.
* df.h (): Ditto.
* combine.c: Fix document typo.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@108713 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/ChangeLog | 57 | ||||
-rw-r--r-- | gcc/basic-block.h | 13 | ||||
-rw-r--r-- | gcc/bb-reorder.c | 10 | ||||
-rw-r--r-- | gcc/bt-load.c | 12 | ||||
-rw-r--r-- | gcc/cfg.c | 7 | ||||
-rw-r--r-- | gcc/cfganal.c | 31 | ||||
-rw-r--r-- | gcc/cfgbuild.c | 11 | ||||
-rw-r--r-- | gcc/cfgcleanup.c | 6 | ||||
-rw-r--r-- | gcc/cfghooks.c | 18 | ||||
-rw-r--r-- | gcc/cfglayout.c | 6 | ||||
-rw-r--r-- | gcc/cfgloop.c | 12 | ||||
-rw-r--r-- | gcc/cfgloopmanip.c | 2 | ||||
-rw-r--r-- | gcc/cfgrtl.c | 9 | ||||
-rw-r--r-- | gcc/combine.c | 2 | ||||
-rw-r--r-- | gcc/df.c | 20 | ||||
-rw-r--r-- | gcc/df.h | 3 | ||||
-rw-r--r-- | gcc/dominance.c | 15 | ||||
-rw-r--r-- | gcc/flow.c | 51 | ||||
-rw-r--r-- | gcc/function.c | 3 | ||||
-rw-r--r-- | gcc/gcse.c | 5 | ||||
-rw-r--r-- | gcc/global.c | 6 | ||||
-rw-r--r-- | gcc/ifcvt.c | 4 | ||||
-rw-r--r-- | gcc/lcm.c | 20 | ||||
-rw-r--r-- | gcc/predict.c | 22 | ||||
-rw-r--r-- | gcc/profile.c | 14 | ||||
-rw-r--r-- | gcc/regrename.c | 9 | ||||
-rw-r--r-- | gcc/reorg.c | 2 | ||||
-rw-r--r-- | gcc/sched-ebb.c | 2 | ||||
-rw-r--r-- | gcc/sched-rgn.c | 14 | ||||
-rw-r--r-- | gcc/tracer.c | 4 | ||||
-rw-r--r-- | gcc/tree-cfg.c | 17 | ||||
-rw-r--r-- | gcc/tree-dfa.c | 5 | ||||
-rw-r--r-- | gcc/tree-flow-inline.h | 6 | ||||
-rw-r--r-- | gcc/tree-ssa-phiopt.c | 13 | ||||
-rw-r--r-- | gcc/tree-ssa-reassoc.c | 3 | ||||
-rw-r--r-- | gcc/var-tracking.c | 4 |
36 files changed, 236 insertions, 202 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index c310b264c6a..33e247e1dec 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,60 @@ +2005-12-17 Danny Berlin <dberlin@dberlin.org> + Kenneth Zadeck <zadeck@naturalbridge.com> + + * basic-block.h: Changed basic block numbering so that the entry + block is 0 and the exit block is 1. Changed insn iterators so + that they are tolerant of blocks with no insns. + * regrename.c (copyprop_hardreg_forward): Changed basic block + numbering so that the entry block is 0 and the exit block is 1. + * sched-ebb.c (sehedule_ebbs): Ditto. + * tracer.c (branch_ratio_cutoff): Ditto. + * cfgloopmanip.c (fix_loop_structure): Ditto. + * cfghooks.c (verify_flow_info): Ditto. + * cfg.c (compact_blocks): Ditto. + * reorg.c (dbr_schedule): Ditto. + * flow.c (calculate_global_regs_live, libcall_dead_p): Ditto. + * dominance.c (calc_dfs_tree_nonrec, calc_dfs_tree, + calculate_dominance_info): Ditto. + * cfganal.c (create_edge_list, print_edge_list, + flow_depth_first_order_compute, flow_dfs_compute_reverse_init, + flow_dfs_compute_reverse_add_bb, flow_dfs_compute_reverse_execute, + dfs_enumerate_from): Ditto. + * global.c (global_alloc, set_up_bb_rts_numbers): Ditto. + * ifcvt.c (find_if_case_2): Ditto. + * cfgbuild.c (control_flow_insn_p, count_basic_blocks, + find_basic_blocks): Ditto. + * predict.c (predict_loops, tree_bb_level_predictions, + predict_paths_leading_to, propagate_freq): Ditto. + * lcm.c (compute_antinout_edge, compute_laterin, + compute_available): Ditto. + * function.c (thread_prologue_and_epilogue_insns): Ditto. + * gcse.c (gcse_main, bypass_jumps): Ditto. + * profile.c (compute_branch_probabilities, + compute_value_histograms, branch_prob): Ditto. + * tree-flow-inline.h (bsi_start, bsi_after_labels, + bsi_last): Ditto. + * tree-ssa-phiopt.c (tree_ssa_phiopt, + blocks_in_phiopt_order): Ditto. + * bt-load.c (compute_defs_uses_and_gen, compute_kill, + compute_out, link_btr_uses, migrate_btr_defs): Ditto. 
+ * tree-dfa.c (collect_dfa_stats): Ditto. + * cfgcleanup.c (try_forward_edges, try_optimize_cfg): Ditto. + * cfglayout.c (fixup_reorder_chain): Ditto. + * bb-reorder.c (reorder_basic_blocks, duplicate_computed_gotos, + partition_hot_cold_basic_blocks): Ditto. + * var-tracking.c (vt_find_locations): Ditto. + * cfgloop.c (flow_loops_cfg_dump, flow_loops_find, get_loop_body): Ditto. + * sched-rgn.c (compute_trg_info, init_regions, schedule_insns): Ditto. + * tree-cfg.c (init_empty_tree_cfg, build_tree_cfg, make_edges + label_to_block_fn, print_loop_ir, tree_flow_call_edges_add): Ditto. + * tree-ssa-reassoc.c (init_reassoc): Ditto. + * cfgrtl.c (entry_of_function, rtl_verify_flow_info, + rtl_flow_call_edges_add, rtl_flow_call_edges_add): Ditto. + * df.c (df_analyze_1, hybrid_search, iterative_dataflow): Ditto + and removed unused reverse orders. + * df.h (): Ditto. + * combine.c: Fix document typo. + 2005-12-17 Jan Hubicka <jh@suse.cz> * tree-flow-inline.h (set_default_def, default_def): Kill. diff --git a/gcc/basic-block.h b/gcc/basic-block.h index c2e4a16dd79..8407f3a7e3c 100644 --- a/gcc/basic-block.h +++ b/gcc/basic-block.h @@ -430,12 +430,12 @@ extern bool rediscover_loops_after_threading; /* For iterating over insns in basic block. */ #define FOR_BB_INSNS(BB, INSN) \ for ((INSN) = BB_HEAD (BB); \ - (INSN) != NEXT_INSN (BB_END (BB)); \ + (INSN) && (INSN) != NEXT_INSN (BB_END (BB)); \ (INSN) = NEXT_INSN (INSN)) #define FOR_BB_INSNS_REVERSE(BB, INSN) \ for ((INSN) = BB_END (BB); \ - (INSN) != PREV_INSN (BB_HEAD (BB)); \ + (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB)); \ (INSN) = PREV_INSN (INSN)) /* Cycles through _all_ basic blocks, even the fake ones (entry and @@ -467,11 +467,12 @@ extern bitmap_obstack reg_obstack; #define BB_END(B) (B)->il.rtl->end_ /* Special block numbers [markers] for entry and exit. */ -#define ENTRY_BLOCK (-1) -#define EXIT_BLOCK (-2) +#define ENTRY_BLOCK (0) +#define EXIT_BLOCK (1) + +/* The two blocks that are always in the cfg. 
*/ +#define NUM_FIXED_BLOCKS (2) -/* Special block number not valid for any block. */ -#define INVALID_BLOCK (-3) #define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0) #define set_block_for_insn(INSN, BB) (BLOCK_FOR_INSN (INSN) = BB) diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c index 40d0cdf676e..f46b2cf871e 100644 --- a/gcc/bb-reorder.c +++ b/gcc/bb-reorder.c @@ -1894,7 +1894,7 @@ reorder_basic_blocks (unsigned int flags) int i; struct trace *traces; - if (n_basic_blocks <= 1) + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1) return; if (targetm.cannot_modify_jumps_p ()) @@ -1986,7 +1986,7 @@ duplicate_computed_gotos (void) bitmap candidates; int max_size; - if (n_basic_blocks <= 1) + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1) return; if (targetm.cannot_modify_jumps_p ()) @@ -2169,7 +2169,7 @@ partition_hot_cold_basic_blocks (void) int n_crossing_edges; int max_edges = 2 * last_basic_block; - if (n_basic_blocks <= 1) + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1) return; crossing_edges = xcalloc (max_edges, sizeof (edge)); @@ -2177,8 +2177,8 @@ partition_hot_cold_basic_blocks (void) cfg_layout_initialize (0); FOR_EACH_BB (cur_bb) - if (cur_bb->index >= 0 - && cur_bb->next_bb->index >= 0) + if (cur_bb->index >= NUM_FIXED_BLOCKS + && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS) cur_bb->aux = cur_bb->next_bb; find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges, diff --git a/gcc/bt-load.c b/gcc/bt-load.c index ae648411940..c39c5b5aaaa 100644 --- a/gcc/bt-load.c +++ b/gcc/bt-load.c @@ -461,7 +461,7 @@ compute_defs_uses_and_gen (fibheap_t all_btr_defs, btr_def *def_array, defs_uses_info info; sbitmap_vector_zero (bb_gen, n_basic_blocks); - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) { basic_block bb = BASIC_BLOCK (i); int reg; @@ -622,7 +622,7 @@ compute_kill (sbitmap *bb_kill, sbitmap *btr_defset, /* For each basic block, form the set BB_KILL - the set of definitions that the block kills. 
*/ sbitmap_vector_zero (bb_kill, n_basic_blocks); - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) { for (regno = first_btr; regno <= last_btr; regno++) if (TEST_HARD_REG_BIT (all_btrs, regno) @@ -645,14 +645,14 @@ compute_out (sbitmap *bb_out, sbitmap *bb_gen, sbitmap *bb_kill, int max_uid) int changed; sbitmap bb_in = sbitmap_alloc (max_uid); - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) sbitmap_copy (bb_out[i], bb_gen[i]); changed = 1; while (changed) { changed = 0; - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) { sbitmap_union_of_preds (bb_in, bb_out, i); changed |= sbitmap_union_of_diff_cg (bb_out[i], bb_gen[i], @@ -671,7 +671,7 @@ link_btr_uses (btr_def *def_array, btr_user *use_array, sbitmap *bb_out, /* Link uses to the uses lists of all of their reaching defs. Count up the number of reaching defs of each use. */ - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) { basic_block bb = BASIC_BLOCK (i); rtx insn; @@ -1397,7 +1397,7 @@ migrate_btr_defs (enum reg_class btr_class, int allow_callee_save) { int i; - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) { basic_block bb = BASIC_BLOCK (i); fprintf(dump_file, diff --git a/gcc/cfg.c b/gcc/cfg.c index c46ac0b88ee..8f8593e2420 100644 --- a/gcc/cfg.c +++ b/gcc/cfg.c @@ -163,8 +163,11 @@ compact_blocks (void) int i; basic_block bb; - i = 0; - FOR_EACH_BB (bb) + BASIC_BLOCK (ENTRY_BLOCK) = ENTRY_BLOCK_PTR; + BASIC_BLOCK (EXIT_BLOCK) = EXIT_BLOCK_PTR; + + i = NUM_FIXED_BLOCKS; + FOR_EACH_BB (bb) { BASIC_BLOCK (i) = bb; bb->index = i; diff --git a/gcc/cfganal.c b/gcc/cfganal.c index 3ed1c592186..e005388d4a3 100644 --- a/gcc/cfganal.c +++ b/gcc/cfganal.c @@ -345,7 +345,7 @@ create_edge_list (void) basic_block bb; edge_iterator ei; - block_count = n_basic_blocks + 2; /* Include the entry and exit 
blocks. */ + block_count = n_basic_blocks; /* Include the entry and exit blocks. */ num_edges = 0; @@ -391,7 +391,7 @@ print_edge_list (FILE *f, struct edge_list *elist) int x; fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n", - elist->num_blocks - 2, elist->num_edges); + elist->num_blocks, elist->num_edges); for (x = 0; x < elist->num_edges; x++) { @@ -721,7 +721,7 @@ flow_depth_first_order_compute (int *dfs_order, int *rc_order) edge_iterator *stack; int sp; int dfsnum = 0; - int rcnum = n_basic_blocks - 1; + int rcnum = n_basic_blocks - 1 - NUM_FIXED_BLOCKS; sbitmap visited; /* Allocate stack for back-tracking up CFG. */ @@ -786,8 +786,9 @@ flow_depth_first_order_compute (int *dfs_order, int *rc_order) free (stack); sbitmap_free (visited); - /* The number of nodes visited should be the number of blocks. */ - gcc_assert (dfsnum == n_basic_blocks); + /* The number of nodes visited should be the number of blocks minus + the entry and exit blocks which are not visited here. */ + gcc_assert (dfsnum == n_basic_blocks - NUM_FIXED_BLOCKS); return dfsnum; } @@ -826,12 +827,11 @@ static void flow_dfs_compute_reverse_init (depth_first_search_ds data) { /* Allocate stack for back-tracking up CFG. */ - data->stack = xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1)) - * sizeof (basic_block)); + data->stack = xmalloc (n_basic_blocks * sizeof (basic_block)); data->sp = 0; /* Allocate bitmap to track nodes that have been visited. */ - data->visited_blocks = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1)); + data->visited_blocks = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. 
*/ sbitmap_zero (data->visited_blocks); @@ -847,7 +847,7 @@ static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb) { data->stack[data->sp++] = bb; - SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1)); + SET_BIT (data->visited_blocks, bb->index); } /* Continue the depth-first search through the reverse graph starting with the @@ -869,14 +869,13 @@ flow_dfs_compute_reverse_execute (depth_first_search_ds data, /* Perform depth-first search on adjacent vertices. */ FOR_EACH_EDGE (e, ei, bb->preds) - if (!TEST_BIT (data->visited_blocks, - e->src->index - (INVALID_BLOCK + 1))) + if (!TEST_BIT (data->visited_blocks, e->src->index)) flow_dfs_compute_reverse_add_bb (data, e->src); } /* Determine if there are unvisited basic blocks. */ FOR_BB_BETWEEN (bb, last_unvisited, NULL, prev_bb) - if (!TEST_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1))) + if (!TEST_BIT (data->visited_blocks, bb->index)) return bb; return NULL; @@ -912,12 +911,12 @@ dfs_enumerate_from (basic_block bb, int reverse, static sbitmap visited; static unsigned v_size; -#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index + 2)) -#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index + 2)) -#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index + 2)) +#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index)) +#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index)) +#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index)) /* Resize the VISITED sbitmap if necessary. 
*/ - size = last_basic_block + 2; + size = last_basic_block; if (size < 10) size = 10; diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c index e860e9885cd..834119dbc0c 100644 --- a/gcc/cfgbuild.c +++ b/gcc/cfgbuild.c @@ -138,7 +138,7 @@ control_flow_insn_p (rtx insn) static int count_basic_blocks (rtx f) { - int count = 0; + int count = NUM_FIXED_BLOCKS; bool saw_insn = false; rtx insn; @@ -164,10 +164,10 @@ count_basic_blocks (rtx f) /* The rest of the compiler works a bit smoother when we don't have to check for the edge case of do-nothing functions with no basic blocks. */ - if (count == 0) + if (count == NUM_FIXED_BLOCKS) { emit_insn (gen_rtx_USE (VOIDmode, const0_rtx)); - count = 1; + count = NUM_FIXED_BLOCKS + 1; } return count; @@ -529,10 +529,11 @@ find_basic_blocks (rtx f) } n_basic_blocks = count_basic_blocks (f); - last_basic_block = 0; + last_basic_block = NUM_FIXED_BLOCKS; ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR; EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR; + /* Size the basic block table. The actual structures will be allocated by find_basic_blocks_1, since we want to keep the structure pointers stable across calls to find_basic_blocks. */ @@ -542,6 +543,8 @@ find_basic_blocks (rtx f) actually lay them out. */ VARRAY_BB_INIT (basic_block_info, n_basic_blocks, "basic_block_info"); + BASIC_BLOCK (ENTRY_BLOCK) = ENTRY_BLOCK_PTR; + BASIC_BLOCK (EXIT_BLOCK) = EXIT_BLOCK_PTR; find_basic_blocks_1 (f); diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c index c4939d07ceb..8443451a42f 100644 --- a/gcc/cfgcleanup.c +++ b/gcc/cfgcleanup.c @@ -441,7 +441,7 @@ try_forward_edges (int mode, basic_block b) } target = first = e->dest; - counter = 0; + counter = NUM_FIXED_BLOCKS; /* If we are partitioning hot/cold basic_blocks, we don't want to mess up jumps that cross between hot/cold sections. 
@@ -503,7 +503,7 @@ try_forward_edges (int mode, basic_block b) if (t->dest == b) break; - gcc_assert (nthreaded_edges < n_basic_blocks); + gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS); threaded_edges[nthreaded_edges++] = t; new_target = t->dest; @@ -1676,7 +1676,7 @@ try_optimize_cfg (int mode) /* Note that forwarder_block_p true ensures that there is a successor for this block. */ && (single_succ_edge (b)->flags & EDGE_FALLTHRU) - && n_basic_blocks > 1) + && n_basic_blocks > NUM_FIXED_BLOCKS + 1) { if (dump_file) fprintf (dump_file, diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c index 138bc4ab2ae..a42e4d74b7b 100644 --- a/gcc/cfghooks.c +++ b/gcc/cfghooks.c @@ -77,8 +77,8 @@ verify_flow_info (void) basic_block *last_visited; timevar_push (TV_CFG_VERIFY); - last_visited = xcalloc (last_basic_block + 2, sizeof (basic_block)); - edge_checksum = xcalloc (last_basic_block + 2, sizeof (size_t)); + last_visited = xcalloc (last_basic_block, sizeof (basic_block)); + edge_checksum = xcalloc (last_basic_block, sizeof (size_t)); /* Check bb chain & numbers. 
*/ last_bb_seen = ENTRY_BLOCK_PTR; @@ -122,7 +122,7 @@ verify_flow_info (void) } FOR_EACH_EDGE (e, ei, bb->succs) { - if (last_visited [e->dest->index + 2] == bb) + if (last_visited [e->dest->index] == bb) { error ("verify_flow_info: Duplicate edge %i->%i", e->src->index, e->dest->index); @@ -141,7 +141,7 @@ verify_flow_info (void) err = 1; } - last_visited [e->dest->index + 2] = bb; + last_visited [e->dest->index] = bb; if (e->flags & EDGE_FALLTHRU) n_fallthru++; @@ -158,7 +158,7 @@ verify_flow_info (void) err = 1; } - edge_checksum[e->dest->index + 2] += (size_t) e; + edge_checksum[e->dest->index] += (size_t) e; } if (n_fallthru > 1) { @@ -192,7 +192,7 @@ verify_flow_info (void) err = 1; } - edge_checksum[e->dest->index + 2] -= (size_t) e; + edge_checksum[e->dest->index] -= (size_t) e; } } @@ -202,14 +202,14 @@ verify_flow_info (void) edge_iterator ei; FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) - edge_checksum[e->dest->index + 2] += (size_t) e; + edge_checksum[e->dest->index] += (size_t) e; FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) - edge_checksum[e->dest->index + 2] -= (size_t) e; + edge_checksum[e->dest->index] -= (size_t) e; } FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) - if (edge_checksum[bb->index + 2]) + if (edge_checksum[bb->index]) { error ("basic block %i edge lists are corrupted", bb->index); err = 1; diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c index 9cc2f8fa709..d2580b4e19f 100644 --- a/gcc/cfglayout.c +++ b/gcc/cfglayout.c @@ -601,7 +601,7 @@ fixup_reorder_chain (void) /* First do the bulk reordering -- rechain the blocks without regard to the needed changes to jumps and labels. 
*/ - for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; + for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS; bb != 0; bb = bb->aux, index++) { @@ -818,7 +818,7 @@ fixup_reorder_chain (void) if (dump_file) { fprintf (dump_file, "Reordered sequence:\n"); - for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; + for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS; bb; bb = bb->aux, index++) { @@ -837,7 +837,7 @@ fixup_reorder_chain (void) prev_bb = ENTRY_BLOCK_PTR; bb = ENTRY_BLOCK_PTR->next_bb; - index = 0; + index = NUM_FIXED_BLOCKS; for (; bb; prev_bb = bb, bb = bb->aux, index ++) { diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c index ff977039522..dabeacf214e 100644 --- a/gcc/cfgloop.c +++ b/gcc/cfgloop.c @@ -73,7 +73,7 @@ flow_loops_cfg_dump (const struct loops *loops, FILE *file) if (loops->cfg.dfs_order) { fputs (";; DFS order: ", file); - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) fprintf (file, "%d ", loops->cfg.dfs_order[i]); fputs ("\n", file); @@ -83,7 +83,7 @@ flow_loops_cfg_dump (const struct loops *loops, FILE *file) if (loops->cfg.rc_order) { fputs (";; RC order: ", file); - for (i = 0; i < n_basic_blocks; i++) + for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++) fprintf (file, "%d ", loops->cfg.rc_order[i]); fputs ("\n", file); @@ -610,7 +610,7 @@ flow_loops_find (struct loops *loops) /* Taking care of this degenerate case makes the rest of this code simpler. 
*/ - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return 0; dfs_order = NULL; @@ -676,7 +676,7 @@ flow_loops_find (struct loops *loops) loops->parray[0]->outer = NULL; loops->parray[0]->depth = 0; loops->parray[0]->pred = NULL; - loops->parray[0]->num_nodes = n_basic_blocks + 2; + loops->parray[0]->num_nodes = n_basic_blocks; loops->parray[0]->latch = EXIT_BLOCK_PTR; loops->parray[0]->header = ENTRY_BLOCK_PTR; ENTRY_BLOCK_PTR->loop_father = loops->parray[0]; @@ -704,7 +704,7 @@ flow_loops_find (struct loops *loops) num_loops = 1; - for (b = 0; b < n_basic_blocks; b++) + for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++) { struct loop *loop; edge_iterator ei; @@ -804,7 +804,7 @@ get_loop_body (const struct loop *loop) if (loop->latch == EXIT_BLOCK_PTR) { /* There may be blocks unreachable from EXIT_BLOCK. */ - gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks + 2); + gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks); FOR_EACH_BB (bb) tovisit[tv++] = bb; tovisit[tv++] = EXIT_BLOCK_PTR; diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c index c8fbe31a2a5..cfc11e0f62e 100644 --- a/gcc/cfgloopmanip.c +++ b/gcc/cfgloopmanip.c @@ -1563,7 +1563,7 @@ fix_loop_structure (struct loops *loops, bitmap changed_bbs) } /* Remove the dead loops from structures. */ - loops->tree_root->num_nodes = n_basic_blocks + 2; + loops->tree_root->num_nodes = n_basic_blocks; for (i = 1; i < loops->num; i++) { loop = loops->parray[i]; diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c index d06caba73bd..fcf359070b7 100644 --- a/gcc/cfgrtl.c +++ b/gcc/cfgrtl.c @@ -440,7 +440,8 @@ struct tree_opt_pass pass_free_cfg = rtx entry_of_function (void) { - return (n_basic_blocks ? BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ()); + return (n_basic_blocks > NUM_FIXED_BLOCKS ? + BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ()); } /* Update insns block within BB. 
*/ @@ -2258,7 +2259,7 @@ rtl_verify_flow_info (void) curr_bb = NULL; } - if (num_bb_notes != n_basic_blocks) + if (num_bb_notes != n_basic_blocks - NUM_FIXED_BLOCKS) internal_error ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)", num_bb_notes, n_basic_blocks); @@ -2913,7 +2914,7 @@ rtl_flow_call_edges_add (sbitmap blocks) int last_bb = last_basic_block; bool check_last_block = false; - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return 0; if (! blocks) @@ -2960,7 +2961,7 @@ rtl_flow_call_edges_add (sbitmap blocks) calls since there is no way that we can determine if they will return or not... */ - for (i = 0; i < last_bb; i++) + for (i = NUM_FIXED_BLOCKS; i < last_bb; i++) { basic_block bb = BASIC_BLOCK (i); rtx insn; diff --git a/gcc/combine.c b/gcc/combine.c index f3ae4ea95fa..f21a17cd4d5 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -55,7 +55,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - reg_live_length is not updated - reg_n_refs is not adjusted in the rare case when a register is no longer required in a computation - - there are extremely rare cases (see distribute_regnotes) when a + - there are extremely rare cases (see distribute_notes) when a REG_DEAD note is lost - a LOG_LINKS entry that refers to an insn with multiple SETs may be removed because there is no way to know which register it was @@ -1926,7 +1926,6 @@ df_analyze_1 (struct df *df, bitmap blocks, int flags, int update) { int aflags; int dflags; - int i; basic_block bb; struct dataflow dflow; @@ -1996,18 +1995,9 @@ df_analyze_1 (struct df *df, bitmap blocks, int flags, int update) df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks); df->rc_order = xmalloc (sizeof (int) * n_basic_blocks); df->rts_order = xmalloc (sizeof (int) * n_basic_blocks); - df->inverse_dfs_map = xmalloc (sizeof (int) * last_basic_block); - df->inverse_rc_map = xmalloc (sizeof (int) * last_basic_block); - df->inverse_rts_map = xmalloc (sizeof (int) * 
last_basic_block); flow_depth_first_order_compute (df->dfs_order, df->rc_order); flow_reverse_top_sort_order_compute (df->rts_order); - for (i = 0; i < n_basic_blocks; i++) - { - df->inverse_dfs_map[df->dfs_order[i]] = i; - df->inverse_rc_map[df->rc_order[i]] = i; - df->inverse_rts_map[df->rts_order[i]] = i; - } if (aflags & DF_RD) { /* Compute the sets of gens and kills for the defs of each bb. */ @@ -2137,9 +2127,6 @@ df_analyze_1 (struct df *df, bitmap blocks, int flags, int update) free (df->dfs_order); free (df->rc_order); free (df->rts_order); - free (df->inverse_rc_map); - free (df->inverse_dfs_map); - free (df->inverse_rts_map); } @@ -3923,8 +3910,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, DATAFLOW, producing the in and out sets. Only the part of the cfg induced by blocks in DATAFLOW->order is taken into account. - For forward problems, you probably want to pass in a mapping of - block number to rc_order (like df->inverse_rc_map). */ + For forward problems, you probably want to pass in rc_order. */ void iterative_dataflow (struct dataflow *dataflow) @@ -3939,7 +3925,7 @@ iterative_dataflow (struct dataflow *dataflow) sbitmap_zero (visited); sbitmap_zero (considered); - for (i = 0; i < dataflow->n_blocks; i++) + for (i = 0; i < dataflow->n_blocks - NUM_FIXED_BLOCKS; i++) { idx = dataflow->order[i]; SET_BIT (pending, idx); @@ -3954,7 +3940,7 @@ iterative_dataflow (struct dataflow *dataflow) while (1) { - for (i = 0; i < dataflow->n_blocks; i++) + for (i = 0; i < dataflow->n_blocks - NUM_FIXED_BLOCKS ; i++) { idx = dataflow->order[i]; @@ -155,9 +155,6 @@ struct df int *dfs_order; /* DFS order -> block number. */ int *rc_order; /* Reverse completion order -> block number. */ int *rts_order; /* Reverse top sort order -> block number. */ - int *inverse_rc_map; /* Block number -> reverse completion order. */ - int *inverse_dfs_map; /* Block number -> DFS order. */ - int *inverse_rts_map; /* Block number -> reverse top-sort order. 
*/ }; diff --git a/gcc/dominance.c b/gcc/dominance.c index 8cfaba033a3..f89b6f61887 100644 --- a/gcc/dominance.c +++ b/gcc/dominance.c @@ -51,8 +51,7 @@ enum dom_state dom_computed[2]; 'undefined' or 'end of list'. The name of each node is given by the dfs number of the corresponding basic block. Please note, that we include the artificial ENTRY_BLOCK (or EXIT_BLOCK in the post-dom case) in our lists to - support multiple entry points. As it has no real basic block index we use - 'last_basic_block' for that. Its dfs number is of course 1. */ + support multiple entry points. Its dfs number is of course 1. */ /* Type of Basic Block aka. TBB */ typedef unsigned int TBB; @@ -149,9 +148,7 @@ static unsigned n_bbs_in_dom_tree[2]; static void init_dom_info (struct dom_info *di, enum cdi_direction dir) { - /* We need memory for n_basic_blocks nodes and the ENTRY_BLOCK or - EXIT_BLOCK. */ - unsigned int num = n_basic_blocks + 1 + 1; + unsigned int num = n_basic_blocks; init_ar (di->dfs_parent, TBB, num, 0); init_ar (di->path_min, TBB, num, i); init_ar (di->key, TBB, num, i); @@ -216,7 +213,7 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, /* Ending block. */ basic_block ex_block; - stack = xmalloc ((n_basic_blocks + 3) * sizeof (edge_iterator)); + stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator)); sp = 0; /* Initialize our border blocks, and the first edge. */ @@ -372,8 +369,8 @@ calc_dfs_tree (struct dom_info *di, enum cdi_direction reverse) di->nodes = di->dfsnum - 1; - /* Make sure there is a path from ENTRY to EXIT at all. */ - gcc_assert (di->nodes == (unsigned int) n_basic_blocks + 1); + /* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all. 
*/ + gcc_assert (di->nodes == (unsigned int) n_basic_blocks - 1); } /* Compress the path from V to the root of its set and update path_min at the @@ -627,7 +624,7 @@ calculate_dominance_info (enum cdi_direction dir) { b->dom[dir] = et_new_tree (b); } - n_bbs_in_dom_tree[dir] = n_basic_blocks + 2; + n_bbs_in_dom_tree[dir] = n_basic_blocks; init_dom_info (&di, dir); calc_dfs_tree (&di, dir); diff --git a/gcc/flow.c b/gcc/flow.c index 9ef0a906ff3..436bfd7b29b 100644 --- a/gcc/flow.c +++ b/gcc/flow.c @@ -1054,17 +1054,14 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) SET_REGNO_REG_SET (invalidated_by_call, i); /* Allocate space for the sets of local properties. */ - local_sets = xcalloc (last_basic_block - (INVALID_BLOCK + 1), - sizeof (regset)); - cond_local_sets = xcalloc (last_basic_block - (INVALID_BLOCK + 1), - sizeof (regset)); - - /* Create a worklist. Allocate an extra slot for ENTRY_BLOCK, and one - because the `head == tail' style test for an empty queue doesn't - work with a full queue. */ - queue = xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1)) * sizeof (*queue)); + local_sets = xcalloc (last_basic_block, sizeof (regset)); + cond_local_sets = xcalloc (last_basic_block, sizeof (regset)); + + /* Create a worklist. Allocate an extra slot for the `head == tail' + style test for an empty queue doesn't work with a full queue. */ + queue = xmalloc ((n_basic_blocks + 1) * sizeof (*queue)); qtail = queue; - qhead = qend = queue + n_basic_blocks - (INVALID_BLOCK + 1); + qhead = qend = queue + n_basic_blocks; /* Queue the blocks set in the initial mask. Do this in reverse block number order so that we are more likely for the first round to do @@ -1245,12 +1242,10 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) basic block. On subsequent passes, we get to skip out early if live_at_end wouldn't have changed. 
*/ - if (local_sets[bb->index - (INVALID_BLOCK + 1)] == NULL) + if (local_sets[bb->index] == NULL) { - local_sets[bb->index - (INVALID_BLOCK + 1)] - = ALLOC_REG_SET (®_obstack); - cond_local_sets[bb->index - (INVALID_BLOCK + 1)] - = ALLOC_REG_SET (®_obstack); + local_sets[bb->index] = ALLOC_REG_SET (®_obstack); + cond_local_sets[bb->index] = ALLOC_REG_SET (®_obstack); rescan = 1; } else @@ -1274,7 +1269,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) successor block. We can miss changes in those sets if we only compare the new live_at_end against the previous one. */ - cond_local_set = cond_local_sets[bb->index - (INVALID_BLOCK + 1)]; + cond_local_set = cond_local_sets[bb->index]; rescan = bitmap_intersect_p (new_live_at_end, cond_local_set); } @@ -1290,7 +1285,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) /* If any of the changed bits overlap with local_sets[bb], we'll have to rescan the block. */ - local_set = local_sets[bb->index - (INVALID_BLOCK + 1)]; + local_set = local_sets[bb->index]; rescan = bitmap_intersect_p (tmp, local_set); } } @@ -1319,8 +1314,8 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) /* Rescan the block insn by insn to turn (a copy of) live_at_end into live_at_start. */ propagate_block (bb, new_live_at_end, - local_sets[bb->index - (INVALID_BLOCK + 1)], - cond_local_sets[bb->index - (INVALID_BLOCK + 1)], + local_sets[bb->index], + cond_local_sets[bb->index], flags); /* If live_at start didn't change, no need to go farther. */ @@ -1366,11 +1361,11 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) SET_BIT (blocks_out, pbb->index); /* Makes sure to really rescan the block. 
*/ - if (local_sets[pbb->index - (INVALID_BLOCK + 1)]) + if (local_sets[pbb->index]) { - FREE_REG_SET (local_sets[pbb->index - (INVALID_BLOCK + 1)]); - FREE_REG_SET (cond_local_sets[pbb->index - (INVALID_BLOCK + 1)]); - local_sets[pbb->index - (INVALID_BLOCK + 1)] = 0; + FREE_REG_SET (local_sets[pbb->index]); + FREE_REG_SET (cond_local_sets[pbb->index]); + local_sets[pbb->index] = 0; } /* Add it to the queue. */ @@ -1416,16 +1411,16 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) EXECUTE_IF_SET_IN_SBITMAP (blocks_out, 0, i, sbi) { basic_block bb = BASIC_BLOCK (i); - FREE_REG_SET (local_sets[bb->index - (INVALID_BLOCK + 1)]); - FREE_REG_SET (cond_local_sets[bb->index - (INVALID_BLOCK + 1)]); + FREE_REG_SET (local_sets[bb->index]); + FREE_REG_SET (cond_local_sets[bb->index]); }; } else { FOR_EACH_BB (bb) { - FREE_REG_SET (local_sets[bb->index - (INVALID_BLOCK + 1)]); - FREE_REG_SET (cond_local_sets[bb->index - (INVALID_BLOCK + 1)]); + FREE_REG_SET (local_sets[bb->index]); + FREE_REG_SET (cond_local_sets[bb->index]); } } @@ -2472,7 +2467,7 @@ libcall_dead_p (struct propagate_block_info *pbi, rtx note, rtx insn) int regno_clobbered_at_setjmp (int regno) { - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return 0; return ((REG_N_SETS (regno) > 1 diff --git a/gcc/function.c b/gcc/function.c index ff80df70b28..0ece698f859 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -5256,7 +5256,8 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) fixup_fallthru_exit_predecessor. 
*/ cfg_layout_initialize (0); FOR_EACH_BB (cur_bb) - if (cur_bb->index >= 0 && cur_bb->next_bb->index >= 0) + if (cur_bb->index >= NUM_FIXED_BLOCKS + && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS) cur_bb->aux = cur_bb->next_bb; cfg_layout_finalize (); } diff --git a/gcc/gcse.c b/gcc/gcse.c index 3a53d9e4d26..af7444e8504 100644 --- a/gcc/gcse.c +++ b/gcc/gcse.c @@ -691,7 +691,7 @@ gcse_main (rtx f ATTRIBUTE_UNUSED, FILE *file) dump_flow_info (file); /* Return if there's nothing to do, or it is too expensive. */ - if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled"))) + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1 || is_too_expensive (_("GCSE disabled"))) return 0; gcc_obstack_init (&gcse_obstack); @@ -6519,7 +6519,8 @@ bypass_jumps (FILE *file) dump_flow_info (file); /* Return if there's nothing to do, or it is too expensive. */ - if (n_basic_blocks <= 1 || is_too_expensive (_ ("jump bypassing disabled"))) + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1 || + is_too_expensive (_ ("jump bypassing disabled"))) return 0; gcc_obstack_init (&gcse_obstack); diff --git a/gcc/global.c b/gcc/global.c index d4e2b773559..e5ecd3266ef 100644 --- a/gcc/global.c +++ b/gcc/global.c @@ -621,7 +621,7 @@ global_alloc (FILE *file) #if 0 /* We need to eliminate regs even if there is no rtl code, for the sake of debugging information. 
*/ - if (n_basic_blocks > 0) + if (n_basic_blocks > NUM_FIXED_BLOCKS) #endif { build_insn_chain (get_insns ()); @@ -2281,9 +2281,9 @@ set_up_bb_rts_numbers (void) int i; int *rts_order; - rts_order = xmalloc (sizeof (int) * n_basic_blocks); + rts_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS)); flow_reverse_top_sort_order_compute (rts_order); - for (i = 0; i < n_basic_blocks; i++) + for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++) BB_INFO_BY_INDEX (rts_order [i])->rts_number = i; free (rts_order); } diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c index e7eec87bfc1..d89253573ed 100644 --- a/gcc/ifcvt.c +++ b/gcc/ifcvt.c @@ -3194,14 +3194,14 @@ find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge) return FALSE; /* THEN is not EXIT. */ - if (then_bb->index < 0) + if (then_bb->index < NUM_FIXED_BLOCKS) return FALSE; /* ELSE is predicted or SUCC(ELSE) postdominates THEN. */ note = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX); if (note && INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ; - else if (else_succ->dest->index < 0 + else if (else_succ->dest->index < NUM_FIXED_BLOCKS || dominated_by_p (CDI_POST_DOMINATORS, then_bb, else_succ->dest)) ; diff --git a/gcc/lcm.c b/gcc/lcm.c index 000e530a2ad..63910050df0 100644 --- a/gcc/lcm.c +++ b/gcc/lcm.c @@ -122,8 +122,8 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, } qin = worklist; - qend = &worklist[n_basic_blocks]; - qlen = n_basic_blocks; + qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS]; + qlen = n_basic_blocks - NUM_FIXED_BLOCKS; /* Mark blocks which are predecessors of the exit block so that we can easily identify them below. */ @@ -260,7 +260,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, list if they were not already on the list. So the size is bounded by the number of basic blocks. 
*/ qin = qout = worklist - = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1)); + = xmalloc (sizeof (basic_block) * n_basic_blocks); /* Initialize a mapping from each edge to its index. */ for (i = 0; i < num_edges; i++) @@ -294,11 +294,10 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, } /* Note that we do not use the last allocated element for our queue, - as EXIT_BLOCK is never inserted into it. In fact the above allocation - of n_basic_blocks + 1 elements is not necessary. */ + as EXIT_BLOCK is never inserted into it. */ qin = worklist; - qend = &worklist[n_basic_blocks]; - qlen = n_basic_blocks; + qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS]; + qlen = n_basic_blocks - NUM_FIXED_BLOCKS; /* Iterate until the worklist is empty. */ while (qlen) @@ -485,7 +484,8 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is bounded by the number of basic blocks. */ - qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); + qin = qout = worklist = + xmalloc (sizeof (basic_block) * (n_basic_blocks - NUM_FIXED_BLOCKS)); /* We want a maximal solution. */ sbitmap_vector_ones (avout, last_basic_block); @@ -499,8 +499,8 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, } qin = worklist; - qend = &worklist[n_basic_blocks]; - qlen = n_basic_blocks; + qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS]; + qlen = n_basic_blocks - NUM_FIXED_BLOCKS; /* Mark blocks which are successors of the entry block so that we can easily identify them below. */ diff --git a/gcc/predict.c b/gcc/predict.c index 5cd3cb61bd3..2081efad81d 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -707,7 +707,7 @@ predict_loops (struct loops *loops_info, bool rtlsimpleloops) conditional has no loop header successors as not taken. 
*/ if (!header_found) FOR_EACH_EDGE (e, ei, bb->succs) - if (e->dest->index < 0 + if (e->dest->index < NUM_FIXED_BLOCKS || !flow_bb_inside_loop_p (loop, e->dest)) predict_edge (e, PRED_LOOP_EXIT, @@ -1271,7 +1271,7 @@ tree_bb_level_predictions (void) int *heads; heads = xmalloc (sizeof (int) * last_basic_block); - memset (heads, -1, sizeof (int) * last_basic_block); + memset (heads, ENTRY_BLOCK, sizeof (int) * last_basic_block); heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block; apply_return_prediction (heads); @@ -1500,7 +1500,7 @@ predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred, edge_iterator ei; int y; - if (heads[bb->index] < 0) + if (heads[bb->index] == ENTRY_BLOCK) { /* This is first time we need this field in heads array; so find first dominator that we do not post-dominate (we are @@ -1509,7 +1509,7 @@ predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred, basic_block next_ai = get_immediate_dominator (CDI_DOMINATORS, bb); int head; - while (heads[next_ai->index] < 0) + while (heads[next_ai->index] == ENTRY_BLOCK) { if (!dominated_by_p (CDI_POST_DOMINATORS, next_ai, bb)) break; @@ -1524,10 +1524,7 @@ predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred, while (next_ai != bb) { next_ai = ai; - if (heads[ai->index] == ENTRY_BLOCK) - ai = ENTRY_BLOCK_PTR; - else - ai = BASIC_BLOCK (heads[ai->index]); + ai = BASIC_BLOCK (heads[ai->index]); heads[next_ai->index] = head; } } @@ -1538,7 +1535,7 @@ predict_paths_leading_to (basic_block bb, int *heads, enum br_predictor pred, if (y == last_basic_block) return; FOR_EACH_EDGE (e, ei, BASIC_BLOCK (y)->succs) - if (e->dest->index >= 0 + if (e->dest->index >= NUM_FIXED_BLOCKS && dominated_by_p (CDI_POST_DOMINATORS, e->dest, bb)) predict_edge_def (e, pred, taken); } @@ -1596,12 +1593,7 @@ propagate_freq (struct loop *loop, bitmap tovisit) /* The outermost "loop" includes the exit block, which we can not look up via BASIC_BLOCK. 
Detect this and use EXIT_BLOCK_PTR directly. Do the same for the entry block. */ - if (i == (unsigned)ENTRY_BLOCK) - bb = ENTRY_BLOCK_PTR; - else if (i == (unsigned)EXIT_BLOCK) - bb = EXIT_BLOCK_PTR; - else - bb = BASIC_BLOCK (i); + bb = BASIC_BLOCK (i); FOR_EACH_EDGE (e, ei, bb->preds) { diff --git a/gcc/profile.c b/gcc/profile.c index d260d66e106..45abbc40926 100644 --- a/gcc/profile.c +++ b/gcc/profile.c @@ -531,7 +531,7 @@ compute_branch_probabilities (void) { FOR_EACH_EDGE (e, ei, bb->succs) e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count; - if (bb->index >= 0 + if (bb->index >= NUM_FIXED_BLOCKS && block_ends_with_condjump_p (bb) && EDGE_COUNT (bb->succs) >= 2) { @@ -609,7 +609,7 @@ compute_branch_probabilities (void) FOR_EACH_EDGE (e, ei, bb->succs) e->probability = REG_BR_PROB_BASE / total; } - if (bb->index >= 0 + if (bb->index >= NUM_FIXED_BLOCKS && block_ends_with_condjump_p (bb) && EDGE_COUNT (bb->succs) >= 2) num_branches++, num_never_executed; @@ -704,7 +704,9 @@ compute_value_histograms (histogram_values values) free (histogram_counts[t]); } -#define BB_TO_GCOV_INDEX(bb) ((bb)->index + 1) +/* The entry basic block will be moved around so that it has index=1, + there is nothing at index 0 and the exit is at n_basic_block. */ +#define BB_TO_GCOV_INDEX(bb) ((bb)->index - 1) /* When passed NULL as file_name, initialize. When passed something else, output the necessary commands to change line to LINE and offset to FILE_NAME. 
*/ @@ -917,7 +919,7 @@ branch_prob (void) num_instrumented++; } - total_num_blocks += n_basic_blocks + 2; + total_num_blocks += n_basic_blocks; if (dump_file) fprintf (dump_file, "%d basic blocks\n", n_basic_blocks); @@ -938,7 +940,7 @@ branch_prob (void) gcov_position_t offset; offset = gcov_write_tag (GCOV_TAG_BLOCKS); - for (i = 0; i != (unsigned) (n_basic_blocks + 2); i++) + for (i = 0; i != (unsigned) (n_basic_blocks); i++) gcov_write_unsigned (0); gcov_write_length (offset); } @@ -946,7 +948,7 @@ branch_prob (void) /* Keep all basic block indexes nonnegative in the gcov output. Index 0 is used for entry block, last index is for exit block. */ - ENTRY_BLOCK_PTR->index = -1; + ENTRY_BLOCK_PTR->index = 1; EXIT_BLOCK_PTR->index = last_basic_block; /* Arcs */ diff --git a/gcc/regrename.c b/gcc/regrename.c index 06b1c5b7480..c9e1ac51406 100644 --- a/gcc/regrename.c +++ b/gcc/regrename.c @@ -1776,20 +1776,19 @@ copyprop_hardreg_forward (void) all_vd = xmalloc (sizeof (struct value_data) * last_basic_block); - visited = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1)); + visited = sbitmap_alloc (last_basic_block); sbitmap_zero (visited); FOR_EACH_BB (bb) { - SET_BIT (visited, bb->index - (INVALID_BLOCK + 1)); + SET_BIT (visited, bb->index); /* If a block has a single predecessor, that we've already processed, begin with the value data that was live at the end of the predecessor block. */ /* ??? Ought to use more intelligent queuing of blocks. */ - if (single_pred_p (bb) - && TEST_BIT (visited, - single_pred (bb)->index - (INVALID_BLOCK + 1)) + if (single_pred_p (bb) + && TEST_BIT (visited, single_pred (bb)->index) && ! 
(single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))) all_vd[bb->index] = all_vd[single_pred (bb)->index]; else diff --git a/gcc/reorg.c b/gcc/reorg.c index 431ef3c116e..9b06bb23924 100644 --- a/gcc/reorg.c +++ b/gcc/reorg.c @@ -3539,7 +3539,7 @@ dbr_schedule (rtx first, FILE *file) /* If the current function has no insns other than the prologue and epilogue, then do not try to fill any delay slots. */ - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return; /* Find the highest INSN_UID and allocate and initialize our map from diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c index 58f5d338ecd..a9a7cacecd0 100644 --- a/gcc/sched-ebb.c +++ b/gcc/sched-ebb.c @@ -568,7 +568,7 @@ schedule_ebbs (FILE *dump_file) /* Taking care of this degenerate case makes the rest of this code simpler. */ - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return; sched_init (dump_file); diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index 69bd3598576..f66966d6adf 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -999,7 +999,7 @@ compute_trg_info (int trg) sp->is_speculative = 0; sp->src_prob = 100; - visited = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1)); + visited = sbitmap_alloc (last_basic_block); for (i = trg + 1; i < current_nr_blocks; i++) { @@ -1044,8 +1044,7 @@ compute_trg_info (int trg) block = el.first_member[j]->src; FOR_EACH_EDGE (e, ei, block->succs) { - if (!TEST_BIT (visited, - e->dest->index - (INVALID_BLOCK + 1))) + if (!TEST_BIT (visited, e->dest->index)) { for (k = 0; k < el.nr_members; k++) if (e == el.first_member[k]) @@ -1054,8 +1053,7 @@ compute_trg_info (int trg) if (k >= el.nr_members) { bblst_table[bblst_last++] = e->dest; - SET_BIT (visited, - e->dest->index - (INVALID_BLOCK + 1)); + SET_BIT (visited, e->dest->index); update_idx++; } } @@ -2469,7 +2467,7 @@ init_regions (void) /* Compute regions for scheduling. 
*/ if (reload_completed - || n_basic_blocks == 1 + || n_basic_blocks == NUM_FIXED_BLOCKS + 1 || !flag_schedule_interblock || is_cfg_nonregular ()) { @@ -2526,18 +2524,16 @@ schedule_insns (FILE *dump_file) /* Taking care of this degenerate case makes the rest of this code simpler. */ - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return; nr_inter = 0; nr_spec = 0; - sched_init (dump_file); init_regions (); current_sched_info = ®ion_sched_info; - /* Schedule every region in the subroutine. */ for (rgn = 0; rgn < nr_regions; rgn++) schedule_region (rgn); diff --git a/gcc/tracer.c b/gcc/tracer.c index 27f06c57d89..b65c316e9fd 100644 --- a/gcc/tracer.c +++ b/gcc/tracer.c @@ -72,7 +72,7 @@ static int branch_ratio_cutoff; static bool ignore_bb_p (basic_block bb) { - if (bb->index < 0) + if (bb->index < NUM_FIXED_BLOCKS) return true; if (!maybe_hot_bb_p (bb)) return true; @@ -363,7 +363,7 @@ layout_superblocks (void) void tracer (unsigned int flags) { - if (n_basic_blocks <= 1) + if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1) return; cfg_layout_initialize (flags); diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index fd6d1c703ee..6c0d1c0754b 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -129,14 +129,16 @@ init_empty_tree_cfg (void) /* Initialize the basic block array. */ init_flow (); profile_status = PROFILE_ABSENT; - n_basic_blocks = 0; - last_basic_block = 0; + n_basic_blocks = NUM_FIXED_BLOCKS; + last_basic_block = NUM_FIXED_BLOCKS; VARRAY_BB_INIT (basic_block_info, initial_cfg_capacity, "basic_block_info"); /* Build a mapping of labels to their associated blocks. 
*/ VARRAY_BB_INIT (label_to_block_map, initial_cfg_capacity, "label to block map"); + BASIC_BLOCK (ENTRY_BLOCK) = ENTRY_BLOCK_PTR; + BASIC_BLOCK (EXIT_BLOCK) = EXIT_BLOCK_PTR; ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR; EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR; } @@ -170,7 +172,7 @@ build_tree_cfg (tree *tp) factor_computed_gotos (); /* Make sure there is always at least one block, even if it's empty. */ - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) create_empty_bb (ENTRY_BLOCK_PTR); /* Adjust the size of the array. */ @@ -430,7 +432,7 @@ make_edges (void) /* Create an edge from entry to the first block with executable statements in it. */ - make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (0), EDGE_FALLTHRU); + make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (NUM_FIXED_BLOCKS), EDGE_FALLTHRU); /* Traverse the basic block array placing edges. */ FOR_EACH_BB (bb) @@ -794,7 +796,8 @@ label_to_block_fn (struct function *ifun, tree dest) and undefined variable warnings quite right. */ if ((errorcount || sorrycount) && uid < 0) { - block_stmt_iterator bsi = bsi_start (BASIC_BLOCK (0)); + block_stmt_iterator bsi = + bsi_start (BASIC_BLOCK (NUM_FIXED_BLOCKS)); tree stmt; stmt = build1 (LABEL_EXPR, void_type_node, dest); @@ -4656,7 +4659,7 @@ print_loop_ir (FILE *file) { basic_block bb; - bb = BASIC_BLOCK (0); + bb = BASIC_BLOCK (NUM_FIXED_BLOCKS); if (bb && bb->loop_father) print_loop (file, bb->loop_father, 0); } @@ -4738,7 +4741,7 @@ tree_flow_call_edges_add (sbitmap blocks) int last_bb = last_basic_block; bool check_last_block = false; - if (n_basic_blocks == 0) + if (n_basic_blocks == NUM_FIXED_BLOCKS) return 0; if (! blocks) diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c index 913fd5751c0..6b52fa76083 100644 --- a/gcc/tree-dfa.c +++ b/gcc/tree-dfa.c @@ -484,10 +484,11 @@ collect_dfa_stats (struct dfa_stats_d *dfa_stats_p) memset ((void *)dfa_stats_p, 0, sizeof (struct dfa_stats_d)); /* Walk all the trees in the function counting references. 
Start at - basic block 0, but don't stop at block boundaries. */ + basic block NUM_FIXED_BLOCKS, but don't stop at block boundaries. */ pset = pointer_set_create (); - for (i = bsi_start (BASIC_BLOCK (0)); !bsi_end_p (i); bsi_next (&i)) + for (i = bsi_start (BASIC_BLOCK (NUM_FIXED_BLOCKS)); + !bsi_end_p (i); bsi_next (&i)) walk_tree (bsi_stmt_ptr (i), collect_dfa_stats_r, (void *) dfa_stats_p, pset); diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h index 7aae23f967c..56104b1b700 100644 --- a/gcc/tree-flow-inline.h +++ b/gcc/tree-flow-inline.h @@ -702,7 +702,7 @@ bsi_start (basic_block bb) bsi.tsi = tsi_start (bb->stmt_list); else { - gcc_assert (bb->index < 0); + gcc_assert (bb->index < NUM_FIXED_BLOCKS); bsi.tsi.ptr = NULL; bsi.tsi.container = NULL; } @@ -723,7 +723,7 @@ bsi_after_labels (basic_block bb) if (!bb->stmt_list) { - gcc_assert (bb->index < 0); + gcc_assert (bb->index < NUM_FIXED_BLOCKS); bsi.tsi.ptr = NULL; bsi.tsi.container = NULL; return bsi; @@ -756,7 +756,7 @@ bsi_last (basic_block bb) bsi.tsi = tsi_last (bb->stmt_list); else { - gcc_assert (bb->index < 0); + gcc_assert (bb->index < NUM_FIXED_BLOCKS); bsi.tsi.ptr = NULL; bsi.tsi.container = NULL; } diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c index 277e7339609..43938d7bbe2 100644 --- a/gcc/tree-ssa-phiopt.c +++ b/gcc/tree-ssa-phiopt.c @@ -148,9 +148,9 @@ tree_ssa_phiopt (void) outer ones, and also that we do not try to visit a removed block. 
*/ bb_order = blocks_in_phiopt_order (); - n = n_basic_blocks; + n = n_basic_blocks - NUM_FIXED_BLOCKS; - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { tree cond_expr; tree phi; @@ -248,11 +248,12 @@ blocks_in_phiopt_order (void) { basic_block x, y; basic_block *order = xmalloc (sizeof (basic_block) * n_basic_blocks); - unsigned n = n_basic_blocks, np, i; - sbitmap visited = sbitmap_alloc (last_basic_block + 2); + unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS; + unsigned np, i; + sbitmap visited = sbitmap_alloc (last_basic_block); -#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index + 2)) -#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index + 2)) +#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index)) +#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index)) sbitmap_zero (visited); diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c index edca241ef40..b8dfbfc6946 100644 --- a/gcc/tree-ssa-reassoc.c +++ b/gcc/tree-ssa-reassoc.c @@ -234,7 +234,6 @@ operand_entry_eq (const void *p1, const void *p2) return vr1->op == vr2->op; } - /* Given an expression E, return the rank of the expression. */ static unsigned int @@ -1452,7 +1451,7 @@ init_reassoc (void) } /* Set up rank for each BB */ - for (i = 0; i < n_basic_blocks; i++) + for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++) bb_rank[bbs[i]] = ++rank << 16; free (bbs); diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index fb4a1813218..766f6ef3aa1 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -1638,10 +1638,10 @@ vt_find_locations (void) /* Compute reverse completion order of depth first search of the CFG so that the data-flow runs faster. 
*/ - rc_order = xmalloc (n_basic_blocks * sizeof (int)); + rc_order = xmalloc ((n_basic_blocks - NUM_FIXED_BLOCKS) * sizeof (int)); bb_order = xmalloc (last_basic_block * sizeof (int)); flow_depth_first_order_compute (NULL, rc_order); - for (i = 0; i < n_basic_blocks; i++) + for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++) bb_order[rc_order[i]] = i; free (rc_order); |