From 5fe7bfff04dc8ee065bde19dba83240fee8a64ca Mon Sep 17 00:00:00 2001
From: dberlin
Date: Fri, 4 Jan 2002 15:23:30 +0000
Subject: 2002-01-04 Daniel Berlin

	* lcm.c: Include df.h.  Add available_transfer_function prototype.
	(compute_available): Rework to use iterative dataflow framework.
	(struct bb_info): s/bb_info/lcm_bb_info/g to avoid conflict with
	bb_info in df.h.
	(available_transfer_function): New function.

	* Makefile.in (lcm.o): Add df.h to dependencies.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@48536 138bc75d-0d04-0410-961f-82ee72b054a4
---
 gcc/lcm.c | 139 ++++++++++++++++++++++----------------------------------------
 1 file changed, 49 insertions(+), 90 deletions(-)

diff --git a/gcc/lcm.c b/gcc/lcm.c
index a1e6845757c..7c5015324fb 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -60,6 +60,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "recog.h"
 #include "basic-block.h"
 #include "tm_p.h"
+#include "df.h"
 
 /* We want target macros for the mode switching code to be able to refer
    to instruction attribute values.  */
@@ -92,9 +93,11 @@ static void compute_rev_insert_delete	PARAMS ((struct edge_list *edge_list,
 					 sbitmap *, sbitmap *, sbitmap *,
 					 sbitmap *, sbitmap *));
 
+
+static void available_transfer_function PARAMS ((int, int *, sbitmap, sbitmap,
+						 sbitmap, sbitmap, void *));
 /* Edge based lcm routines.  */
-
 /* Compute expression anticipatability at entrance and exit of each block.
    This is done based on the flow graph, and not on the pred-succ lists.
    Other than that, its pretty much identical to compute_antinout.  */
 
@@ -110,7 +113,6 @@ compute_antinout_edge (antloc, transp, antin, antout)
   edge e;
   basic_block *worklist, *qin, *qout, *qend;
   unsigned int qlen;
-
   /* Allocate a worklist array/queue.  Entries are only added to the
      list if they were not already on the list.  So the size is
      bounded by the number of basic blocks.  */
@@ -145,7 +147,6 @@ compute_antinout_edge (antloc, transp, antin, antout)
       basic_block b = *qout++;
       bb = b->index;
       qlen--;
-
       if (qout >= qend)
 	qout = worklist;
 
@@ -487,90 +488,48 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
 
   return edge_list;
 }
-
-/* Compute the AVIN and AVOUT vectors from the AVLOC and KILL vectors.
-   Return the number of passes we performed to iterate to a solution.  */
-
+/* Availability transfer function */
+static void
+available_transfer_function (bb, changed, in, out, gen, kill, data)
+     int bb ATTRIBUTE_UNUSED;
+     int *changed;
+     sbitmap in,out,gen,kill;
+     void *data ATTRIBUTE_UNUSED;
+{
+  *changed = sbitmap_union_of_diff (out, gen, in, kill);
+}
+/* Compute the AVIN and AVOUT vectors from the AVLOC and KILL
+   vectors.  */
 void
 compute_available (avloc, kill, avout, avin)
-     sbitmap *avloc, *kill, *avout, *avin;
+     sbitmap *avloc;
+     sbitmap *kill;
+     sbitmap *avout;
+     sbitmap *avin;
 {
-  int bb;
-  edge e;
-  basic_block *worklist, *qin, *qout, *qend;
-  unsigned int qlen;
-
-  /* Allocate a worklist array/queue.  Entries are only added to the
-     list if they were not already on the list.  So the size is
-     bounded by the number of basic blocks.  */
-  qin = qout = worklist
-    = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
-
-  /* We want a maximal solution.  */
+  int *dfs_order;
+  int *rc_order;
+  bitmap blocks;
+  int *inverse_rc_map;
+  int i;
+  dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
+  rc_order = xmalloc (sizeof (int) * n_basic_blocks);
+  inverse_rc_map = xmalloc (sizeof (int) * n_basic_blocks);
+  flow_depth_first_order_compute (dfs_order, rc_order);
+  blocks = BITMAP_XMALLOC ();
+  for (i = 0; i < n_basic_blocks; i ++)
+    {
+      inverse_rc_map[rc_order[i]] = i;
+      bitmap_set_bit (blocks, i);
+    }
   sbitmap_vector_ones (avout, n_basic_blocks);
-
-  /* Put every block on the worklist; this is necessary because of the
-     optimistic initialization of AVOUT above.  */
-  for (bb = 0; bb < n_basic_blocks; bb++)
-    {
-      *qin++ = BASIC_BLOCK (bb);
-      BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
-    }
-
-  qin = worklist;
-  qend = &worklist[n_basic_blocks];
-  qlen = n_basic_blocks;
-
-  /* Mark blocks which are successors of the entry block so that we
-     can easily identify them below.  */
-  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
-    e->dest->aux = ENTRY_BLOCK_PTR;
-
-  /* Iterate until the worklist is empty.  */
-  while (qlen)
-    {
-      /* Take the first entry off the worklist.  */
-      basic_block b = *qout++;
-      bb = b->index;
-      qlen--;
-
-      if (qout >= qend)
-	qout = worklist;
-
-      /* If one of the predecessor blocks is the ENTRY block, then the
-	 intersection of avouts is the null set.  We can identify such blocks
-	 by the special value in the AUX field in the block structure.  */
-      if (b->aux == ENTRY_BLOCK_PTR)
-	/* Do not clear the aux field for blocks which are successors of the
-	   ENTRY block.  That way we never add then to the worklist again.  */
-	sbitmap_zero (avin[bb]);
-      else
-	{
-	  /* Clear the aux field of this block so that it can be added to
-	     the worklist again if necessary.  */
-	  b->aux = NULL;
-	  sbitmap_intersection_of_preds (avin[bb], avout, bb);
-	}
-
-      if (sbitmap_union_of_diff (avout[bb], avloc[bb], avin[bb], kill[bb]))
-	/* If the out state of this block changed, then we need
-	   to add the successors of this block to the worklist
-	   if they are not already on the worklist.  */
-	for (e = b->succ; e; e = e->succ_next)
-	  if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
-	    {
-	      *qin++ = e->dest;
-	      e->dest->aux = e;
-	      qlen++;
-
-	      if (qin >= qend)
-		qin = worklist;
-	    }
-    }
-
-  clear_aux_for_edges ();
-  clear_aux_for_blocks ();
-  free (worklist);
+  iterative_dataflow_sbitmap (avin, avout, avloc, kill, blocks,
+			      FORWARD, INTERSECTION,
+			      available_transfer_function, inverse_rc_map, 0);
+  BITMAP_XFREE (blocks);
+  free (dfs_order);
+  free (rc_order);
+  free (inverse_rc_map);
 }
 
 /* Compute the farthest vector for edge based lcm.  */
@@ -876,7 +835,7 @@ struct seginfo
   HARD_REG_SET regs_live;
 };
 
-struct bb_info
+struct lcm_bb_info
 {
   struct seginfo *seginfo;
   int computing;
@@ -892,7 +851,7 @@ static sbitmap *delete;
 static sbitmap *insert;
 
 static struct seginfo * new_seginfo PARAMS ((int, rtx, int, HARD_REG_SET));
-static void add_seginfo PARAMS ((struct bb_info *, struct seginfo *));
+static void add_seginfo PARAMS ((struct lcm_bb_info *, struct seginfo *));
 static void reg_dies PARAMS ((rtx, HARD_REG_SET));
 static void reg_becomes_live PARAMS ((rtx, rtx, void *));
 static void make_preds_opaque PARAMS ((basic_block, int));
@@ -926,7 +885,7 @@ new_seginfo (mode, insn, bb, regs_live)
 
 static void
 add_seginfo (head, info)
-     struct bb_info *head;
+     struct lcm_bb_info *head;
      struct seginfo *info;
 {
   struct seginfo *ptr;
@@ -1025,7 +984,7 @@ optimize_mode_switching (file)
   static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
 #define N_ENTITIES (sizeof num_modes / sizeof (int))
   int entity_map[N_ENTITIES];
-  struct bb_info *bb_info[N_ENTITIES];
+  struct lcm_bb_info *bb_info[N_ENTITIES];
   int i, j;
   int n_entities;
   int max_num_modes = 0;
@@ -1041,7 +1000,7 @@ optimize_mode_switching (file)
       {
 	/* Create the list of segments within each basic block.  */
 	bb_info[n_entities]
-	  = (struct bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
+	  = (struct lcm_bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
 	entity_map[n_entities++] = e;
 	if (num_modes[e] > max_num_modes)
 	  max_num_modes = num_modes[e];
@@ -1080,7 +1039,7 @@ optimize_mode_switching (file)
     {
       int e = entity_map[j];
       int no_mode = num_modes[e];
-      struct bb_info *info = bb_info[j];
+      struct lcm_bb_info *info = bb_info[j];
 
       /* Determine what the first use (if any) need for a mode of entity E is.
 	 This will be the mode that is anticipatable for this block.
@@ -1182,7 +1141,7 @@ optimize_mode_switching (file)
       for (j = n_entities - 1; j >= 0; j--)
 	{
 	  int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
-	  struct bb_info *info = bb_info[j];
+	  struct lcm_bb_info *info = bb_info[j];
 
 	  for (bb = 0 ; bb < n_basic_blocks; bb++)
 	    {
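
Note on the rework above: available_transfer_function hands the iterative solver the per-block update AVOUT = AVLOC | (AVIN & ~KILL), while compute_available seeds every AVOUT with all-ones and lets the engine intersect predecessor AVOUT sets until nothing changes. The standalone sketch below models that same fixed-point computation on a hard-coded four-block CFG, using plain unsigned bitsets in place of sbitmaps; it is illustrative only, and none of its names (N_BLOCKS, kill_set, and so on) belong to GCC's df.h interface.

/* Standalone model of the available-expressions problem that the patch
   moves into the iterative dataflow framework.  Bitsets are plain
   unsigned ints instead of sbitmaps, and the CFG is hard-coded; every
   name here is illustrative and is not part of GCC's df.h interface.  */

#include <stdio.h>

#define N_BLOCKS 4
#define ALL_EXPRS 0xfu          /* four tracked expressions */

/* Toy CFG: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3; block 0 is the entry.  */
static const int n_preds[N_BLOCKS] = { 0, 1, 1, 2 };
static const int preds[N_BLOCKS][2] = { {0, 0}, {0, 0}, {0, 0}, {1, 2} };

/* Per-block AVLOC (gen) and KILL sets, picked arbitrarily.  */
static const unsigned avloc[N_BLOCKS] = { 0x3, 0x4, 0x0, 0x8 };
static const unsigned kill_set[N_BLOCKS] = { 0x0, 0x1, 0x2, 0x0 };

int
main (void)
{
  unsigned avin[N_BLOCKS], avout[N_BLOCKS];
  int changed = 1;
  int bb, i;

  /* Optimistic initialization, mirroring
     sbitmap_vector_ones (avout, n_basic_blocks).  */
  for (bb = 0; bb < N_BLOCKS; bb++)
    avout[bb] = ALL_EXPRS;

  /* Iterate to a fixed point, as the iterative dataflow engine does.  */
  while (changed)
    {
      changed = 0;
      for (bb = 0; bb < N_BLOCKS; bb++)
        {
          /* Confluence: AVIN is the intersection of the predecessors'
             AVOUT sets, and empty at the entry block.  */
          unsigned in = n_preds[bb] ? ALL_EXPRS : 0;
          unsigned out;

          for (i = 0; i < n_preds[bb]; i++)
            in &= avout[preds[bb][i]];

          /* Transfer: AVOUT = AVLOC | (AVIN & ~KILL), the update that
             available_transfer_function performs with sbitmaps.  */
          out = avloc[bb] | (in & ~kill_set[bb]);
          if (out != avout[bb])
            changed = 1;

          avin[bb] = in;
          avout[bb] = out;
        }
    }

  for (bb = 0; bb < N_BLOCKS; bb++)
    printf ("bb %d: avin = %#x, avout = %#x\n", bb, avin[bb], avout[bb]);
  return 0;
}

On this toy graph the loop stabilizes on its second sweep; the real compute_available additionally feeds the solver a reverse-completion ordering (inverse_rc_map) so the forward problem converges in few passes over large flow graphs.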