author    zadeck <zadeck@138bc75d-0d04-0410-961f-82ee72b054a4>  2006-01-11 12:57:18 +0000
committer zadeck <zadeck@138bc75d-0d04-0410-961f-82ee72b054a4>  2006-01-11 12:57:18 +0000
commit    e011eba9edce426cbde12b0b43cd4b274b2172d0 (patch)
tree      458130d4608c530b1bd76381cc853507472b4512 /gcc/df-scan.c
parent    bef994231589eb141452b52e2159aa5cab657c3c (diff)
download  gcc-e011eba9edce426cbde12b0b43cd4b274b2172d0.tar.gz
2006-01-11 Danny Berlin <dberlin@dberlin.org>
Kenneth Zadeck <zadeck@naturalbridge.com>
* df.h (DF_SCAN, DF_RU, DF_RD, DF_LR, DF_UR, DF_UREC, DF_CHAIN,
DF_RI, DF_LAST_PROBLEM_PLUS1, DF_DU_CHAIN, DF_UD_CHAIN,
DF_REF_TYPE_NAMES, DF_HARD_REGS, DF_EQUIV_NOTES, DF_SUBREGS,
DF_SCAN_BB_INFO, DF_RU_BB_INFO, DF_RD_BB_INFO, DF_LR_BB_INFO,
DF_UR_BB_INFO, DF_UREC_BB_INFO, DF_LIVE_IN, DF_LIVE_OUT,
DF_RA_LIVE_IN, DF_RA_LIVE_OUT, DF_UPWARD_LIVE_IN,
DF_UPWARD_LIVE_OUT, DF_REF_REAL_REG, DF_REF_REGNO,
DF_REF_REAL_LOC, DF_REF_REG, DF_REF_LOC, DF_REF_BB, DF_REF_BBNO,
DF_REF_INSN, DF_REF_INSN_UID, DF_REF_TYPE, DF_REF_CHAIN,
DF_REF_ID, DF_REF_FLAGS, DF_REF_NEXT_REG, DF_REF_PREV_REG,
DF_REF_NEXT_REF, DF_REF_DATA, DF_REF_REG_DEF_P, DF_REF_REG_USE_P,
DF_REF_REG_MEM_STORE_P, DF_REF_REG_MEM_LOAD_P, DF_REF_REG_MEM_P,
DF_DEFS_SIZE, DF_DEFS_GET, DF_DEFS_SET, DF_USES_SIZE, DF_USES_GET,
DF_USES_SET, DF_REG_SIZE, DF_REG_DEF_GET, DF_REG_DEF_SET,
DF_REG_USE_GET, DF_REG_USE_SET, DF_REGNO_FIRST_DEF,
DF_REGNO_LAST_USE, DF_INSN_SIZE, DF_INSN_GET, DF_INSN_SET,
DF_INSN_CONTAINS_ASM, DF_INSN_LUID, DF_INSN_DEFS, DF_INSN_USES,
DF_INSN_UID_GET, DF_INSN_UID_LUID, DF_INSN_UID_DEFS,
DF_INSN_UID_USES, DF_SCAN_INITIAL, DF_SCAN_GLOBAL,
DF_SCAN_POST_ALLOC): New macros.
(df_flow_dir, df_ref_type, df_ref_flags, df_alloc_function,
df_free_bb_function, df_local_compute_function, df_init_function,
df_dataflow_function, df_confluence_function_0,
df_confluence_function_n, df_transfer_function,
df_finalizer_function, df_free_function, df_dump_problem_function,
df_problem, dataflow, df_insn_info, df_reg_info, df_ref, df_link,
df_ref_info, df, df_map, df_scan_bb_info, df_ru_bb_info,
df_rd_bb_info, df_lr_bb_info, df_ur_bb_info,
df_urec_bb_info): New types.
(df_invalidated_by_call, df_all_hard_regs, df_state): New public
variables.
(df_init, df_add_problem, df_set_blocks, df_finish, df_analyze,
df_analyze_simple_change_some_blocks,
df_analyze_simple_change_one_block, df_compact_blocks,
df_bb_replace, df_bb_regno_last_use_find,
df_bb_regno_first_def_find, df_bb_regno_last_def_find,
df_insn_regno_def_p, df_find_def, df_find_use,
df_iterative_dataflow, df_dump, df_chain_dump, df_refs_chain_dump,
df_regs_chain_dump, df_insn_debug, df_insn_debug_regno,
df_regno_debug, df_ref_debug, debug_df_insn, debug_df_regno,
debug_df_reg, debug_df_defno, debug_df_useno, debug_df_ref,
debug_df_chain, df_get_dependent_problem, df_chain_create,
df_chain_unlink, df_chain_copy, df_get_live_in, df_get_live_out,
df_grow_bb_info, df_print_bb_index,
df_ru_add_problem, df_ru_get_bb_info, df_rd_add_problem,
df_rd_get_bb_info, df_lr_add_problem, df_lr_get_bb_info,
df_ur_add_problem, df_ur_get_bb_info, df_urec_add_problem,
df_urec_get_bb_info, df_chain_add_problem, df_ri_add_problem,
df_reg_lifetime, df_scan_get_bb_info, df_scan_add_problem,
df_rescan_blocks, df_ref_create, df_get_artificial_defs,
df_get_artificial_uses, df_reg_chain_create, df_reg_chain_unlink,
df_ref_remove, df_insn_refs_delete, df_refs_delete,
df_reorganize_refs, df_set_state, df_hard_reg_init,
df_read_modify_subreg_p): New public functions.
* df-core.c: The core dataflow solver and glue routines for rtl
dataflow.
(df_init, df_add_problem, df_set_blocks, df_finish,
df_hybrid_search_forward, df_hybrid_search_backward,
df_iterative_dataflow, df_prune_to_subcfg, df_analyze_problem,
df_analyze, df_get_bb_info, df_set_bb_info, df_bb_replace,
df_bb_regno_last_use_find, df_bb_regno_first_def_find,
df_bb_regno_last_def_find, df_insn_regno_def_p, df_find_def,
df_reg_defined, df_find_use, df_reg_used, df_dump,
df_refs_chain_dump, df_regs_chain_dump, df_insn_debug,
df_insn_debug_regno, df_regno_debug, df_ref_debug, debug_df_insn,
debug_df_reg, debug_df_regno, debug_df_ref, debug_df_defno,
debug_df_useno, reset_df_after_reload): New functions.
* df-scan.c: The scanning functions, once in df.c, completely
rewritten so that they now fully model the functionality of
register usage in the backend.
(df_scan_free_internal, df_scan_get_bb_info, df_scan_set_bb_info,
df_scan_free_bb_info, df_scan_alloc, df_scan_free, df_scan_dump,
df_scan_add_problem, df_grow_reg_info, df_grow_ref_info,
df_grow_insn_info, df_rescan_blocks, df_ref_create,
df_get_artificial_defs, df_get_artificial_uses,
df_reg_chain_create, df_ref_unlink, df_reg_chain_unlink,
df_ref_remove, df_insn_create_insn_record, df_insn_refs_delete,
df_refs_delete, df_reorganize_refs, df_set_state,
df_ref_create_structure, df_ref_record, df_read_modify_subreg_p,
df_def_record_1, df_defs_record, df_uses_record,
df_insn_contains_asm_1, df_insn_contains_asm, df_insn_refs_record,
df_has_eh_preds, df_bb_refs_record, df_refs_record, df_mark_reg,
df_record_exit_block_uses, df_hard_reg_init): New functions.
* df-problems.c: Seven concrete dataflow problems that use the
scanning in df-scan.c and are solved by the engine in df-core.c.
(df_get_dependent_problem, df_chain_create, df_chain_unlink,
df_chain_copy, df_get_live_in, df_get_live_out, df_grow_bb_info,
df_chain_dump, df_print_bb_index, df_ref_bitmap, df_set_seen,
df_unset_seen, df_ru_get_bb_info, df_ru_set_bb_info,
df_ru_free_bb_info, df_ru_alloc,
df_ru_bb_local_compute_process_def,
df_ru_bb_local_compute_process_use, df_ru_bb_local_compute,
df_ru_local_compute, df_ru_init_solution, df_ru_confluence_n,
df_ru_transfer_function, df_ru_free, df_ru_dump,
df_ru_add_problem, df_rd_get_bb_info, df_rd_set_bb_info,
df_rd_free_bb_info, df_rd_alloc,
df_rd_bb_local_compute_process_def, df_rd_bb_local_compute,
df_rd_local_compute, df_rd_init_solution, df_rd_confluence_n,
df_rd_transfer_function, df_rd_free, df_rd_dump,
df_rd_add_problem, df_lr_get_bb_info, df_lr_set_bb_info,
df_lr_free_bb_info, df_lr_alloc, df_lr_bb_local_compute,
df_lr_local_compute, df_lr_init, df_lr_confluence_0,
df_lr_confluence_n, df_lr_transfer_function, df_lr_free,
df_lr_dump, df_lr_add_problem, df_ur_get_bb_info,
df_ur_set_bb_info, df_ur_free_bb_info, df_ur_alloc,
df_ur_bb_local_compute, df_ur_local_compute, df_ur_init,
df_ur_local_finalize, df_ur_confluence_n, df_ur_transfer_function,
df_ur_free, df_ur_dump, df_ur_add_problem, df_urec_get_bb_info,
df_urec_set_bb_info, df_urec_free_bb_info, df_urec_alloc,
df_urec_mark_reg_change, df_urec_check_earlyclobber,
df_urec_mark_reg_use_for_earlyclobber,
df_urec_mark_reg_use_for_earlyclobber_1, df_urec_bb_local_compute,
df_urec_local_compute, df_urec_init, df_urec_local_finalize,
df_urec_confluence_n, df_urec_transfer_function, df_urec_free,
df_urec_dump, df_urec_add_problem, df_chain_alloc,
df_chain_create_bb_process_use, df_chain_create_bb,
df_chain_finalize, df_chain_free, df_chains_dump,
df_chain_add_problem, df_ri_alloc, df_ri_bb_compute,
df_ri_compute, df_ri_free, df_ri_dump, df_ri_add_problem,
df_reg_lifetime): New functions.
* df.c: Deleted file.
* ddg.c (create_ddg_dep_no_link, build_inter_loop_deps): Made code
consistent with the new df API.
* modulo-sched.c (sms_schedule, rest_of_handle_sms): Ditto.
* web.c (unionfind_union, union_defs, entry_register, web_main):
Ditto.
* loop-invariant.c (invariant_for_use, hash_invariant_expr_1,
invariant_expr_equal_p, find_defs, check_dependencies,
find_invariant_insn, find_invariants_to_move, move_invariant_reg,
free_inv_motion_data, move_loop_invariants): Ditto.
* sched-deps.c (sched_analyze_1): Ditto.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@109577 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/df-scan.c')
-rw-r--r-- | gcc/df-scan.c | 1795 |
1 file changed, 1795 insertions, 0 deletions
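For orientation before the diff itself: the typical client lifecycle of the
new interface, inferred from the functions and macros listed in the ChangeLog
above, looks roughly like the sketch below. This is illustrative only; the
exact signatures of df_init, df_lr_add_problem, and df_chain_add_problem are
assumed from their names and from the DF_HARD_REGS, DF_EQUIV_NOTES, and
DF_UD_CHAIN macros, not quoted from the patch.

/* A minimal sketch of a df client: create an instance, register
   problems, solve, consume the solution, tear down.  */
static void
example_df_client (void)
{
  struct df *df;

  /* Create the instance; the DF_* flags select optional scanning
     behavior such as hard-register and equiv-note handling.  */
  df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES);

  /* Register the problems to solve: here, live registers plus
     use-def chains layered on top (assumed signatures).  */
  df_lr_add_problem (df);
  df_chain_add_problem (df, DF_UD_CHAIN);

  /* Scan the insns and run the iterative solver over all blocks.  */
  df_analyze (df);

  /* ... consume DF_LIVE_IN / DF_LIVE_OUT bitmaps or the chains ...  */

  /* Free all problem data and the instance itself.  */
  df_finish (df);
}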
diff --git a/gcc/df-scan.c b/gcc/df-scan.c new file mode 100644 index 00000000000..0aa07bf33c6 --- /dev/null +++ b/gcc/df-scan.c @@ -0,0 +1,1795 @@ +/* FIXME: We need to go back and add the warning messages about code + moved across setjmp. */ + + +/* Scanning of rtl for dataflow analysis. + Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 + Free Software Foundation, Inc. + Originally contributed by Michael P. Hayes + (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com) + Major rewrite contributed by Danny Berlin (dberlin@dberlin.org) + and Kenneth Zadeck (zadeck@naturalbridge.com). + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to the Free +Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA +02110-1301, USA. +*/ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "rtl.h" +#include "tm_p.h" +#include "insn-config.h" +#include "recog.h" +#include "function.h" +#include "regs.h" +#include "output.h" +#include "alloc-pool.h" +#include "flags.h" +#include "hard-reg-set.h" +#include "basic-block.h" +#include "sbitmap.h" +#include "bitmap.h" +#include "timevar.h" +#include "df.h" + +#ifndef HAVE_epilogue +#define HAVE_epilogue 0 +#endif +#ifndef HAVE_prologue +#define HAVE_prologue 0 +#endif +#ifndef HAVE_sibcall_epilogue +#define HAVE_sibcall_epilogue 0 +#endif + +#ifndef EPILOGUE_USES +#define EPILOGUE_USES(REGNO) 0 +#endif + +/* Indicates where we are in the compilation. */ +int df_state; + +/* The bitmap_obstack is used to hold some static variables that + should not be reset after each function is compiled. */ + +static bitmap_obstack persistent_obstack; + +/* The set of hard registers in eliminables[i].from. */ + +static HARD_REG_SET elim_reg_set; + +/* This is a bitmap copy of regs_invalidated_by_call so that we can + easily add it into bitmaps, etc. */ + +bitmap df_invalidated_by_call = NULL; + +/* Initialize ur_in and ur_out as if all hard registers were partially + available. 
*/ + +bitmap df_all_hard_regs = NULL; + +static void df_ref_record (struct dataflow *, rtx, rtx *, + basic_block, rtx, enum df_ref_type, + enum df_ref_flags, bool record_live); +static void df_def_record_1 (struct dataflow *, rtx, basic_block, rtx, + enum df_ref_flags, bool record_live); +static void df_defs_record (struct dataflow *, rtx, basic_block, rtx); +static void df_uses_record (struct dataflow *, rtx *, enum df_ref_type, + basic_block, rtx, enum df_ref_flags); + +static void df_insn_refs_record (struct dataflow *, basic_block, rtx); +static void df_bb_refs_record (struct dataflow *, basic_block); +static void df_refs_record (struct dataflow *, bitmap); +static struct df_ref *df_ref_create_structure (struct dataflow *, rtx, rtx *, + basic_block, rtx, enum df_ref_type, + enum df_ref_flags); +static void df_record_exit_block_uses (struct dataflow *); +static void df_grow_reg_info (struct dataflow *, struct df_ref_info *); +static void df_grow_ref_info (struct df_ref_info *, unsigned int); +static void df_grow_insn_info (struct df *); + + +/*---------------------------------------------------------------------------- + SCANNING DATAFLOW PROBLEM + + There are several ways in which scanning looks just like the other + dataflow problems. It shares the all the mechanisms for local info + as well as basic block info. Where it differs is when and how often + it gets run. It also has no need for the iterative solver. +----------------------------------------------------------------------------*/ + +/* Problem data for the scanning dataflow function. */ +struct df_scan_problem_data +{ + alloc_pool ref_pool; + alloc_pool insn_pool; + alloc_pool reg_pool; +}; + +typedef struct df_scan_bb_info *df_scan_bb_info_t; + +static void +df_scan_free_internal (struct dataflow *dflow) +{ + struct df *df = dflow->df; + struct df_scan_problem_data *problem_data = + (struct df_scan_problem_data *) dflow->problem_data; + + free (df->def_info.regs); + free (df->def_info.refs); + memset (&df->def_info, 0, (sizeof (struct df_ref_info))); + + free (df->use_info.regs); + free (df->use_info.refs); + memset (&df->use_info, 0, (sizeof (struct df_ref_info))); + + free (df->insns); + df->insns = NULL; + df->insns_size = 0; + + free (dflow->block_info); + dflow->block_info = NULL; + dflow->block_info_size = 0; + + BITMAP_FREE (df->hardware_regs_used); + BITMAP_FREE (df->exit_block_uses); + + free_alloc_pool (dflow->block_pool); + free_alloc_pool (problem_data->ref_pool); + free_alloc_pool (problem_data->insn_pool); + free_alloc_pool (problem_data->reg_pool); +} + + +/* Get basic block info. */ + +struct df_scan_bb_info * +df_scan_get_bb_info (struct dataflow *dflow, unsigned int index) +{ + gcc_assert (index < dflow->block_info_size); + return (struct df_scan_bb_info *) dflow->block_info[index]; +} + + +/* Set basic block info. */ + +static void +df_scan_set_bb_info (struct dataflow *dflow, unsigned int index, + struct df_scan_bb_info *bb_info) +{ + gcc_assert (index < dflow->block_info_size); + dflow->block_info[index] = (void *) bb_info; +} + + +/* Free basic block info. */ + +static void +df_scan_free_bb_info (struct dataflow *dflow, void *vbb_info) +{ + struct df_scan_bb_info *bb_info = (struct df_scan_bb_info *) vbb_info; + if (bb_info) + pool_free (dflow->block_pool, bb_info); +} + + +/* Allocate the problem data for the scanning problem. This should be + called when the problem is created or when the entire function is to + be rescanned. 
*/ + +static void +df_scan_alloc (struct dataflow *dflow, bitmap blocks_to_rescan) +{ + struct df *df = dflow->df; + struct df_scan_problem_data *problem_data; + unsigned int insn_num = get_max_uid () + 1; + unsigned int block_size = 50; + unsigned int bb_index; + bitmap_iterator bi; + + /* Given the number of pools, this is really faster than tearing + everything apart. */ + if (dflow->problem_data) + df_scan_free_internal (dflow); + + dflow->block_pool + = create_alloc_pool ("df_scan_block pool", + sizeof (struct df_scan_bb_info), + block_size); + + problem_data = xmalloc (sizeof (struct df_scan_problem_data)); + dflow->problem_data = problem_data; + + problem_data->ref_pool + = create_alloc_pool ("df_scan_ref pool", + sizeof (struct df_ref), block_size); + problem_data->insn_pool + = create_alloc_pool ("df_scan_insn pool", + sizeof (struct df_insn_info), block_size); + + problem_data->reg_pool + = create_alloc_pool ("df_scan_reg pool", + sizeof (struct df_reg_info), block_size); + + insn_num += insn_num / 4; + df_grow_reg_info (dflow, &df->def_info); + df_grow_ref_info (&df->def_info, insn_num); + + df_grow_reg_info (dflow, &df->use_info); + df_grow_ref_info (&df->use_info, insn_num *2); + + df_grow_insn_info (df); + df_grow_bb_info (dflow); + + EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi) + { + struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb_index); + if (!bb_info) + { + bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool); + df_scan_set_bb_info (dflow, bb_index, bb_info); + } + bb_info->artificial_defs = NULL; + bb_info->artificial_uses = NULL; + } + + df->hardware_regs_used = BITMAP_ALLOC (NULL); + df->exit_block_uses = BITMAP_ALLOC (NULL); +} + + +/* Free all of the data associated with the scan problem. */ + +static void +df_scan_free (struct dataflow *dflow) +{ + struct df *df = dflow->df; + + df_scan_free_internal (dflow); + if (df->blocks_to_scan) + BITMAP_FREE (df->blocks_to_scan); + + if (df->blocks_to_analyze) + BITMAP_FREE (df->blocks_to_analyze); + + free (dflow->problem_data); + free (dflow); +} + +static void +df_scan_dump (struct dataflow *dflow ATTRIBUTE_UNUSED, FILE *file ATTRIBUTE_UNUSED) +{ + struct df *df = dflow->df; + int i; + + fprintf (file, " all hard regs \t"); + dump_bitmap (file, df_all_hard_regs); + fprintf (file, " invalidated by call \t"); + dump_bitmap (file, df_invalidated_by_call); + fprintf (file, " hardware regs used \t"); + dump_bitmap (file, df->hardware_regs_used); + fprintf (file, " exit block uses \t"); + dump_bitmap (file, df->exit_block_uses); + fprintf (file, " regs ever live \t"); + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (regs_ever_live[i]) + fprintf (file, "%d ", i); + fprintf (file, "\n"); +} + +static struct df_problem problem_SCAN = +{ + DF_SCAN, /* Problem id. */ + DF_NONE, /* Direction. */ + df_scan_alloc, /* Allocate the problem specific data. */ + df_scan_free_bb_info, /* Free basic block info. */ + NULL, /* Local compute function. */ + NULL, /* Init the solution specific data. */ + NULL, /* Iterative solver. */ + NULL, /* Confluence operator 0. */ + NULL, /* Confluence operator n. */ + NULL, /* Transfer function. */ + NULL, /* Finalize function. */ + df_scan_free, /* Free all of the problem information. */ + df_scan_dump, /* Debugging. */ + NULL /* Dependent problem. */ +}; + + +/* Create a new DATAFLOW instance and add it to an existing instance + of DF. The returned structure is what is used to get at the + solution. 
*/ + +struct dataflow * +df_scan_add_problem (struct df *df) +{ + return df_add_problem (df, &problem_SCAN); +} + +/*---------------------------------------------------------------------------- + Storage Allocation Utilities +----------------------------------------------------------------------------*/ + + +/* First, grow the reg_info information. If the current size is less than + the number of psuedos, grow to 25% more than the number of + pseudos. + + Second, assure that all of the slots up to max_reg_num have been + filled with reg_info structures. */ + +static void +df_grow_reg_info (struct dataflow *dflow, struct df_ref_info *ref_info) +{ + unsigned int max_reg = max_reg_num (); + unsigned int new_size = max_reg; + struct df_scan_problem_data *problem_data = + (struct df_scan_problem_data *) dflow->problem_data; + unsigned int i; + + if (ref_info->regs_size < new_size) + { + new_size += new_size / 4; + ref_info->regs = xrealloc (ref_info->regs, + new_size *sizeof (struct df_reg_info*)); + ref_info->regs_size = new_size; + } + + for (i = ref_info->regs_inited; i < max_reg; i++) + { + struct df_reg_info *reg_info = pool_alloc (problem_data->reg_pool); + memset (reg_info, 0, sizeof (struct df_reg_info)); + ref_info->regs[i] = reg_info; + } + + ref_info->regs_inited = max_reg; +} + + +/* Grow the ref information. */ + +static void +df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size) +{ + if (ref_info->refs_size < new_size) + { + ref_info->refs = xrealloc (ref_info->refs, + new_size *sizeof (struct df_ref *)); + memset (ref_info->refs + ref_info->refs_size, 0, + (new_size - ref_info->refs_size) *sizeof (struct df_ref *)); + ref_info->refs_size = new_size; + } +} + + +/* Grow the ref information. If the current size is less than the + number of instructions, grow to 25% more than the number of + instructions. */ + +static void +df_grow_insn_info (struct df *df) +{ + unsigned int new_size = get_max_uid () + 1; + if (df->insns_size < new_size) + { + new_size += new_size / 4; + df->insns = xrealloc (df->insns, + new_size *sizeof (struct df_insn_info *)); + memset (df->insns + df->insns_size, 0, + (new_size - df->insns_size) *sizeof (struct df_insn_info *)); + df->insns_size = new_size; + } +} + + + + +/*---------------------------------------------------------------------------- + PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING. +----------------------------------------------------------------------------*/ + +/* Rescan some BLOCKS or all the blocks defined by the last call to + df_set_blocks if BLOCKS is NULL); */ + +void +df_rescan_blocks (struct df *df, bitmap blocks) +{ + bitmap local_blocks_to_scan = BITMAP_ALLOC (NULL); + + struct dataflow *dflow = df->problems_by_index [DF_SCAN]; + basic_block bb; + + df->def_info.refs_organized = false; + df->use_info.refs_organized = false; + + if (blocks) + { + /* Need to assure that there are space in all of the tables. 
*/ + unsigned int insn_num = get_max_uid () + 1; + insn_num += insn_num / 4; + + df_grow_reg_info (dflow, &df->def_info); + df_grow_ref_info (&df->def_info, insn_num); + + df_grow_reg_info (dflow, &df->use_info); + df_grow_ref_info (&df->use_info, insn_num *2); + + df_grow_insn_info (df); + df_grow_bb_info (dflow); + + bitmap_copy (local_blocks_to_scan, blocks); + df->def_info.add_refs_inline = true; + df->use_info.add_refs_inline = true; + + df_refs_delete (dflow, local_blocks_to_scan); + + /* This may be a mistake, but if an explicit blocks is passed in + and the set of blocks to analyze has been explicitly set, add + the extra blocks to blocks_to_analyze. The alternative is to + put an assert here. We do not want this to just go by + silently or else we may get storage leaks. */ + if (df->blocks_to_analyze) + bitmap_ior_into (df->blocks_to_analyze, blocks); + } + else + { + /* If we are going to do everything, just reallocate everything. + Most stuff is allocated in pools so this is faster than + walking it. */ + if (df->blocks_to_analyze) + bitmap_copy (local_blocks_to_scan, df->blocks_to_analyze); + else + FOR_ALL_BB (bb) + { + bitmap_set_bit (local_blocks_to_scan, bb->index); + } + df_scan_alloc (dflow, local_blocks_to_scan); + + df->def_info.add_refs_inline = false; + df->use_info.add_refs_inline = false; + } + + df_refs_record (dflow, local_blocks_to_scan); +#if 0 + bitmap_print (stderr, local_blocks_to_scan, "scanning: ", "\n"); +#endif + + if (!df->blocks_to_scan) + df->blocks_to_scan = BITMAP_ALLOC (NULL); + + bitmap_ior_into (df->blocks_to_scan, local_blocks_to_scan); + BITMAP_FREE (local_blocks_to_scan); +} + +/* Create a new ref of type DF_REF_TYPE for register REG at address + LOC within INSN of BB. */ + +struct df_ref * +df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn, + basic_block bb, + enum df_ref_type ref_type, + enum df_ref_flags ref_flags) +{ + struct dataflow *dflow = df->problems_by_index[DF_SCAN]; + struct df_scan_bb_info *bb_info; + + df_grow_reg_info (dflow, &df->use_info); + df_grow_reg_info (dflow, &df->def_info); + df_grow_bb_info (dflow); + + /* Make sure there is the bb_info for this block. */ + bb_info = df_scan_get_bb_info (dflow, bb->index); + if (!bb_info) + { + bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool); + df_scan_set_bb_info (dflow, bb->index, bb_info); + bb_info->artificial_defs = NULL; + bb_info->artificial_uses = NULL; + } + + if (ref_type == DF_REF_REG_DEF) + df->def_info.add_refs_inline = true; + else + df->use_info.add_refs_inline = true; + + return df_ref_create_structure (dflow, reg, loc, bb, insn, ref_type, ref_flags); +} + + + +/*---------------------------------------------------------------------------- + UTILITIES TO CREATE AND DESTROY REFS AND CHAINS. +----------------------------------------------------------------------------*/ + + +/* Get the artifical uses for a basic block. */ + +struct df_ref * +df_get_artificial_defs (struct df *df, unsigned int bb_index) +{ + struct dataflow *dflow = df->problems_by_index[DF_SCAN]; + return df_scan_get_bb_info (dflow, bb_index)->artificial_defs; +} + + +/* Get the artifical uses for a basic block. */ + +struct df_ref * +df_get_artificial_uses (struct df *df, unsigned int bb_index) +{ + struct dataflow *dflow = df->problems_by_index[DF_SCAN]; + return df_scan_get_bb_info (dflow, bb_index)->artificial_uses; +} + + +/* Link REF at the front of reg_use or reg_def chain for REGNO. 
*/ + +void +df_reg_chain_create (struct df_reg_info *reg_info, + struct df_ref *ref) +{ + struct df_ref *head = reg_info->reg_chain; + reg_info->reg_chain = ref; + + DF_REF_NEXT_REG (ref) = head; + + /* We cannot actually link to the head of the chain. */ + DF_REF_PREV_REG (ref) = NULL; + + if (head) + DF_REF_PREV_REG (head) = ref; +} + + +/* Remove REF from the CHAIN. Return the head of the chain. This + will be CHAIN unless the REF was at the beginning of the chain. */ + +static struct df_ref * +df_ref_unlink (struct df_ref *chain, struct df_ref *ref) +{ + struct df_ref *orig_chain = chain; + struct df_ref *prev = NULL; + while (chain) + { + if (chain == ref) + { + if (prev) + { + prev->next_ref = ref->next_ref; + ref->next_ref = NULL; + return orig_chain; + } + else + { + chain = ref->next_ref; + ref->next_ref = NULL; + return chain; + } + } + + prev = chain; + chain = chain->next_ref; + } + + /* Someone passed in a ref that was not in the chain. */ + gcc_unreachable (); + return NULL; +} + + +/* Unlink and delete REF at the reg_use or reg_def chain. Also delete + the def-use or use-def chain if it exists. Returns the next ref in + uses or defs chain. */ + +struct df_ref * +df_reg_chain_unlink (struct dataflow *dflow, struct df_ref *ref) +{ + struct df *df = dflow->df; + struct df_ref *next = DF_REF_NEXT_REG (ref); + struct df_ref *prev = DF_REF_PREV_REG (ref); + struct df_scan_problem_data *problem_data = + (struct df_scan_problem_data *) dflow->problem_data; + struct df_reg_info *reg_info; + struct df_ref *next_ref = ref->next_ref; + unsigned int id = DF_REF_ID (ref); + + if (DF_REF_TYPE (ref) == DF_REF_REG_DEF) + { + reg_info = DF_REG_DEF_GET (df, DF_REF_REGNO (ref)); + df->def_info.bitmap_size--; + if (df->def_info.refs && (id < df->def_info.refs_size)) + DF_DEFS_SET (df, id, NULL); + } + else + { + reg_info = DF_REG_USE_GET (df, DF_REF_REGNO (ref)); + df->use_info.bitmap_size--; + if (df->use_info.refs && (id < df->use_info.refs_size)) + DF_USES_SET (df, id, NULL); + } + + /* Delete any def-use or use-def chains that start here. */ + if (DF_REF_CHAIN (ref)) + df_chain_unlink (df->problems_by_index[DF_CHAIN], ref, NULL); + + reg_info->n_refs--; + + /* Unlink from the reg chain. If there is no prev, this is the + first of the list. If not, just join the next and prev. */ + if (prev) + { + DF_REF_NEXT_REG (prev) = next; + if (next) + DF_REF_PREV_REG (next) = prev; + } + else + { + reg_info->reg_chain = next; + if (next) + DF_REF_PREV_REG (next) = NULL; + } + + pool_free (problem_data->ref_pool, ref); + return next_ref; +} + + +/* Unlink REF from all def-use/use-def chains, etc. 
*/ + +void +df_ref_remove (struct df *df, struct df_ref *ref) +{ + struct dataflow *dflow = df->problems_by_index [DF_SCAN]; + if (DF_REF_REG_DEF_P (ref)) + { + if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL) + { + struct df_scan_bb_info *bb_info + = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index); + bb_info->artificial_defs + = df_ref_unlink (bb_info->artificial_defs, ref); + } + else + DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref)) = + df_ref_unlink (DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref)), ref); + + if (df->def_info.add_refs_inline) + DF_DEFS_SET (df, DF_REF_ID (ref), NULL); + } + else + { + if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL) + { + struct df_scan_bb_info *bb_info + = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index); + bb_info->artificial_uses + = df_ref_unlink (bb_info->artificial_uses, ref); + } + else + DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref)) = + df_ref_unlink (DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref)), ref); + + if (df->use_info.add_refs_inline) + DF_USES_SET (df, DF_REF_ID (ref), NULL); + } + + df_reg_chain_unlink (dflow, ref); +} + + +/* Create the insn record for INSN. If there was one there, zero it out. */ + +static struct df_insn_info * +df_insn_create_insn_record (struct dataflow *dflow, rtx insn) +{ + struct df *df = dflow->df; + struct df_scan_problem_data *problem_data = + (struct df_scan_problem_data *) dflow->problem_data; + + struct df_insn_info *insn_rec = DF_INSN_GET (df, insn); + if (!insn_rec) + { + insn_rec = pool_alloc (problem_data->insn_pool); + DF_INSN_SET (df, insn, insn_rec); + } + memset (insn_rec, 0, sizeof (struct df_insn_info)); + + return insn_rec; +} + +/* Delete all of the refs information from BLOCKS. */ + +void +df_insn_refs_delete (struct dataflow *dflow, rtx insn) +{ + struct df *df = dflow->df; + unsigned int uid = INSN_UID (insn); + struct df_insn_info *insn_info = DF_INSN_UID_GET (df, uid); + struct df_ref *ref; + struct df_scan_problem_data *problem_data = + (struct df_scan_problem_data *) dflow->problem_data; + + if (insn_info) + { + ref = insn_info->defs; + while (ref) + ref = df_reg_chain_unlink (dflow, ref); + + ref = insn_info->uses; + while (ref) + ref = df_reg_chain_unlink (dflow, ref); + + pool_free (problem_data->insn_pool, insn_info); + DF_INSN_SET (df, insn, NULL); + } +} + + +/* Delete all of the refs information from BLOCKS. */ + +void +df_refs_delete (struct dataflow *dflow, bitmap blocks) +{ + bitmap_iterator bi; + unsigned int bb_index; + struct df_ref *def; + struct df_ref *use; + + EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi) + { + struct df_scan_bb_info *bb_info + = df_scan_get_bb_info (dflow, bb_index); + rtx insn; + basic_block bb = BASIC_BLOCK (bb_index); + FOR_BB_INSNS (bb, insn) + { + if (INSN_P (insn)) + { + /* Record defs within INSN. */ + df_insn_refs_delete (dflow, insn); + } + } + + /* Get rid of any artifical uses. */ + if (bb_info) + { + def = bb_info->artificial_defs; + while (def) + def = df_reg_chain_unlink (dflow, def); + bb_info->artificial_defs = NULL; + use = bb_info->artificial_uses; + while (use) + use = df_reg_chain_unlink (dflow, use); + bb_info->artificial_uses = NULL; + } + } +} + + +/* Take build ref table for either the uses or defs from the reg-use + or reg-def chains. 
*/ + +void +df_reorganize_refs (struct df_ref_info *ref_info) +{ + unsigned int m = ref_info->regs_inited; + unsigned int regno; + unsigned int offset = 0; + unsigned int size = 0; + + if (ref_info->refs_organized) + return; + + if (ref_info->refs_size < ref_info->bitmap_size) + { + int new_size = ref_info->bitmap_size + ref_info->bitmap_size / 4; + df_grow_ref_info (ref_info, new_size); + } + + for (regno = 0; regno < m; regno++) + { + struct df_reg_info *reg_info = ref_info->regs[regno]; + int count = 0; + if (reg_info) + { + struct df_ref *ref = reg_info->reg_chain; + reg_info->begin = offset; + while (ref) + { + ref_info->refs[offset] = ref; + DF_REF_ID (ref) = offset++; + ref = DF_REF_NEXT_REG (ref); + count++; + size++; + } + reg_info->n_refs = count; + } + } + + /* The bitmap size is not decremented when refs are deleted. So + reset it now that we have squished out all of the empty + slots. */ + ref_info->bitmap_size = size; + ref_info->refs_organized = true; + ref_info->add_refs_inline = true; +} + + +/* Local miscellaneous routines. */ + +/* Local routines for recording refs. */ + +/* Set where we are in the compilation. */ + +void +df_set_state (int state) +{ + df_state = state; +} + + + +/*---------------------------------------------------------------------------- + Hard core instruction scanning code. No external interfaces here, + just a lot of routines that look inside insns. +----------------------------------------------------------------------------*/ + +/* Create a ref and add it to the reg-def or reg-use chains. */ + +static struct df_ref * +df_ref_create_structure (struct dataflow *dflow, rtx reg, rtx *loc, + basic_block bb, rtx insn, + enum df_ref_type ref_type, + enum df_ref_flags ref_flags) +{ + struct df_ref *this_ref; + struct df *df = dflow->df; + int regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg); + struct df_scan_problem_data *problem_data = + (struct df_scan_problem_data *) dflow->problem_data; + + this_ref = pool_alloc (problem_data->ref_pool); + DF_REF_REG (this_ref) = reg; + DF_REF_REGNO (this_ref) = regno; + DF_REF_LOC (this_ref) = loc; + DF_REF_INSN (this_ref) = insn; + DF_REF_CHAIN (this_ref) = NULL; + DF_REF_TYPE (this_ref) = ref_type; + DF_REF_FLAGS (this_ref) = ref_flags; + DF_REF_DATA (this_ref) = NULL; + DF_REF_BB (this_ref) = bb; + + /* Link the ref into the reg_def and reg_use chains and keep a count + of the instances. */ + if (ref_type == DF_REF_REG_DEF) + { + struct df_reg_info *reg_info = DF_REG_DEF_GET (df, regno); + reg_info->n_refs++; + + /* Add the ref to the reg_def chain. */ + df_reg_chain_create (reg_info, this_ref); + DF_REF_ID (this_ref) = df->def_info.bitmap_size; + if (df->def_info.add_refs_inline) + { + if (DF_DEFS_SIZE (df) >= df->def_info.refs_size) + { + int new_size = df->def_info.bitmap_size + + df->def_info.bitmap_size / 4; + df_grow_ref_info (&df->def_info, new_size); + } + /* Add the ref to the big array of defs. 
*/ + DF_DEFS_SET (df, df->def_info.bitmap_size, this_ref); + df->def_info.refs_organized = false; + } + + df->def_info.bitmap_size++; + + if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL) + { + struct df_scan_bb_info *bb_info + = df_scan_get_bb_info (dflow, bb->index); + this_ref->next_ref = bb_info->artificial_defs; + bb_info->artificial_defs = this_ref; + } + else + { + this_ref->next_ref = DF_INSN_GET (df, insn)->defs; + DF_INSN_GET (df, insn)->defs = this_ref; + } + } + else + { + struct df_reg_info *reg_info = DF_REG_USE_GET (df, regno); + reg_info->n_refs++; + + /* Add the ref to the reg_use chain. */ + df_reg_chain_create (reg_info, this_ref); + DF_REF_ID (this_ref) = df->use_info.bitmap_size; + if (df->use_info.add_refs_inline) + { + if (DF_USES_SIZE (df) >= df->use_info.refs_size) + { + int new_size = df->use_info.bitmap_size + + df->use_info.bitmap_size / 4; + df_grow_ref_info (&df->use_info, new_size); + } + /* Add the ref to the big array of defs. */ + DF_USES_SET (df, df->use_info.bitmap_size, this_ref); + df->use_info.refs_organized = false; + } + + df->use_info.bitmap_size++; + if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL) + { + struct df_scan_bb_info *bb_info + = df_scan_get_bb_info (dflow, bb->index); + this_ref->next_ref = bb_info->artificial_uses; + bb_info->artificial_uses = this_ref; + } + else + { + this_ref->next_ref = DF_INSN_GET (df, insn)->uses; + DF_INSN_GET (df, insn)->uses = this_ref; + } + } + return this_ref; +} + + +/* Create new references of type DF_REF_TYPE for each part of register REG + at address LOC within INSN of BB. */ + +static void +df_ref_record (struct dataflow *dflow, rtx reg, rtx *loc, + basic_block bb, rtx insn, + enum df_ref_type ref_type, + enum df_ref_flags ref_flags, + bool record_live) +{ + unsigned int regno; + struct df *df = dflow->df; + + gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG); + + /* For the reg allocator we are interested in some SUBREG rtx's, but not + all. Notably only those representing a word extraction from a multi-word + reg. As written in the docu those should have the form + (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode). + XXX Is that true? We could also use the global word_mode variable. */ + if ((df->flags & DF_SUBREGS) == 0 + && GET_CODE (reg) == SUBREG + && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode) + || GET_MODE_SIZE (GET_MODE (reg)) + >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg))))) + { + loc = &SUBREG_REG (reg); + reg = *loc; + ref_flags |= DF_REF_STRIPPED; + } + + regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg); + if (regno < FIRST_PSEUDO_REGISTER) + { + int i; + int endregno; + + if (! (df->flags & DF_HARD_REGS)) + return; + + /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG + for the mode, because we only want to add references to regs, which + are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_ + reference the whole reg 0 in DI mode (which would also include + reg 1, at least, if 0 and 1 are SImode registers). */ + endregno = hard_regno_nregs[regno][GET_MODE (reg)]; + if (GET_CODE (reg) == SUBREG) + regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)), + SUBREG_BYTE (reg), GET_MODE (reg)); + endregno += regno; + + for (i = regno; i < endregno; i++) + { + /* Calls are handled at call site because regs_ever_live + doesn't include clobbered regs, only used ones. 
*/ + if (ref_type == DF_REF_REG_DEF && record_live) + regs_ever_live[i] = 1; + else if ((ref_type == DF_REF_REG_USE + || ref_type == DF_REF_REG_MEM_STORE + || ref_type == DF_REF_REG_MEM_LOAD) + && ((ref_flags & DF_REF_ARTIFICIAL) == 0)) + { + /* Set regs_ever_live on uses of non-eliminable frame + pointers and arg pointers. */ + if (! (TEST_HARD_REG_BIT (elim_reg_set, regno) + && (regno == FRAME_POINTER_REGNUM + || regno == ARG_POINTER_REGNUM))) + regs_ever_live[i] = 1; + } + + df_ref_create_structure (dflow, regno_reg_rtx[i], loc, + bb, insn, ref_type, ref_flags); + } + } + else + { + df_ref_create_structure (dflow, reg, loc, + bb, insn, ref_type, ref_flags); + } +} + + +/* A set to a non-paradoxical SUBREG for which the number of word_mode units + covered by the outer mode is smaller than that covered by the inner mode, + is a read-modify-write operation. + This function returns true iff the SUBREG X is such a SUBREG. */ + +bool +df_read_modify_subreg_p (rtx x) +{ + unsigned int isize, osize; + if (GET_CODE (x) != SUBREG) + return false; + isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))); + osize = GET_MODE_SIZE (GET_MODE (x)); + return (isize > osize && isize > UNITS_PER_WORD); +} + + +/* Process all the registers defined in the rtx, X. + Autoincrement/decrement definitions will be picked up by + df_uses_record. */ + +static void +df_def_record_1 (struct dataflow *dflow, rtx x, + basic_block bb, rtx insn, + enum df_ref_flags flags, bool record_live) +{ + rtx *loc; + rtx dst; + + /* We may recursively call ourselves on EXPR_LIST when dealing with PARALLEL + construct. */ + if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER) + loc = &XEXP (x, 0); + else + loc = &SET_DEST (x); + dst = *loc; + + /* Some targets place small structures in registers for + return values of functions. */ + if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode) + { + int i; + + for (i = XVECLEN (dst, 0) - 1; i >= 0; i--) + { + rtx temp = XVECEXP (dst, 0, i); + if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER + || GET_CODE (temp) == SET) + df_def_record_1 (dflow, temp, bb, insn, + GET_CODE (temp) == CLOBBER ? flags | DF_REF_CLOBBER : flags, + record_live); + } + return; + } + + /* Maybe, we should flag the use of STRICT_LOW_PART somehow. It might + be handy for the reg allocator. */ + while (GET_CODE (dst) == STRICT_LOW_PART + || GET_CODE (dst) == ZERO_EXTRACT + || df_read_modify_subreg_p (dst)) + { +#if 0 + /* Strict low part always contains SUBREG, but we do not want to make + it appear outside, as whole register is always considered. */ + if (GET_CODE (dst) == STRICT_LOW_PART) + { + loc = &XEXP (dst, 0); + dst = *loc; + } +#endif + loc = &XEXP (dst, 0); + dst = *loc; + flags |= DF_REF_READ_WRITE; + } + + if (REG_P (dst) + || (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst)))) + df_ref_record (dflow, dst, loc, bb, insn, + DF_REF_REG_DEF, flags, record_live); +} + + +/* Process all the registers defined in the pattern rtx, X. */ + +static void +df_defs_record (struct dataflow *dflow, rtx x, basic_block bb, rtx insn) +{ + RTX_CODE code = GET_CODE (x); + + if (code == SET || code == CLOBBER) + { + /* Mark the single def within the pattern. */ + df_def_record_1 (dflow, x, bb, insn, + code == CLOBBER ? DF_REF_CLOBBER : 0, true); + } + else if (code == COND_EXEC) + { + df_defs_record (dflow, COND_EXEC_CODE (x), bb, insn); + } + else if (code == PARALLEL) + { + int i; + + /* Mark the multiple defs within the pattern. 
*/ + for (i = XVECLEN (x, 0) - 1; i >= 0; i--) + df_defs_record (dflow, XVECEXP (x, 0, i), bb, insn); + } +} + + +/* Process all the registers used in the rtx at address LOC. */ + +static void +df_uses_record (struct dataflow *dflow, rtx *loc, enum df_ref_type ref_type, + basic_block bb, rtx insn, enum df_ref_flags flags) +{ + RTX_CODE code; + rtx x; + retry: + x = *loc; + if (!x) + return; + code = GET_CODE (x); + switch (code) + { + case LABEL_REF: + case SYMBOL_REF: + case CONST_INT: + case CONST: + case CONST_DOUBLE: + case CONST_VECTOR: + case PC: + case CC0: + case ADDR_VEC: + case ADDR_DIFF_VEC: + return; + + case CLOBBER: + /* If we are clobbering a MEM, mark any registers inside the address + as being used. */ + if (MEM_P (XEXP (x, 0))) + df_uses_record (dflow, &XEXP (XEXP (x, 0), 0), + DF_REF_REG_MEM_STORE, bb, insn, flags); + + /* If we're clobbering a REG then we have a def so ignore. */ + return; + + case MEM: + df_uses_record (dflow, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, + flags & DF_REF_IN_NOTE); + return; + + case SUBREG: + /* While we're here, optimize this case. */ + + /* In case the SUBREG is not of a REG, do not optimize. */ + if (!REG_P (SUBREG_REG (x))) + { + loc = &SUBREG_REG (x); + df_uses_record (dflow, loc, ref_type, bb, insn, flags); + return; + } + /* ... Fall through ... */ + + case REG: + df_ref_record (dflow, x, loc, bb, insn, ref_type, flags, true); + return; + + case SET: + { + rtx dst = SET_DEST (x); + gcc_assert (!(flags & DF_REF_IN_NOTE)); + df_uses_record (dflow, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0); + + switch (GET_CODE (dst)) + { + case SUBREG: + if (df_read_modify_subreg_p (dst)) + { + df_uses_record (dflow, &SUBREG_REG (dst), + DF_REF_REG_USE, bb, + insn, DF_REF_READ_WRITE); + break; + } + /* Fall through. */ + case REG: + case PARALLEL: + case SCRATCH: + case PC: + case CC0: + break; + case MEM: + df_uses_record (dflow, &XEXP (dst, 0), + DF_REF_REG_MEM_STORE, + bb, insn, 0); + break; + case STRICT_LOW_PART: + { + rtx *temp = &XEXP (dst, 0); + /* A strict_low_part uses the whole REG and not just the + SUBREG. */ + dst = XEXP (dst, 0); + df_uses_record (dflow, + (GET_CODE (dst) == SUBREG) + ? &SUBREG_REG (dst) : temp, + DF_REF_REG_USE, bb, + insn, DF_REF_READ_WRITE); + } + break; + case ZERO_EXTRACT: + case SIGN_EXTRACT: + df_uses_record (dflow, &XEXP (dst, 0), + DF_REF_REG_USE, bb, insn, + DF_REF_READ_WRITE); + df_uses_record (dflow, &XEXP (dst, 1), + DF_REF_REG_USE, bb, insn, 0); + df_uses_record (dflow, &XEXP (dst, 2), + DF_REF_REG_USE, bb, insn, 0); + dst = XEXP (dst, 0); + break; + default: + gcc_unreachable (); + } + return; + } + + case RETURN: + break; + + case ASM_OPERANDS: + case UNSPEC_VOLATILE: + case TRAP_IF: + case ASM_INPUT: + { + /* Traditional and volatile asm instructions must be + considered to use and clobber all hard registers, all + pseudo-registers and all of memory. So must TRAP_IF and + UNSPEC_VOLATILE operations. + + Consider for instance a volatile asm that changes the fpu + rounding mode. An insn should not be moved across this + even if it only uses pseudo-regs because it might give an + incorrectly rounded result. + + However, flow.c's liveness computation did *not* do this, + giving the reasoning as " ?!? Unfortunately, marking all + hard registers as live causes massive problems for the + register allocator and marking all pseudos as live creates + mountains of uninitialized variable warnings." 
+ + In order to maintain the status quo with regard to liveness + and uses, we do what flow.c did and just mark any regs we + can find in ASM_OPERANDS as used. Later on, when liveness + is computed, asm insns are scanned and regs_asm_clobbered + is filled out. + + For all ASM_OPERANDS, we must traverse the vector of input + operands. We can not just fall through here since then we + would be confused by the ASM_INPUT rtx inside ASM_OPERANDS, + which do not indicate traditional asms unlike their normal + usage. */ + if (code == ASM_OPERANDS) + { + int j; + + for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++) + df_uses_record (dflow, &ASM_OPERANDS_INPUT (x, j), + DF_REF_REG_USE, bb, insn, 0); + return; + } + break; + } + + case PRE_DEC: + case POST_DEC: + case PRE_INC: + case POST_INC: + case PRE_MODIFY: + case POST_MODIFY: + /* Catch the def of the register being modified. */ + df_ref_record (dflow, XEXP (x, 0), &XEXP (x, 0), bb, insn, + DF_REF_REG_DEF, DF_REF_READ_WRITE, true); + + /* ... Fall through to handle uses ... */ + + default: + break; + } + + /* Recursively scan the operands of this expression. */ + { + const char *fmt = GET_RTX_FORMAT (code); + int i; + + for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) + { + if (fmt[i] == 'e') + { + /* Tail recursive case: save a function call level. */ + if (i == 0) + { + loc = &XEXP (x, 0); + goto retry; + } + df_uses_record (dflow, &XEXP (x, i), ref_type, bb, insn, flags); + } + else if (fmt[i] == 'E') + { + int j; + for (j = 0; j < XVECLEN (x, i); j++) + df_uses_record (dflow, &XVECEXP (x, i, j), ref_type, + bb, insn, flags); + } + } + } +} + +/* Return true if *LOC contains an asm. */ + +static int +df_insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED) +{ + if ( !*loc) + return 0; + if (GET_CODE (*loc) == ASM_OPERANDS) + return 1; + return 0; +} + + +/* Return true if INSN contains an ASM. */ + +static int +df_insn_contains_asm (rtx insn) +{ + return for_each_rtx (&insn, df_insn_contains_asm_1, NULL); +} + + + +/* Record all the refs for DF within INSN of basic block BB. */ + +static void +df_insn_refs_record (struct dataflow *dflow, basic_block bb, rtx insn) +{ + int i; + struct df *df = dflow->df; + + if (INSN_P (insn)) + { + rtx note; + + if (df_insn_contains_asm (insn)) + DF_INSN_CONTAINS_ASM (df, insn) = true; + + /* Record register defs. */ + df_defs_record (dflow, PATTERN (insn), bb, insn); + + if (df->flags & DF_EQUIV_NOTES) + for (note = REG_NOTES (insn); note; + note = XEXP (note, 1)) + { + switch (REG_NOTE_KIND (note)) + { + case REG_EQUIV: + case REG_EQUAL: + df_uses_record (dflow, &XEXP (note, 0), DF_REF_REG_USE, + bb, insn, DF_REF_IN_NOTE); + default: + break; + } + } + + if (CALL_P (insn)) + { + rtx note; + + /* Record the registers used to pass arguments, and explicitly + noted as clobbered. 
*/ + for (note = CALL_INSN_FUNCTION_USAGE (insn); note; + note = XEXP (note, 1)) + { + if (GET_CODE (XEXP (note, 0)) == USE) + df_uses_record (dflow, &XEXP (XEXP (note, 0), 0), + DF_REF_REG_USE, + bb, insn, 0); + else if (GET_CODE (XEXP (note, 0)) == CLOBBER) + { + df_defs_record (dflow, XEXP (note, 0), bb, insn); + if (REG_P (XEXP (XEXP (note, 0), 0))) + { + rtx reg = XEXP (XEXP (note, 0), 0); + int regno_last; + int regno_first; + int i; + + regno_last = regno_first = REGNO (reg); + if (regno_first < FIRST_PSEUDO_REGISTER) + regno_last + += hard_regno_nregs[regno_first][GET_MODE (reg)] - 1; + for (i = regno_first; i <= regno_last; i++) + regs_ever_live[i] = 1; + } + } + } + + /* The stack ptr is used (honorarily) by a CALL insn. */ + df_uses_record (dflow, ®no_reg_rtx[STACK_POINTER_REGNUM], + DF_REF_REG_USE, bb, insn, + 0); + + if (df->flags & DF_HARD_REGS) + { + bitmap_iterator bi; + unsigned int ui; + /* Calls may also reference any of the global registers, + so they are recorded as used. */ + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (global_regs[i]) + df_uses_record (dflow, ®no_reg_rtx[i], + DF_REF_REG_USE, bb, insn, + 0); + EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call, 0, ui, bi) + df_ref_record (dflow, regno_reg_rtx[ui], ®no_reg_rtx[ui], bb, insn, + DF_REF_REG_DEF, DF_REF_CLOBBER, false); + } + } + + /* Record the register uses. */ + df_uses_record (dflow, &PATTERN (insn), + DF_REF_REG_USE, bb, insn, 0); + + } +} + +static bool +df_has_eh_preds (basic_block bb) +{ + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->preds) + { + if (e->flags & EDGE_EH) + return true; + } + return false; +} + +/* Record all the refs within the basic block BB. */ + +static void +df_bb_refs_record (struct dataflow *dflow, basic_block bb) +{ + struct df *df = dflow->df; + rtx insn; + int luid = 0; + struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb->index); + + /* Need to make sure that there is a record in the basic block info. */ + if (!bb_info) + { + bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool); + df_scan_set_bb_info (dflow, bb->index, bb_info); + bb_info->artificial_defs = NULL; + bb_info->artificial_uses = NULL; + } + + /* Scan the block an insn at a time from beginning to end. */ + FOR_BB_INSNS (bb, insn) + { + df_insn_create_insn_record (dflow, insn); + if (INSN_P (insn)) + { + /* Record defs within INSN. */ + DF_INSN_LUID (df, insn) = luid++; + df_insn_refs_record (dflow, bb, insn); + } + DF_INSN_LUID (df, insn) = luid; + } + +#ifdef EH_RETURN_DATA_REGNO + if ((df->flags & DF_HARD_REGS) + && df_has_eh_preds (bb)) + { + unsigned int i; + /* Mark the registers that will contain data for the handler. */ + if (current_function_calls_eh_return) + for (i = 0; ; ++i) + { + unsigned regno = EH_RETURN_DATA_REGNO (i); + if (regno == INVALID_REGNUM) + break; + df_ref_record (dflow, regno_reg_rtx[i], ®no_reg_rtx[i], bb, NULL, + DF_REF_REG_DEF, DF_REF_ARTIFICIAL | DF_REF_AT_TOP, false); + } + } +#endif + +#ifdef EH_USES + /* This code is putting in a artificial ref for the use at the TOP + of the block that receives the exception. It is too cumbersome + to actually put the ref on the edge. We could either model this + at the top of the receiver block or the bottom of the sender + block. + + The bottom of the sender block is problematic because not all + out-edges of the a block are eh-edges. However, it is true that + all edges into a block are either eh-edges or none of them are + eh-edges. 
Thus, we can model this at the top of the eh-receiver + for all of the edges at once. */ + if ((df->flags & DF_HARD_REGS) + && df_has_eh_preds (bb)) + { + unsigned int i; + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (EH_USES (i)) + df_uses_record (dflow, ®no_reg_rtx[i], + DF_REF_REG_USE, EXIT_BLOCK_PTR, NULL, + DF_REF_ARTIFICIAL | DF_REF_AT_TOP); + } +#endif + + if ((df->flags & DF_HARD_REGS) + && bb->index >= NUM_FIXED_BLOCKS) + { + /* Before reload, there are a few registers that must be forced + live everywhere -- which might not already be the case for + blocks within infinite loops. */ + if (! reload_completed) + { + + /* Any reference to any pseudo before reload is a potential + reference of the frame pointer. */ + df_uses_record (dflow, ®no_reg_rtx [FRAME_POINTER_REGNUM], + DF_REF_REG_USE, bb, NULL, DF_REF_ARTIFICIAL); + +#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM + /* Pseudos with argument area equivalences may require + reloading via the argument pointer. */ + if (fixed_regs[ARG_POINTER_REGNUM]) + df_uses_record (dflow, ®no_reg_rtx[ARG_POINTER_REGNUM], + DF_REF_REG_USE, bb, NULL, + DF_REF_ARTIFICIAL); +#endif + + /* Any constant, or pseudo with constant equivalences, may + require reloading from memory using the pic register. */ + if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM + && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) + df_uses_record (dflow, ®no_reg_rtx[PIC_OFFSET_TABLE_REGNUM], + DF_REF_REG_USE, bb, NULL, + DF_REF_ARTIFICIAL); + } + /* The all-important stack pointer must always be live. */ + df_uses_record (dflow, ®no_reg_rtx[STACK_POINTER_REGNUM], + DF_REF_REG_USE, bb, NULL, DF_REF_ARTIFICIAL); + } +} + + +/* Record all the refs in the basic blocks specified by BLOCKS. */ + +static void +df_refs_record (struct dataflow *dflow, bitmap blocks) +{ + unsigned int bb_index; + bitmap_iterator bi; + + EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi) + { + basic_block bb = BASIC_BLOCK (bb_index); + df_bb_refs_record (dflow, bb); + } + + if (bitmap_bit_p (blocks, EXIT_BLOCK)) + df_record_exit_block_uses (dflow); +} + + +/*---------------------------------------------------------------------------- + Specialized hard register scanning functions. +----------------------------------------------------------------------------*/ + +/* Mark a register in SET. Hard registers in large modes get all + of their component registers set as well. */ + +static void +df_mark_reg (rtx reg, void *vset) +{ + bitmap set = (bitmap) vset; + int regno = REGNO (reg); + + gcc_assert (GET_MODE (reg) != BLKmode); + + bitmap_set_bit (set, regno); + if (regno < FIRST_PSEUDO_REGISTER) + { + int n = hard_regno_nregs[regno][GET_MODE (reg)]; + while (--n > 0) + bitmap_set_bit (set, regno + n); + } +} + +/* Record the set of hard registers that are used in the exit block. */ + +static void +df_record_exit_block_uses (struct dataflow *dflow) +{ + unsigned int i; + bitmap_iterator bi; + struct df *df = dflow->df; + + bitmap_clear (df->exit_block_uses); + + if (! (df->flags & DF_HARD_REGS)) + return; + + /* If exiting needs the right stack value, consider the stack + pointer live at the end of the function. */ + if ((HAVE_epilogue && epilogue_completed) + || ! EXIT_IGNORE_STACK + || (! FRAME_POINTER_REQUIRED + && ! current_function_calls_alloca + && flag_omit_frame_pointer) + || current_function_sp_is_unchanging) + { + bitmap_set_bit (df->exit_block_uses, STACK_POINTER_REGNUM); + } + + /* Mark the frame pointer if needed at the end of the function. 
+ If we end up eliminating it, it will be removed from the live + list of each basic block by reload. */ + + if (! reload_completed || frame_pointer_needed) + { + bitmap_set_bit (df->exit_block_uses, FRAME_POINTER_REGNUM); +#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM + /* If they are different, also mark the hard frame pointer as live. */ + if (! LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM)) + bitmap_set_bit (df->exit_block_uses, HARD_FRAME_POINTER_REGNUM); +#endif + } + +#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED + /* Many architectures have a GP register even without flag_pic. + Assume the pic register is not in use, or will be handled by + other means, if it is not fixed. */ + if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM + && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) + bitmap_set_bit (df->exit_block_uses, PIC_OFFSET_TABLE_REGNUM); +#endif + + /* Mark all global registers, and all registers used by the + epilogue as being live at the end of the function since they + may be referenced by our caller. */ + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (global_regs[i] || EPILOGUE_USES (i)) + bitmap_set_bit (df->exit_block_uses, i); + + if (HAVE_epilogue && epilogue_completed) + { + /* Mark all call-saved registers that we actually used. */ + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (regs_ever_live[i] && ! LOCAL_REGNO (i) + && ! TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) + bitmap_set_bit (df->exit_block_uses, i); + } + +#ifdef EH_RETURN_DATA_REGNO + /* Mark the registers that will contain data for the handler. */ + if (reload_completed && current_function_calls_eh_return) + for (i = 0; ; ++i) + { + unsigned regno = EH_RETURN_DATA_REGNO (i); + if (regno == INVALID_REGNUM) + break; + bitmap_set_bit (df->exit_block_uses, regno); + } +#endif + +#ifdef EH_RETURN_STACKADJ_RTX + if ((! HAVE_epilogue || ! epilogue_completed) + && current_function_calls_eh_return) + { + rtx tmp = EH_RETURN_STACKADJ_RTX; + if (tmp && REG_P (tmp)) + df_mark_reg (tmp, df->exit_block_uses); + } +#endif + +#ifdef EH_RETURN_HANDLER_RTX + if ((! HAVE_epilogue || ! epilogue_completed) + && current_function_calls_eh_return) + { + rtx tmp = EH_RETURN_HANDLER_RTX; + if (tmp && REG_P (tmp)) + df_mark_reg (tmp, df->exit_block_uses); + } +#endif + + /* Mark function return value. */ + diddle_return_value (df_mark_reg, (void*) df->exit_block_uses); + + if (df->flags & DF_HARD_REGS) + EXECUTE_IF_SET_IN_BITMAP (df->exit_block_uses, 0, i, bi) + df_uses_record (dflow, ®no_reg_rtx[i], + DF_REF_REG_USE, EXIT_BLOCK_PTR, NULL, + DF_REF_ARTIFICIAL); +} + +static bool initialized = false; + +/* Initialize some platform specific structures. */ + +void +df_hard_reg_init (void) +{ +#ifdef ELIMINABLE_REGS + int i; + static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS; +#endif + /* After reload, some ports add certain bits to regs_ever_live so + this cannot be reset. */ + + if (!reload_completed) + memset (regs_ever_live, 0, sizeof (regs_ever_live)); + + if (initialized) + return; + + bitmap_obstack_initialize (&persistent_obstack); + + /* Record which registers will be eliminated. We use this in + mark_used_regs. 
*/ + CLEAR_HARD_REG_SET (elim_reg_set); + +#ifdef ELIMINABLE_REGS + for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++) + SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from); +#else + SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM); +#endif + + df_invalidated_by_call = BITMAP_ALLOC (&persistent_obstack); + + /* Inconveniently, this is only readily available in hard reg set + form. */ + for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) + if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) + bitmap_set_bit (df_invalidated_by_call, i); + + df_all_hard_regs = BITMAP_ALLOC (&persistent_obstack); + + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + bitmap_set_bit (df_all_hard_regs, i); + + initialized = true; +} |
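To make the register chains concrete: df_reg_chain_create links each new ref
onto the head of its register's doubly-linked reg_chain, so every def or use
of a register can be visited without rescanning. The helper below is
hypothetical (not part of the patch) and uses only accessors visible above:
DF_REG_DEF_GET, DF_REF_NEXT_REG, DF_REF_INSN, and DF_REF_BB.

/* Walk every def of register REGNO via the reg_chain maintained by
   df_reg_chain_create.  Illustrative sketch only; DF_REF_INSN is
   NULL for artificial refs.  */
static void
visit_defs_of_regno (struct df *df, unsigned int regno)
{
  struct df_reg_info *reg_info = DF_REG_DEF_GET (df, regno);
  struct df_ref *ref;

  for (ref = reg_info->reg_chain; ref; ref = DF_REF_NEXT_REG (ref))
    {
      /* Each ref records its insn and enclosing basic block.  */
      rtx insn = DF_REF_INSN (ref);
      basic_block bb = DF_REF_BB (ref);
      (void) insn;
      (void) bb;
    }
}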