author     bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>  2009-01-29 19:10:25 +0000
committer  bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>  2009-01-29 19:10:25 +0000
commit     1b927e1cb81240f4d6c73eb7cdec45246f6a3c72 (patch)
tree       56e5cd4a6dc56809fa98da6b09135f74ba2ad785
parent     87e27de18fdbc0c21dcbc3108b52cdea8183acad (diff)
download   gcc-1b927e1cb81240f4d6c73eb7cdec45246f6a3c72.tar.gz

2009-01-29 Basile Starynkevitch <basile@starynkevitch.net>

    MELT branch merged with trunk r143767

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/melt-branch@143769 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  ChangeLog.melt | 3
-rw-r--r--  config/ChangeLog | 4
-rw-r--r--  config/tls.m4 | 18
-rw-r--r--  gcc/ChangeLog | 507
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/Makefile.in | 28
-rw-r--r--  gcc/alias.c | 39
-rw-r--r--  gcc/auto-inc-dec.c | 2
-rw-r--r--  gcc/bitmap.h | 4
-rw-r--r--  gcc/c-common.c | 73
-rw-r--r--  gcc/c-decl.c | 71
-rw-r--r--  gcc/c-opts.c | 11
-rw-r--r--  gcc/c-typeck.c | 4
-rw-r--r--  gcc/c.opt | 4
-rw-r--r--  gcc/caller-save.c | 2
-rw-r--r--  gcc/calls.c | 1
-rw-r--r--  gcc/cfgexpand.c | 2
-rw-r--r--  gcc/cfgloopanal.c | 4
-rw-r--r--  gcc/common.opt | 4
-rw-r--r--  gcc/config/arm/arm-cores.def | 1
-rw-r--r--  gcc/config/arm/arm-tune.md | 2
-rw-r--r--  gcc/config/arm/arm.c | 1
-rw-r--r--  gcc/config/arm/arm.h | 13
-rw-r--r--  gcc/config/arm/arm.md | 77
-rw-r--r--  gcc/config/arm/bpabi.h | 2
-rw-r--r--  gcc/config/arm/linux-atomic.c | 280
-rw-r--r--  gcc/config/arm/neon.md | 74
-rw-r--r--  gcc/config/arm/t-linux-eabi | 2
-rw-r--r--  gcc/config/i386/cygwin.h | 23
-rw-r--r--  gcc/config/i386/i386.md | 14
-rw-r--r--  gcc/config/i386/sol2-10.h | 11
-rw-r--r--  gcc/config/mips/mips.c | 2
-rw-r--r--  gcc/config/rs6000/rs6000-protos.h | 1
-rw-r--r--  gcc/config/rs6000/rs6000.c | 20
-rw-r--r--  gcc/config/rs6000/rs6000.md | 75
-rw-r--r--  gcc/config/rs6000/rs6000.opt | 4
-rw-r--r--  gcc/config/s390/s390.md | 102
-rw-r--r--  gcc/config/sparc/linux.h | 4
-rw-r--r--  gcc/config/sparc/linux64.h | 4
-rw-r--r--  gcc/config/sparc/sysv4.h | 16
-rw-r--r--  gcc/cp/ChangeLog | 31
-rw-r--r--  gcc/cp/call.c | 2
-rw-r--r--  gcc/cp/class.c | 3
-rw-r--r--  gcc/cp/cp-tree.h | 9
-rw-r--r--  gcc/cp/decl.c | 4
-rw-r--r--  gcc/cp/decl2.c | 16
-rw-r--r--  gcc/cp/name-lookup.c | 30
-rw-r--r--  gcc/cp/pt.c | 95
-rw-r--r--  gcc/cp/semantics.c | 24
-rw-r--r--  gcc/cp/typeck.c | 4
-rw-r--r--  gcc/doc/extend.texi | 8
-rw-r--r--  gcc/doc/invoke.texi | 368
-rw-r--r--  gcc/doc/passes.texi | 20
-rw-r--r--  gcc/except.c | 2
-rw-r--r--  gcc/fold-const.c | 18
-rw-r--r--  gcc/fortran/ChangeLog | 13
-rw-r--r--  gcc/fortran/trans-intrinsic.c | 7
-rw-r--r--  gcc/fortran/trans-stmt.c | 28
-rw-r--r--  gcc/gimplify.c | 3
-rw-r--r--  gcc/global.c | 1849
-rw-r--r--  gcc/graphite.c | 29
-rw-r--r--  gcc/hard-reg-set.h | 3
-rw-r--r--  gcc/ira-color.c | 7
-rw-r--r--  gcc/ira.c | 1216
-rw-r--r--  gcc/local-alloc.c | 2545
-rw-r--r--  gcc/lower-subreg.c | 2
-rw-r--r--  gcc/mode-switching.c | 2
-rw-r--r--  gcc/opts.c | 3
-rw-r--r--  gcc/passes.c | 4
-rw-r--r--  gcc/ra-conflict.c | 1241
-rw-r--r--  gcc/reginfo.c (renamed from gcc/regclass.c) | 1437
-rw-r--r--  gcc/regmove.c | 864
-rw-r--r--  gcc/reload.c | 8
-rw-r--r--  gcc/reload1.c | 95
-rw-r--r--  gcc/reorg.c | 3
-rw-r--r--  gcc/rtl.h | 12
-rw-r--r--  gcc/sel-sched.c | 47
-rw-r--r--  gcc/stor-layout.c | 20
-rw-r--r--  gcc/testsuite/ChangeLog | 202
-rw-r--r--  gcc/testsuite/g++.dg/ext/bitfield2.C | 12
-rw-r--r--  gcc/testsuite/g++.dg/ext/bitfield3.C | 11
-rw-r--r--  gcc/testsuite/g++.dg/ext/bitfield4.C | 11
-rw-r--r--  gcc/testsuite/g++.dg/ext/bitfield5.C | 11
-rw-r--r--  gcc/testsuite/g++.dg/parse/crash51.C | 9
-rw-r--r--  gcc/testsuite/g++.dg/template/access11.C | 4
-rw-r--r--  gcc/testsuite/g++.dg/template/sfinae3.C | 10
-rw-r--r--  gcc/testsuite/g++.dg/template/typedef11.C | 25
-rw-r--r--  gcc/testsuite/g++.dg/template/typedef12.C | 23
-rw-r--r--  gcc/testsuite/g++.dg/template/typedef13.C | 16
-rw-r--r--  gcc/testsuite/g++.dg/template/typedef14.C | 16
-rw-r--r--  gcc/testsuite/g++.dg/torture/pr38745.C | 36
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/test-unwind.h | 21
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/unwind-0.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/unwind-1.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/unwind-2.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/unwind-3.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/unwind-4.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/unwind-5.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/torture/stackalign/unwind-6.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/warn/Wstrict-aliasing-bogus-placement-new.C | 29
-rw-r--r--  gcc/testsuite/g++.dg/warn/Wuninitialized-1.C | 15
-rw-r--r--  gcc/testsuite/g++.dg/warn/Wuninitialized-2.C | 53
-rw-r--r--  gcc/testsuite/g++.old-deja/g++.pt/crash9.C | 6
-rw-r--r--  gcc/testsuite/g++.old-deja/g++.pt/typename8.C | 6
-rw-r--r--  gcc/testsuite/gcc.c-torture/compile/pr38857.c | 22
-rw-r--r--  gcc/testsuite/gcc.c-torture/compile/pr38926.c | 41
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/pr38969.c | 25
-rw-r--r--  gcc/testsuite/gcc.dg/bitfld-15.c | 12
-rw-r--r--  gcc/testsuite/gcc.dg/bitfld-16.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/bitfld-17.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/bitfld-18.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/lower-subreg-1.c | 6
-rw-r--r--  gcc/testsuite/gcc.dg/pr17112-1.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/pr35729.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/pr38615.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/pr38932.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/pr38934.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/pr38957.c | 16
-rw-r--r--  gcc/testsuite/gcc.dg/pr38984.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/sms-7.c | 6
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr38997.c | 16
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr39007.c | 13
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/ssa-store-ccp-3.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/vrp47.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/arm/neon-cond-1.c | 30
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr38931.c | 23
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr38988.c | 24
-rw-r--r--  gcc/testsuite/gcc.target/mips/dpaq_sa_l_w.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/mips/dpsq_sa_l_w.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/mips/fixed-scalar-type.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/mips/fixed-vector-type.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/avoid-indexed-addresses.c | 14
-rw-r--r--  gcc/testsuite/gfortran.dg/array_constructor_24.f | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/bound_6.f90 | 71
-rw-r--r--  gcc/testsuite/gfortran.dg/graphite/pr38953.f90 | 115
-rw-r--r--  gcc/testsuite/gfortran.dg/mvbits_6.f90 | 33
-rw-r--r--  gcc/testsuite/gfortran.dg/mvbits_7.f90 | 30
-rw-r--r--  gcc/testsuite/gfortran.dg/mvbits_8.f90 | 36
-rw-r--r--  gcc/toplev.h | 1
-rw-r--r--  gcc/tree-loop-distribution.c | 17
-rw-r--r--  gcc/tree-pass.h | 4
-rw-r--r--  gcc/tree-ssa-alias.c | 18
-rw-r--r--  gcc/tree-ssa-ccp.c | 17
-rw-r--r--  gcc/tree-ssa-dse.c | 32
-rw-r--r--  gcc/tree-ssa-pre.c | 27
-rw-r--r--  gcc/tree-ssa-sccvn.c | 11
-rw-r--r--  gcc/tree-ssa-sccvn.h | 1
-rw-r--r--  gcc/tree-ssa-structalias.c | 28
-rw-r--r--  gcc/tree-ssa.c | 13
-rw-r--r--  gcc/tree-vrp.c | 4
-rw-r--r--  gcc/tree.h | 4
-rw-r--r--  libjava/ChangeLog | 6
-rw-r--r--  libjava/gcj/javaprims.h | 6
-rw-r--r--  libjava/libgcj_bc.c | 3
-rw-r--r--  libmudflap/ChangeLog | 10
-rw-r--r--  libmudflap/config.h.in | 3
-rwxr-xr-x  libmudflap/configure | 31
-rw-r--r--  libmudflap/configure.ac | 1
-rw-r--r--  libmudflap/mf-hooks3.c | 4
-rw-r--r--  libmudflap/mf-impl.h | 2
-rw-r--r--  libmudflap/mf-runtime.c | 2
-rw-r--r--  libstdc++-v3/ChangeLog | 51
-rw-r--r--  libstdc++-v3/acinclude.m4 | 2
-rw-r--r--  libstdc++-v3/config/abi/pre/gnu.ver | 2
-rwxr-xr-x  libstdc++-v3/configure | 2
-rw-r--r--  libstdc++-v3/include/ext/bitmap_allocator.h | 2
-rw-r--r--  libstdc++-v3/include/std/thread | 30
-rw-r--r--  libstdc++-v3/src/thread.cc | 4
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic/cons/assign_neg.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic/cons/copy_neg.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_address/requirements/standard_layout.cc (renamed from libstdc++-v3/testsuite/29_atomics/atomic/requirements/standard_layout.cc) | 15
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_address/requirements/trivial.cc | 38
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/standard_layout.cc | 21
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/trivial.cc | 38
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/assign_neg.cc | 4
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/copy_neg.cc | 4
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/atomic_integral/requirements/trivial.cc | 39
-rw-r--r--  libstdc++-v3/testsuite/29_atomics/headers/stdatomic.h/functions.c | 3
-rw-r--r--  libstdc++-v3/testsuite/30_threads/condition_variable/requirements/standard_layout.cc | 40
-rw-r--r--  libstdc++-v3/testsuite/30_threads/condition_variable/requirements/typedefs.cc | 41
-rw-r--r--  libstdc++-v3/testsuite/30_threads/mutex/requirements/standard_layout.cc | 40
-rw-r--r--  libstdc++-v3/testsuite/30_threads/recursive_mutex/requirements/standard_layout.cc | 40
-rw-r--r--  libstdc++-v3/testsuite/30_threads/timed_mutex/requirements/standard_layout.cc | 40
-rw-r--r--  libstdc++-v3/testsuite/util/testsuite_common_types.h | 32
184 files changed, 4740 insertions, 8777 deletions
diff --git a/ChangeLog.melt b/ChangeLog.melt
index fa153105e05..bd61a18f09a 100644
--- a/ChangeLog.melt
+++ b/ChangeLog.melt
@@ -1,3 +1,6 @@
+2009-01-29 Basile Starynkevitch <basile@starynkevitch.net>
+ MELT branch merged with trunk r143767
+
2009-01-21 Basile Starynkevitch <basile@starynkevitch.net>
MELT branch merged with trunk r143546
diff --git a/config/ChangeLog b/config/ChangeLog
index c57ec35e6a0..feb73b94a0f 100644
--- a/config/ChangeLog
+++ b/config/ChangeLog
@@ -1,3 +1,7 @@
+2009-01-23 Jie Zhang <jie.zhang@analog.com>
+
+ * tls.m4 (GCC_CHECK_EMUTLS): Define.
+
2008-12-21 Andrew Pinski <pinskia@gmail.com>
PR target/38300
diff --git a/config/tls.m4 b/config/tls.m4
index acb123f6986..e77742c7603 100644
--- a/config/tls.m4
+++ b/config/tls.m4
@@ -86,3 +86,21 @@ AC_DEFUN([GCC_CHECK_CC_TLS], [
AC_DEFINE(HAVE_CC_TLS, 1,
[Define to 1 if the target assembler supports thread-local storage.])
fi])
+
+dnl Check whether TLS is emulated.
+AC_DEFUN([GCC_CHECK_EMUTLS], [
+ AC_CACHE_CHECK([whether the thread-local storage support is from emutls],
+ gcc_cv_use_emutls, [
+ gcc_cv_use_emutls=no
+ echo '__thread int a; int b; int main() { return a = b; }' > conftest.c
+ if AC_TRY_COMMAND(${CC-cc} -Werror -S -o conftest.s conftest.c 1>&AS_MESSAGE_LOG_FD); then
+ if grep __emutls_get_address conftest.s > /dev/null; then
+ gcc_cv_use_emutls=yes
+ fi
+ fi
+ rm -f conftest.*
+ ])
+ if test "$gcc_cv_use_emutls" = "yes" ; then
+ AC_DEFINE(USE_EMUTLS, 1,
+ [Define to 1 if the target uses emutls for thread-local storage.])
+ fi])
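
For reference, the GCC_CHECK_EMUTLS macro above decides between native and emulated TLS by compiling a one-line probe and searching the generated assembly for the emulated-TLS runtime entry point. A standalone sketch of that probe (the macro writes an equivalent conftest.c itself) looks like this:

  /* conftest-style TLS probe: if the target has no native TLS support,
     GCC lowers the __thread access below to a call to __emutls_get_address,
     which the macro greps for in the assembly produced by
     "cc -Werror -S conftest.c".  */
  __thread int a;
  int b;

  int
  main (void)
  {
    return a = b;
  }

If the grep finds __emutls_get_address, gcc_cv_use_emutls is set to yes and USE_EMUTLS is defined.
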
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index bc4c3619059..1194f97cf0c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,9 +1,456 @@
+2009-01-29 Kazu Hirata <kazu@codesourcery.com>
+
+ PR tree-optimization/39007
+ * tree-loop-distribution.c (generate_builtin): Use
+ recompute_dominator to compute the immediate dominator of the
+ basic block just after the loop.
+
+2009-01-29 Rainer Orth <ro@TechFak.Uni-Bielefeld.DE>
+
+ * config/i386/sol2-10.h [!HAVE_AS_IX86_DIFF_SECT_DELTA]
+ (ASM_OUTPUT_DWARF_PCREL): Define.
+
+2009-01-29 Vladimir Makarov <vmakarov@redhat.com>
+
+ * doc/tm.texi (TARGET_IRA_COVER_CLASSES): Modify description.
+ * doc/passes.texi: Remove entries about regclass, local-alloc, and
+ global. Modify entries about regmove and IRA.
+
+ * ra-conflict.c: Remove the file.
+
+ * reload.c (push_reload, find_dummy_reload): Remove flag_ira.
+
+ * tree-pass.h (pass_local_alloc, pass_global_alloc): Remove.
+ (pass_regclass_init): Rename to pass_reginfo_init.
+
+ * cfgloopanal.c (estimate_reg_pressure_cost): Remove flag_ira.
+
+ * toplev.h (flag_ira): Remove.
+
+ * caller-save.c (setup_save_areas): Remove flag_ira.
+
+ * ira-color.c (ira_reuse_stack_slot, ira_mark_new_stack_slot):
+ Ditto.
+
+ * global.c: Remove the file.
+
+ * opts.c: (decode_options): Remove flag_ira.
+
+ * hard-reg-set.h (losing_caller_save_reg_set): Remove.
+
+ * regmove.c: Modify file description.
+ (find_use_as_address, try_auto_increment): Define them only if
+ AUTO_INC_DEC is defined.
+ (replacement_quality, replace_in_call_usage, fixup_match_1,
+ stable_and_no_regs_but_for_p): Remove.
+ (reg_set_in_bb): Make it static.
+ (regmove_optimize): Remove flag_ira and code which worked for
+ !flag_ira.
+
+ * local-alloc.c: Remove the file.
+
+ * common.opt (fira): Remove.
+
+ * ira.c: Include except.h.
+ (eliminable_regset): Move from global.c.
+ (mark_elimination): Ditto. Remove flag_ira.
+ (reg_renumber, struct equivalence, reg_equiv, equiv_mem,
+ equiv_mem_modified, validate_equiv_mem_from_store,
+ validate_equiv_mem, equiv_init_varies_p, equiv_init_movable_p,
+ contains_replace_regs, memref_referenced_p, memref_used_between_p,
+ no_equiv, recorded_label_ref): Move from local-alloc.c.
+ (update_equiv_regs): Ditto. Make it static.
+ (print_insn_chain, print_insn_chains): Move it from global.c.
+ (pseudo_for_reload_consideration_p): Ditto. Remove flag_ira.
+ (build_insn_chain): Ditto. Make it static.
+ (ra_init_live_subregs): Move from ra-conflict.c. Make it static.
+ Rename to init_live_subregs.
+ (gate_ira): Remove flag_ira.
+
+ * regclass.c: Rename to reginfo.c. Change file description.
+ (FORBIDDEN_INC_DEC_CLASSES): Remove.
+ (reg_class_superclasses, forbidden_inc_dec_class, in_inc_dec):
+ Remove.
+ (init_reg_sets_1): Remove code for evaluation of
+ reg_class_superclasses and losing_caller_save_reg_set.
+ (init_regs): Remove init_reg_autoinc.
+ (struct costs, costs, init_cost, ok_for_index_p_nonstrict,
+ ok_for_base_p_nonstrict): Remove.
+ (regclass_init): Rename to reginfo_init. Don't initialize
+ init_cost.
+ (pass_regclass_init): Rename to pass_reginfo_init. Modify
+ corresponding entries.
+ (dump_regclass, record_operand_costs, scan_one_insn,
+ init_reg_autoinc, regclass, record_reg_classes, copy_cost,
+ record_address_regs, auto_inc_dec_reg_p): Remove.
+ (gt-regclass.h): Rename to gt-reginfo.h.
+
+ * rtl.h (dump_global_regs, retry_global_alloc,
+ build_insn_chain, dump_local_alloc, update_equiv_regs):
+ Remove.
+
+ * Makefile.in (RA_H): Remove.
+ (OBJS-common): Remove global.o, local-alloc.o, and ra-conflict.o.
+ Rename regclass.o to reginfo.o.
+ (regclass.o): Rename to reginfo.o. Rename gt-regclass.h to
+ gt-reginfo.h.
+ (global.o, local-alloc.o, ra-conflict.o): Remove entries.
+ (GTFILES): Rename regclass.c to reginfo.c.
+
+ * passes.c (init_optimization_passes): Remove pass_local_alloc and
+ pass_global_alloc. Rename pass_regclass_init to
+ pass_reginfo_init.
+
+ * reload1.c (compute_use_by_pseudos, reload, count_pseudo,
+ count_spilled_pseudo, find_reg, alter_reg, delete_output_reload):
+ Remove flag_ira.
+ (finish_spills): Ditto. Remove code for !flag_ira.
+
+2009-01-29 Kenneth Zadeck <zadeck@naturalbridge.com>
+
+ PR middle-end/35854
+ * doc/invoke.texi (rtl debug options): Complete rewrite.
+ * auto-inc-dec.c (pass_inc_dec): Rename pass from "auto-inc-dec"
+ to "auto_inc_dec".
+ * mode-switching.c (pass_mode_switching): Rename pass from
+ "mode-sw" to "mode_sw".
+ * except.c (pass_convert_to_eh_ranges): Rename pass from
+ "eh-ranges" to "eh_ranges".
+ * lower-subreg.c (pass_lower_subreg): Renamed pass from "subreg"
+ to "subreg1".
+
+
+2009-01-29 Andrey Belevantsev <abel@ispras.ru>
+ Alexander Monakov <amonakov@ispras.ru>
+
+ PR middle-end/38857
+ * sel-sched.c (count_occurrences_1): Check that *cur_rtx is a hard
+ register.
+ (move_exprs_to_boundary): Change return type and pass through
+ should_move from move_op. Relax assert. Update usage ...
+ (schedule_expr_on_boundary): ... here. Use should_move instead of
+ cant_move.
+ (move_op_orig_expr_found): Indicate that insn was disconnected from
+ stream.
+ (code_motion_process_successors): Do not call after_merge_succs
+ callback if original expression was not found when traversing any of
+ the branches.
+ (code_motion_path_driver): Change return type. Update prototype.
+ (move_op): Update comment. Add a new parameter (should_move). Update
+ prototype. Set *should_move based on indication provided by
+ move_op_orig_expr_found.
+
+2009-01-28 Pat Haugen <pthaugen@us.ibm.com>
+
+ * doc/invoke.texi (avoid-indexed-addresses): Document new option.
+ * config/rs6000/rs6000-protos.h (avoiding_indexed_address_p): Declare.
+ * config/rs6000/rs6000.opt (avoid-indexed-addresses): New option.
+ * config/rs6000/rs6000.c (rs6000_override_options): Default
+ avoid-indexed-addresses on for Power6, off for everything else.
+ (avoiding_indexed_address_p): New function.
+ (rs6000_legitimize_address): Use it.
+ (rs6000_legitimate_address): Likewise.
+ * config/rs6000/rs6000.md (movXX_updateX): Likewise
+
+2009-01-28 Kazu Hirata <kazu@codesourcery.com>
+
+ PR tree-optimization/38997
+ * tree-loop-distribution.c (generate_memset_zero): Use
+ POINTER_PLUS_EXPR for a pointer addition.
+
+2009-01-28 Andreas Krebbel <krebbel1@de.ibm.com>
+
+ * config/s390/s390.md (bswap<mode>2): New pattern added.
+
+2009-01-28 Wolfgang Gellerich <gellerich@de.ibm.com>
+
+ * config/s390/s390.md (*tls_load_31): Added type attribute.
+
+2009-01-28 Wolfgang Gellerich <gellerich@de.ibm.com>
+
+ * config/s390/s390.md: Fix a few comments.
+
+2009-01-28 Wolfgang Gellerich <gellerich@de.ibm.com>
+
+ * config/s390/s390.md (*tmsi_reg): Fixed z10prop attribute.
+ (*tm<mode>_full): Fixed z10prop attribute.
+ (*tst<mode>_extimm): Fixed z10prop attribute.
+ (*tst<mode>_cconly_extimm): Fixed z10prop attribute.
+ (*tstqiCCT_cconly): Fixed z10prop attribute.
+ (*cmpsi_ccu_zerohi_rlsi): Fixed z10prop attribute.
+ (*movsi_larl): Fixed z10prop attribute.
+ (*movsi_zarch): Fixed z10prop attribute.
+ (*movsi_eas): Fixed z10prop attribute.
+ (*movhi): Fixed z10prop attribute.
+ (*movqi): Fixed z10prop attribute.
+ (*movstrictqi): Fixed z10prop attribute.
+ (*mov<mode>): Fixed z10prop attribute.
+ (*movcc): Fixed z10prop attribute.
+ (*sethighpartdi_64): Fixed z10prop attribute.
+ (*zero_extendhi<mode>2_z10): Fixed z10prop attribute.
+ (*negdi2_sign_cc): Fixed z10prop attribute.
+ (*negdi2_sign): Fixed z10prop attribute.
+ (*absdi2_sign_cc): Fixed z10prop attribute.
+ (*absdi2_sign): Fixed z10prop attribute.
+ (*negabsdi2_sign_cc): Fixed z10prop attribute.
+ (*negabsdi2_sign): Fixed z10prop attribute.
+ (*cmp_and_trap_signed_int<mode>): Fixed z10prop attribute.
+ (*cmp_and_trap_unsigned_int<mode>): Fixed z10prop attribute.
+ (doloop_si64): Fixed z10prop attribute.
+ (doloop_si31): Fixed z10prop attribute.
+ (doloop_long): Fixed z10prop attribute.
+ (indirect_jump): Fixed z10prop attribute.
+ (nop): Fixed z10prop attribute.
+ (main_base_64): Fixed z10prop attribute.
+ (reload_base_64): Fixed z10prop attribute.
+
+2009-01-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/38740
+ * reorg.c (gate_handle_delay_slots): Avoid dbr scheduling
+ if !optimize.
+ * config/mips/mips.c (mips_reorg): Likewise.
+
+2009-01-28 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/38926
+ * tree-ssa-pre.c (add_to_value): Assert we add only expressions
+ with the correct value id to a value.
+ (do_regular_insertion): Use the value number of edoubleprime
+ for the value number of the expr.
+
+ Revert
+ 2008-08-21 Richard Guenther <rguenther@suse.de>
+
+ * tree-ssa-pre.c (insert_into_preds_of_block): Before inserting
+ a PHI ask VN if it is already available.
+ * tree-ssa-sccvn.h (vn_phi_lookup): Declare.
+ * tree-ssa-sccvn.c (vn_phi_lookup): Export.
+
+2009-01-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/38934
+ * tree-vrp.c (extract_range_from_assert): For LE_EXPR and LT_EXPR
+ set to varying whenever max has TREE_OVERFLOW set, similarly
+ for GE_EXPR and GT_EXPR and TREE_OVERFLOW min.
+
+2009-01-28 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/38908
+ * tree-ssa.c (warn_uninitialized_var): Do not warn for seemingly
+ uninitialized aggregate uses in call arguments.
+
+2009-01-28 Paolo Bonzini <bonzini@gnu.org>
+
+ PR tree-optimization/38984
+ * tree-ssa-structalias.c (get_constraints_for_1): Do not use
+ the nothing_id variable if -fno-delete-null-pointer-checks.
+
+2009-01-28 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/38988
+ * config/i386/i386.md (set_rip_rex64): Wrap operand 1 in label_ref.
+ (set_got_offset_rex64): Ditto.
+
+2009-01-27 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/38941
+ * doc/extend.texi: Improve local variable with asm reg.
+
+2009-01-27 Adam Nemet <anemet@caviumnetworks.com>
+
+ * c.opt (Wpacked-bitfield-compat): Change init value to -1.
+ * c-opts.c (c_common_post_options): If -W*packed-bitfield-compat
+ was not supplied then set warn_packed_bitfield_compat to the
+ default value of 1.
+ * stor-layout.c (place_field): Check warn_packed_bitfield_compat
+ against 1.
+
+2009-01-27 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/38503
+ * cfgexpand.c (expand_gimple_basic_block): Ignore
+ GIMPLE_CHANGE_DYNAMIC_TYPE during expansion.
+ * tree-ssa-structalias.c (set_uids_in_ptset): Do not prune
+ variables that cannot have TBAA applied.
+ (compute_points_to_sets): Do not remove GIMPLE_CHANGE_DYNAMIC_TYPE
+ statements.
+
+2009-01-27 Uros Bizjak <ubizjak@gmail.com>
+
+ PR middle-end/38969
+ * calls.c (initialize_argument_information): Do not wrap complex
+ arguments in SAVE_EXPR.
+
+2009-01-26 Andreas Tobler <a.tobler@schweiz.org>
+
+ * config/t-vxworks (LIBGCC2_INCLUDES): Fix typo.
+ (INSTALL_LIBGCC): Revert typo commit.
+
+2009-01-26 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/38745
+ * tree-ssa-alias.c (update_alias_info_1): Exclude RESULT_DECL
+ from special handling.
+
+2009-01-26 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/38745
+ * tree-ssa.c (execute_update_addresses_taken): Do not include
+ variables that cannot possibly be a register in not_reg_needs.
+ Do not clear TREE_ADDRESSABLE on vars that may not become
+ registers.
+ * tree-ssa.c (update_alias_info_1): Include those in the set
+ of addressable vars.
+
+2009-01-26 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/38851
+ * Makefile.in (tree-ssa-dse.o): Add langhooks.h.
+ * tree-ssa-dse.c: Include langhooks.h
+ (execute_simple_dse): Remove stores with zero size.
+
+2009-01-24 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/38957
+ * c-typeck.c (c_finish_return): Handle POINTER_PLUS_EXPR the same way
+ as PLUS_EXPR.
+
+2009-01-24 Julian Brown <julian@codesourcery.com>
+
+ * config/arm/t-linux-eabi (LIB2FUNCS_STATIC_EXTRA): Add
+ config/arm/linux-atomic.c.
+ * config/arm/linux-atomic.c: New.
+
+2009-01-24 Eric Botcazou <ebotcazou@adacore.com>
+
+ * config/sparc/linux.h (DBX_REGISTER_NUMBER): Delete.
+ * config/sparc/linux64.h (DBX_REGISTER_NUMBER): Likewise.
+ * config/sparc/sysv4.h (DBX_REGISTER_NUMBER): Likewise.
+
+2009-01-24 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR c/38938
+ * c-opts.c (c_common_handle_option): Update warn_pointer_sign
+ properly.
+
+2009-01-24 Sebastian Pop <sebastian.pop@amd.com>
+
+ PR tree-optimization/38953
+ * graphite.c (graphite_verify): Add a call to verify_loop_closed_ssa.
+ (scop_adjust_phis_for_liveouts): Initialize false_i to zero.
+ (gloog): Split the exit of the scop when the scop exit is a loop exit.
+ (graphite_transform_loops): Only call cleanup_tree_cfg if gloog
+ changed the CFG.
+
+2009-01-24 Paul Brook <paul@codesourcery.com>
+
+ * config/arm/neon.md (neon_type): Move to arm.md.
+ (neon_mov<VSTRUCT>): Add neon_type attribute.
+ * config/arm/arm.md (neon_type): Move to here.
+ (conds): Add "unconditional" and use as default for NEON insns.
+
+2009-01-24 Ben Elliston <bje@au.ibm.com>
+
+ * bitmap.h (BITMAP_FREE): Eliminate `implicit conversion from
+ void *' warning from -Wc++-compat.
+ * Makefile.in (dominance.o-warn): Remove.
+
+2009-01-23 Paolo Bonzini <bonzini@gnu.org>
+
+ PR tree-optimization/38932
+ * fold-const.c (fold_unary_ignore_overflow): New.
+ * tree.h (fold_unary_ignore_overflow): Declare.
+ * tree-ssa-ccp.c (ccp_fold): Use fold_unary_ignore_overflow.
+ * tree-ssa-sccvn.c (visit_reference_op_load,
+ simplify_unary_expression): Likewise.
+
+2009-01-22 Adam Nemet <anemet@caviumnetworks.com>
+
+ * c-decl.c (finish_struct): Move code to set DECL_PACKED after
+ DECL_BIT_FIELD is already known. Also inherit packed for bitfields
+ regardless of their type.
+ * c-common.c (handle_packed_attribute): Don't ignore packed on
+ bitfields.
+ * c.opt (Wpacked-bitfield-compat): New warning option.
+ * stor-layout.c (place_field): Warn if offset of a field changed.
+ * doc/extend.texi (packed): Mention the ABI change.
+ * doc/invoke.texi (-Wpacked-bitfield-compat): Document.
+ (Warning Options): Add it to the list.
+
+2009-01-22 H.J. Lu <hongjiu.lu@intel.com>
+
+ * c-opts.c (c_common_post_options): Fix a typo in comments.
+
+2009-01-22 Steve Ellcey <sje@cup.hp.com>
+
+ PR middle-end/38615
+ * gimplify.c (gimplify_init_constructor): Fix promotion of const
+ variables to static.
+ * doc/invoke.texi (-fmerge-all-constants): Update description.
+
+2009-01-22 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/38931
+ * config/i386/i386.md (*movsi_1): Use type "mmx" for alternative 2.
+ (*movdi_1_rex64): Use type "mmx" for alternative 5.
+
+2009-01-22 Richard Earnshaw <rearnsha@arm.com>
+
+ * arm.h (DATA_ALIGNMENT): Align structures, unions and arrays to
+ a word boundary.
+ (LOCAL_ALIGNMENT): Similarly.
+
+2009-01-22 Mark Shinwell <shinwell@codesourcery.com>
+ Joseph Myers <joseph@codesourcery.com>
+
+ * config/arm/arm.c (all_architectures): Add iWMMXt2 entry.
+ * config/arm/arm-cores.def: New ARM_CORE entry for iWMMXt2.
+ * config/arm/arm-tune.md: Regenerate.
+ * doc/invoke.texi (ARM Options): Document -mcpu=iwmmxt2 and
+ -march=iwmmxt2.
+
+2009-01-22 Mark Shinwell <shinwell@codesourcery.com>
+
+ * config/arm/bpabi.h (SUBTARGET_EXTRA_ASM_SPEC): Bump EABI
+ version number to five.
+
+2009-01-22 Dodji Seketeli <dodji@redhat.com>
+
+ PR c++/38930
+ * c-decl.c: (clone_underlying_type): Revert PR c++/26693 changes.
+ * c-common.c (set_underlying_type): Likewise.
+ (is_typedef_decl): Likewise.
+ * tree.h: Likewise.
+ (set_underlying_type): Likewise.
+ (is_typedef_type): Likewise.
+
+2009-01-21 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR middle-end/38587
+ * ira-color.c (coalesce_spill_slots): Don't coalesce allocnos
+ crossing setjmps.
+
+2009-01-21 Dave Korn <dave.korn.cygwin@gmail.com>
+
+ PR bootstrap/37660
+ * config/i386/cygwin.h (SHARED_LIBGCC_SPEC): New helper macro.
+ (LIBGCC_SPEC): Don't define.
+ (REAL_LIBGCC_SPEC): Define instead, using SHARED_LIBGCC_SPEC.
+
+2009-01-21 Uros Bizjak <ubizjak@gmail.com>
+
+ PR rtl-optimization/38879
+ * alias.c (base_alias_check): Unaligned access via AND address can
+ alias all surrounding object types except those with sizes equal
+ or wider than the size of unaligned access.
+
2009-01-21 Dodji Seketeli <dodji@redhat.com>
PR c++/26693
- * c-decl.c: (clone_underlying_type): Move this ...
+ * c-decl.c: (clone_underlying_type): Move this ...
* c-common.c (set_underlying_type): ... here.
- Also, make sure the function properly sets TYPE_STUB_DECL() on
+ Also, make sure the function properly sets TYPE_STUB_DECL() on
the newly created typedef variant type.
(is_typedef_decl ): New entry point.
* tree.h: Added a new member member_types_needing_access_check to
@@ -35,7 +482,7 @@
(version_id): Modify.
2009-01-20 Andrew Pinski <andrew_pinski@playstation.sony.com>
- Richard Guenther <rguenther@suse.de>
+ Richard Guenther <rguenther@suse.de>
PR tree-optimization/38747
PR tree-optimization/38748
@@ -62,14 +509,13 @@
2009-01-20 Ben Elliston <bje@au.ibm.com>
* libgcov.c (__gcov_execl, __gcov_execlp, __gcov_execle): Remove
- const qualifier from arg parameter. Remove unnecessary cast to
- char *.
+ const qualifier from arg parameter. Remove unnecessary cast to char *.
* gcov-io.h (__gcov_execl, __gcov_execlp, __gcov_execle): Remove
const qualifier from arg 2.
2009-01-19 Iain Sandoe <iain.sandoe@sandoe-acoustics.co.uk>
- * config/darwin.h: Add static-libgfortran to LINK_SPEC.
+ * config/darwin.h: Add static-libgfortran to LINK_SPEC.
2009-01-19 Vladimir Makarov <vmakarov@redhat.com>
@@ -228,8 +674,7 @@
2009-01-15 Kenneth Zadeck <zadeck@naturalbridge.com>
- * dce.c (find_call_stack_args, delete_unmarked_insns): Fixed
- comments.
+ * dce.c (find_call_stack_args, delete_unmarked_insns): Fixed comments.
2009-01-14 Jakub Jelinek <jakub@redhat.com>
@@ -255,9 +700,9 @@
2009-01-14 Michael Meissner <gnu@the-meissners.org>
PR target/22599
- * i386.c (print_operand): Add tests for 'D', 'C', 'F', 'f' to make
- sure the insn is a conditional test (bug 22599). Reformat a few long
- lines.
+ * config/i386/i386.c (print_operand): Add tests for 'D', 'C', 'F', 'f'
+ to make sure the insn is a conditional test (bug 22599). Reformat a
+ few long lines.
2009-01-14 Sebastian Pop <sebastian.pop@amd.com>
@@ -268,10 +713,6 @@
(graphite_transform_loops): Call cleanup_tree_cfg after all
scops have been code generated.
-2009-01-14 Vladimir Makarov <vmakarov@redhat.com>
-
- * testsuite/g++.dg/torture/pr38811.C: New file.
-
2009-01-14 Basile Starynkevitch <basile@starynkevitch.net>
* doc/gty.texi (Invoking the garbage collector): Added new node
and section documenting ggc_collect.
@@ -291,10 +732,8 @@
* ira-conflicts.c: Include addresses.h for the definition of
base_reg_class.
- (ira_build_conflicts): Use base_reg_class instead of
- BASE_REG_CLASS.
- * Makefile.in: Add a dependency of ira-conflicts.o on
- addresses.h.
+ (ira_build_conflicts): Use base_reg_class instead of BASE_REG_CLASS.
+ * Makefile.in: Add a dependency of ira-conflicts.o on addresses.h.
2009-01-13 Vladimir Makarov <vmakarov@redhat.com>
@@ -326,7 +765,8 @@
used to index the memory access. Do not pass loop_p.
Fix comment. Stop recursion on tcc_constant or tcc_declaration.
(expand_scalar_variables_stmt): Pass to expand_scalar_variables_expr
- the gimple_stmt_iterator where it inserts new code. Do not pass loop_p.
+ the gimple_stmt_iterator where it inserts new code.
+ Do not pass loop_p.
(copy_bb_and_scalar_dependences): Do not pass loop_p.
(translate_clast): Update call to copy_bb_and_scalar_dependences.
@@ -337,13 +777,13 @@
2009-01-13 Richard Earnshaw <rearnsha@arm.com>
- * arm.c (output_move_double): Don't synthesize thumb-2 ldrd/strd with
- two 32-bit instructions.
+ * config/arm/arm.c (output_move_double): Don't synthesize thumb-2
+ ldrd/strd with two 32-bit instructions.
2009-01-13 Richard Earnshaw <rearnsha@arm.com>
- * arm.c (struct processors): Pass for speed down into cost helper
- functions.
+ * config/arm/arm.c (struct processors): Pass for speed down into
+ cost helper functions.
(const_ok_for_op): Handle COMPARE and inequality nodes.
(arm_rtx_costs_1): Rewrite.
(arm_size_rtx_costs): Update prototype.
@@ -355,7 +795,7 @@
2009-01-13 Uros Bizjak <ubizjak@gmail.com>
* config/alpha/alpha.c (alpha_legitimate_address_p): Explicit
- relocations of local symbols wider than UNITS_PER_WORD are not valid.
+ relocations of local symbols wider than UNITS_PER_WORD are not valid.
(alpha_legitimize_address): Do not split local symbols wider than
UNITS_PER_WORD into HIGH/LO_SUM parts.
@@ -652,7 +1092,7 @@
2009-01-06 Jan Hubicka <jh@suse.cz>
PR target/38744
- * i386.c (ix86_expand_call): Use ARRAY_SIZE.
+ * config/i386/i386.c (ix86_expand_call): Use ARRAY_SIZE.
2009-01-06 Gerald Pfeifer <gerald@pfeifer.com>
@@ -662,14 +1102,15 @@
2009-01-06 Jan Hubicka <jh@suse.cz>
Kai Tietz <kai.tietz@onevision.com>
- * i386.md (*msabi_syvabi): Add SSE regs clobbers.
- * i386.c (ix86_expand_call): Add clobbers.
+ * config/i386/i386.md (*msabi_syvabi): Add SSE regs clobbers.
+ * config/i386/i386.c (ix86_expand_call): Add clobbers.
2009-01-06 Jan Hubicka <jh@suse.cz>
Kai Tietz <kai.tietz@onevision.com>
- * i386.h (CONDITIONAL_CALL_USAGE): SSE regs are not used for w64 ABI.
- * i386.c (struct ix86_frame): Add padding0 and nsseregs.
+ * config/i386/i386.h (CONDITIONAL_CALL_USAGE): SSE regs are not used
+ for w64 ABI.
+ * config/i386/i386.c (struct ix86_frame): Add padding0 and nsseregs.
(ix86_nsaved_regs): Count only general purpose regs.
(ix86_nsaved_sseregs): New.
(ix86_compute_frame_layout): Update nsseregs; set preferred alignment
@@ -685,9 +1126,9 @@
2009-01-06 Jan Hubicka <jh@suse.cz>
Kai Tietz <kai.tietz@onevision.com>
- * i386.h (ACCUMULATE_OUTGOING_ARGS): Enable for MSABI
- * i386.c (init_cumulative_args): Disallow calls of MSABI functions
- when accumulate outgoing args is off.
+ * config/i386/i386.h (ACCUMULATE_OUTGOING_ARGS): Enable for MSABI
+ * config/i386/i386.c (init_cumulative_args): Disallow calls of MSABI
+ functions when accumulate outgoing args is off.
2009-01-06 H.J. Lu <hongjiu.lu@intel.com>
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 005f246d839..140b96e4d4f 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20090121
+20090129
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 77547e94b16..82c32cd758c 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -179,8 +179,6 @@ build/gengtype-lex.o-warn = -Wno-error
SYSCALLS.c.X-warn = -Wno-strict-prototypes -Wno-error
# dfp.c contains alias violations
dfp.o-warn = -Wno-error
-# dominance.c contains a -Wc++compat warning.
-dominance.o-warn = -Wno-error
# mips-tfile.c contains -Wcast-qual warnings.
mips-tfile.o-warn = -Wno-error
@@ -853,7 +851,6 @@ FUNCTION_H = function.h $(TREE_H) $(HASHTAB_H) varray.h
EXPR_H = expr.h insn-config.h $(FUNCTION_H) $(RTL_H) $(FLAGS_H) $(TREE_H) $(MACHMODE_H) $(EMIT_RTL_H)
OPTABS_H = optabs.h insn-codes.h
REGS_H = regs.h varray.h $(MACHMODE_H) $(OBSTACK_H) $(BASIC_BLOCK_H) $(FUNCTION_H)
-RA_H = ra.h $(REGS_H)
RESOURCE_H = resource.h hard-reg-set.h
SCHED_INT_H = sched-int.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) $(DF_H) vecprim.h
SEL_SCHED_IR_H = sel-sched-ir.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) \
@@ -1146,7 +1143,6 @@ OBJS-common = \
gimple-low.o \
gimple-pretty-print.o \
gimplify.o \
- global.o \
graph.o \
graphds.o \
graphite.o \
@@ -1171,7 +1167,6 @@ OBJS-common = \
langhooks.o \
lcm.o \
lists.o \
- local-alloc.o \
loop-doloop.o \
loop-init.o \
loop-invariant.o \
@@ -1198,11 +1193,10 @@ OBJS-common = \
print-rtl.o \
print-tree.o \
profile.o \
- ra-conflict.o \
real.o \
recog.o \
reg-stack.o \
- regclass.o \
+ reginfo.o \
regmove.o \
regrename.o \
regstat.o \
@@ -2155,7 +2149,7 @@ tree-outof-ssa.o : tree-outof-ssa.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
tree-ssa-dse.o : tree-ssa-dse.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(GGC_H) $(TREE_H) $(RTL_H) $(TM_P_H) $(BASIC_BLOCK_H) \
$(TREE_FLOW_H) tree-pass.h $(TREE_DUMP_H) domwalk.h $(FLAGS_H) \
- $(DIAGNOSTIC_H) $(TIMEVAR_H)
+ $(DIAGNOSTIC_H) $(TIMEVAR_H) langhooks.h
tree-ssa-forwprop.o : tree-ssa-forwprop.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(GGC_H) $(TREE_H) $(RTL_H) $(TM_P_H) $(BASIC_BLOCK_H) \
$(TREE_FLOW_H) tree-pass.h $(TREE_DUMP_H) $(DIAGNOSTIC_H) $(TIMEVAR_H) \
@@ -2911,25 +2905,13 @@ combine.o : combine.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
rtlhooks-def.h $(BASIC_BLOCK_H) $(RECOG_H) $(REAL_H) hard-reg-set.h \
$(TOPLEV_H) $(TM_P_H) $(TREE_H) $(TARGET_H) output.h $(PARAMS_H) $(OPTABS_H) \
insn-codes.h $(TIMEVAR_H) tree-pass.h $(DF_H) vecprim.h $(CGRAPH_H)
-regclass.o : regclass.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
+reginfo.o : reginfo.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
hard-reg-set.h $(FLAGS_H) $(BASIC_BLOCK_H) addresses.h $(REGS_H) insn-config.h \
$(RECOG_H) reload.h $(REAL_H) $(TOPLEV_H) $(FUNCTION_H) output.h $(GGC_H) \
- $(TM_P_H) $(EXPR_H) $(TIMEVAR_H) gt-regclass.h $(HASHTAB_H) \
+ $(TM_P_H) $(EXPR_H) $(TIMEVAR_H) gt-reginfo.h $(HASHTAB_H) \
$(TARGET_H) tree-pass.h $(DF_H) ira.h
-local-alloc.o : local-alloc.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
- $(RTL_H) $(FLAGS_H) $(REGS_H) hard-reg-set.h insn-config.h $(RECOG_H) \
- output.h $(FUNCTION_H) $(INSN_ATTR_H) $(TOPLEV_H) except.h reload.h $(TM_P_H) \
- $(GGC_H) $(INTEGRATE_H) $(TIMEVAR_H) tree-pass.h $(DF_H) $(DBGCNT_H)
bitmap.o : bitmap.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
$(FLAGS_H) $(GGC_H) gt-bitmap.h $(BITMAP_H) $(OBSTACK_H) $(HASHTAB_H)
-global.o : global.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
- $(FLAGS_H) reload.h $(FUNCTION_H) $(RECOG_H) $(REGS_H) hard-reg-set.h \
- insn-config.h output.h $(TOPLEV_H) $(TM_P_H) $(MACHMODE_H) tree-pass.h \
- $(TIMEVAR_H) vecprim.h $(DF_H) $(DBGCNT_H) $(RA_H) ira.h
-ra-conflict.o : ra-conflict.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
- $(FLAGS_H) reload.h $(FUNCTION_H) $(RECOG_H) $(REGS_H) hard-reg-set.h \
- insn-config.h output.h $(TOPLEV_H) $(TM_P_H) $(MACHMODE_H) tree-pass.h \
- $(TIMEVAR_H) vecprim.h $(DF_H) $(RA_H) sbitmap.h sparseset.h
varray.o : varray.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(GGC_H) \
$(HASHTAB_H) $(BCONFIG_H) $(VARRAY_H) $(TOPLEV_H)
vec.o : vec.c $(CONFIG_H) $(SYSTEM_H) coretypes.h vec.h $(GGC_H) \
@@ -3373,7 +3355,7 @@ GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(srcdir)/function.c $(srcdir)/except.h \
$(BASILYS_H) \
$(srcdir)/gcse.c $(srcdir)/integrate.c $(srcdir)/lists.c $(srcdir)/optabs.c \
- $(srcdir)/profile.c $(srcdir)/regclass.c $(srcdir)/mcf.c \
+ $(srcdir)/profile.c $(srcdir)/reginfo.c $(srcdir)/mcf.c \
$(srcdir)/reg-stack.c $(srcdir)/cfglayout.c $(srcdir)/cfglayout.h \
$(srcdir)/sdbout.c $(srcdir)/stor-layout.c \
$(srcdir)/stringpool.c $(srcdir)/tree.c $(srcdir)/varasm.c \
diff --git a/gcc/alias.c b/gcc/alias.c
index 18c7d87a52f..13c94bc6dff 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -1559,26 +1559,27 @@ base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
if (rtx_equal_p (x_base, y_base))
return 1;
- /* The base addresses of the read and write are different expressions.
- If they are both symbols and they are not accessed via AND, there is
- no conflict. We can bring knowledge of object alignment into play
- here. For example, on alpha, "char a, b;" can alias one another,
- though "char a; long b;" cannot. */
+ /* The base addresses are different expressions. If they are not accessed
+ via AND, there is no conflict. We can bring knowledge of object
+ alignment into play here. For example, on alpha, "char a, b;" can
+ alias one another, though "char a; long b;" cannot. AND addresses may
+ implicitly alias surrounding objects; i.e. unaligned access in DImode
+ via AND address can alias all surrounding object types except those
+ with alignment 8 or higher. */
+ if (GET_CODE (x) == AND && GET_CODE (y) == AND)
+ return 1;
+ if (GET_CODE (x) == AND
+ && (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
+ return 1;
+ if (GET_CODE (y) == AND
+ && (GET_CODE (XEXP (y, 1)) != CONST_INT
+ || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
+ return 1;
+
+ /* Differing symbols not accessed via AND never alias. */
if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
- {
- if (GET_CODE (x) == AND && GET_CODE (y) == AND)
- return 1;
- if (GET_CODE (x) == AND
- && (GET_CODE (XEXP (x, 1)) != CONST_INT
- || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
- return 1;
- if (GET_CODE (y) == AND
- && (GET_CODE (XEXP (y, 1)) != CONST_INT
- || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
- return 1;
- /* Differing symbols never alias. */
- return 0;
- }
+ return 0;
/* If one address is a stack reference there can be no alias:
stack references using different base registers do not alias,
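
As a rough sketch of the scenario the rewritten comment above describes (assuming an Alpha-style unaligned DImode access through an address of the form (and (reg) (const_int -8)), so -INTVAL (XEXP (x, 1)) is 8):

  /* Two adjacent chars: an unaligned wider access synthesized through an
     AND-masked address may cover both, so they must be assumed to alias it.  */
  char a, b;

  /* A long is at least 8-byte aligned on that target, so an 8-byte masked
     access cannot partially overlap it and no conflict is recorded.  */
  char c;
  long d;
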
diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c
index 16b708c9252..0fda67aa302 100644
--- a/gcc/auto-inc-dec.c
+++ b/gcc/auto-inc-dec.c
@@ -1544,7 +1544,7 @@ struct rtl_opt_pass pass_inc_dec =
{
{
RTL_PASS,
- "auto-inc-dec", /* name */
+ "auto_inc_dec", /* name */
gate_auto_inc_dec, /* gate */
rest_of_handle_auto_inc_dec, /* execute */
NULL, /* sub */
diff --git a/gcc/bitmap.h b/gcc/bitmap.h
index 7f17dc1d361..905eed359e9 100644
--- a/gcc/bitmap.h
+++ b/gcc/bitmap.h
@@ -194,8 +194,8 @@ extern hashval_t bitmap_hash(const_bitmap);
#define BITMAP_GGC_ALLOC() bitmap_gc_alloc ()
/* Do any cleanup needed on a bitmap when it is no longer used. */
-#define BITMAP_FREE(BITMAP) \
- ((void)(bitmap_obstack_free (BITMAP), (BITMAP) = NULL))
+#define BITMAP_FREE(BITMAP) \
+ ((void) (bitmap_obstack_free ((bitmap) BITMAP), (BITMAP) = (bitmap) NULL))
/* Iterator for bitmaps. */
diff --git a/gcc/c-common.c b/gcc/c-common.c
index b766c6f0abf..78c6afb8f4a 100644
--- a/gcc/c-common.c
+++ b/gcc/c-common.c
@@ -5125,7 +5125,9 @@ handle_packed_attribute (tree *node, tree name, tree ARG_UNUSED (args),
}
else if (TREE_CODE (*node) == FIELD_DECL)
{
- if (TYPE_ALIGN (TREE_TYPE (*node)) <= BITS_PER_UNIT)
+ if (TYPE_ALIGN (TREE_TYPE (*node)) <= BITS_PER_UNIT
+ /* Still pack bitfields. */
+ && ! DECL_INITIAL (*node))
warning (OPT_Wattributes,
"%qE attribute ignored for field of type %qT",
name, TREE_TYPE (*node));
@@ -8400,73 +8402,4 @@ warn_for_sign_compare (location_t location,
}
}
-/* Setup a TYPE_DECL node as a typedef representation.
-
- X is a TYPE_DECL for a typedef statement. Create a brand new
- ..._TYPE node (which will be just a variant of the existing
- ..._TYPE node with identical properties) and then install X
- as the TYPE_NAME of this brand new (duplicate) ..._TYPE node.
-
- The whole point here is to end up with a situation where each
- and every ..._TYPE node the compiler creates will be uniquely
- associated with AT MOST one node representing a typedef name.
- This way, even though the compiler substitutes corresponding
- ..._TYPE nodes for TYPE_DECL (i.e. "typedef name") nodes very
- early on, later parts of the compiler can always do the reverse
- translation and get back the corresponding typedef name. For
- example, given:
-
- typedef struct S MY_TYPE;
- MY_TYPE object;
-
- Later parts of the compiler might only know that `object' was of
- type `struct S' if it were not for code just below. With this
- code however, later parts of the compiler see something like:
-
- struct S' == struct S
- typedef struct S' MY_TYPE;
- struct S' object;
-
- And they can then deduce (from the node for type struct S') that
- the original object declaration was:
-
- MY_TYPE object;
-
- Being able to do this is important for proper support of protoize,
- and also for generating precise symbolic debugging information
- which takes full account of the programmer's (typedef) vocabulary.
-
- Obviously, we don't want to generate a duplicate ..._TYPE node if
- the TYPE_DECL node that we are now processing really represents a
- standard built-in type. */
-
-void
-set_underlying_type (tree x)
-{
- if (DECL_IS_BUILTIN (x))
- {
- if (TYPE_NAME (TREE_TYPE (x)) == 0)
- TYPE_NAME (TREE_TYPE (x)) = x;
- }
- else if (TREE_TYPE (x) != error_mark_node
- && DECL_ORIGINAL_TYPE (x) == NULL_TREE)
- {
- tree tt = TREE_TYPE (x);
- DECL_ORIGINAL_TYPE (x) = tt;
- tt = build_variant_type_copy (tt);
- TYPE_STUB_DECL (tt) = TYPE_STUB_DECL (DECL_ORIGINAL_TYPE (x));
- TYPE_NAME (tt) = x;
- TREE_USED (tt) = TREE_USED (x);
- TREE_TYPE (x) = tt;
- }
-}
-
-/* Returns true if X is a typedef type. */
-bool
-is_typedef_decl (tree x)
-{
- return (x && TREE_CODE (x) == TYPE_DECL
- && DECL_ORIGINAL_TYPE (x) != NULL_TREE);
-}
-
#include "gt-c-common.h"
diff --git a/gcc/c-decl.c b/gcc/c-decl.c
index 8b444d4177a..6ebee1a337b 100644
--- a/gcc/c-decl.c
+++ b/gcc/c-decl.c
@@ -1971,6 +1971,67 @@ warn_if_shadowing (tree new_decl)
}
}
+
+/* Subroutine of pushdecl.
+
+ X is a TYPE_DECL for a typedef statement. Create a brand new
+ ..._TYPE node (which will be just a variant of the existing
+ ..._TYPE node with identical properties) and then install X
+ as the TYPE_NAME of this brand new (duplicate) ..._TYPE node.
+
+ The whole point here is to end up with a situation where each
+ and every ..._TYPE node the compiler creates will be uniquely
+ associated with AT MOST one node representing a typedef name.
+ This way, even though the compiler substitutes corresponding
+ ..._TYPE nodes for TYPE_DECL (i.e. "typedef name") nodes very
+ early on, later parts of the compiler can always do the reverse
+ translation and get back the corresponding typedef name. For
+ example, given:
+
+ typedef struct S MY_TYPE;
+ MY_TYPE object;
+
+ Later parts of the compiler might only know that `object' was of
+ type `struct S' if it were not for code just below. With this
+ code however, later parts of the compiler see something like:
+
+ struct S' == struct S
+ typedef struct S' MY_TYPE;
+ struct S' object;
+
+ And they can then deduce (from the node for type struct S') that
+ the original object declaration was:
+
+ MY_TYPE object;
+
+ Being able to do this is important for proper support of protoize,
+ and also for generating precise symbolic debugging information
+ which takes full account of the programmer's (typedef) vocabulary.
+
+ Obviously, we don't want to generate a duplicate ..._TYPE node if
+ the TYPE_DECL node that we are now processing really represents a
+ standard built-in type. */
+
+static void
+clone_underlying_type (tree x)
+{
+ if (DECL_IS_BUILTIN (x))
+ {
+ if (TYPE_NAME (TREE_TYPE (x)) == 0)
+ TYPE_NAME (TREE_TYPE (x)) = x;
+ }
+ else if (TREE_TYPE (x) != error_mark_node
+ && DECL_ORIGINAL_TYPE (x) == NULL_TREE)
+ {
+ tree tt = TREE_TYPE (x);
+ DECL_ORIGINAL_TYPE (x) = tt;
+ tt = build_variant_type_copy (tt);
+ TYPE_NAME (tt) = x;
+ TREE_USED (tt) = TREE_USED (x);
+ TREE_TYPE (x) = tt;
+ }
+}
+
/* Record a decl-node X as belonging to the current lexical scope.
Check for errors (such as an incompatible declaration for the same
name already seen in the same scope).
@@ -2193,7 +2254,7 @@ pushdecl (tree x)
skip_external_and_shadow_checks:
if (TREE_CODE (x) == TYPE_DECL)
- set_underlying_type (x);
+ clone_underlying_type (x);
bind (name, x, scope, /*invisible=*/false, nested);
@@ -5503,9 +5564,6 @@ finish_struct (tree t, tree fieldlist, tree attributes)
DECL_CONTEXT (x) = t;
- if (TYPE_PACKED (t) && TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)
- DECL_PACKED (x) = 1;
-
/* If any field is const, the structure type is pseudo-const. */
if (TREE_READONLY (x))
C_TYPE_FIELDS_READONLY (t) = 1;
@@ -5537,6 +5595,11 @@ finish_struct (tree t, tree fieldlist, tree attributes)
SET_DECL_C_BIT_FIELD (x);
}
+ if (TYPE_PACKED (t)
+ && (DECL_BIT_FIELD (x)
+ || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT))
+ DECL_PACKED (x) = 1;
+
/* Detect flexible array member in an invalid context. */
if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
diff --git a/gcc/c-opts.c b/gcc/c-opts.c
index 589b05be389..a7ff6cc6d45 100644
--- a/gcc/c-opts.c
+++ b/gcc/c-opts.c
@@ -1,5 +1,5 @@
/* C/ObjC/C++ command line option handling.
- Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
Contributed by Neil Booth.
@@ -423,7 +423,7 @@ c_common_handle_option (size_t scode, const char *arg, int value)
cpp_opts->warn_num_sign_change = value;
if (warn_pointer_sign == -1)
- warn_pointer_sign = 1;
+ warn_pointer_sign = value;
break;
case OPT_Wbuiltin_macro_redefined:
@@ -1054,7 +1054,7 @@ c_common_post_options (const char **pfilename)
if (warn_ignored_qualifiers == -1)
warn_ignored_qualifiers = extra_warnings;
- /* -Wpointer_sign is disabled by default, but it is enabled if any
+ /* -Wpointer-sign is disabled by default, but it is enabled if any
of -Wall or -pedantic are given. */
if (warn_pointer_sign == -1)
warn_pointer_sign = 0;
@@ -1085,6 +1085,11 @@ c_common_post_options (const char **pfilename)
if (warn_sign_conversion == -1)
warn_sign_conversion = (c_dialect_cxx ()) ? 0 : warn_conversion;
+ /* -Wpacked-bitfield-compat is on by default for the C languages. The
+ warning is issued in stor-layout.c which is not part of the front-end so
+ we need to selectively turn it on here. */
+ if (warn_packed_bitfield_compat == -1)
+ warn_packed_bitfield_compat = 1;
/* Special format checking options don't work without -Wformat; warn if
they are used. */
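
A hypothetical example of the layout change this warning covers (the struct name and field widths are illustrative, not taken from the patch): in a packed struct, GCC 4.4 now honours the packed attribute on a char bit-field, so its offset can differ from GCC 4.3, and -Wpacked-bitfield-compat (on by default for C, per the comment above) points at such fields.

  /* Illustrative only: 'b' is now packed immediately after the four bits
     of 'a' instead of starting at the next byte, so the layout differs
     from earlier releases and the new warning fires on this field.  */
  struct s
  {
    char a : 4;
    char b : 8;
  } __attribute__ ((packed));
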
diff --git a/gcc/c-typeck.c b/gcc/c-typeck.c
index 35c8d232878..4b9b2b30d00 100644
--- a/gcc/c-typeck.c
+++ b/gcc/c-typeck.c
@@ -7226,8 +7226,10 @@ c_finish_return (tree retval)
{
switch (TREE_CODE (inner))
{
- CASE_CONVERT: case NON_LVALUE_EXPR:
+ CASE_CONVERT:
+ case NON_LVALUE_EXPR:
case PLUS_EXPR:
+ case POINTER_PLUS_EXPR:
inner = TREE_OPERAND (inner, 0);
continue;
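
A sketch of what the added POINTER_PLUS_EXPR case means in practice (hypothetical example, not from the patch): the loop above traces a returned value back to its base, and it now also looks through pointer additions, so a return like the one below is traced back to the local array underneath.

  char *
  f (void)
  {
    char buf[16];
    /* The returned value is a POINTER_PLUS_EXPR over the address of buf;
       with the change above, c_finish_return strips the addition and sees
       the local object it is based on.  */
    return buf + 1;
  }
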
diff --git a/gcc/c.opt b/gcc/c.opt
index 1888ecde63c..e512ec66394 100644
--- a/gcc/c.opt
+++ b/gcc/c.opt
@@ -364,6 +364,10 @@ Woverride-init
C ObjC Var(warn_override_init) Init(-1) Warning
Warn about overriding initializers without side effects
+Wpacked-bitfield-compat
+C ObjC C++ ObjC++ Var(warn_packed_bitfield_compat) Init(-1) Warning
+Warn about packed bit-fields whose offset changed in GCC 4.4
+
Wparentheses
C ObjC C++ ObjC++ Var(warn_parentheses) Warning
Warn about possibly missing parentheses
diff --git a/gcc/caller-save.c b/gcc/caller-save.c
index be1718c1db2..303da88bb48 100644
--- a/gcc/caller-save.c
+++ b/gcc/caller-save.c
@@ -448,7 +448,7 @@ setup_save_areas (void)
SET_HARD_REG_BIT (hard_regs_used, r);
}
- if (flag_ira && optimize && flag_ira_share_save_slots)
+ if (optimize && flag_ira_share_save_slots)
{
rtx insn, slot;
struct insn_chain *chain, *next;
diff --git a/gcc/calls.c b/gcc/calls.c
index a75e3b36569..e6e882f24b1 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -992,7 +992,6 @@ initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
&& targetm.calls.split_complex_arg (argtype))
{
tree subtype = TREE_TYPE (argtype);
- arg = save_expr (arg);
args[j].tree_value = build1 (REALPART_EXPR, subtype, arg);
j += inc;
args[j].tree_value = build1 (IMAGPART_EXPR, subtype, arg);
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 6d7fe772fe7..a9a52c45351 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -1970,7 +1970,7 @@ expand_gimple_basic_block (basic_block bb)
return new_bb;
}
}
- else
+ else if (gimple_code (stmt) != GIMPLE_CHANGE_DYNAMIC_TYPE)
{
tree stmt_tree = gimple_to_tree (stmt);
last = get_last_insn ();
diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c
index 88c5e956264..120371368a2 100644
--- a/gcc/cfgloopanal.c
+++ b/gcc/cfgloopanal.c
@@ -396,8 +396,8 @@ estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed)
one. */
cost = target_spill_cost [speed] * n_new;
- if (optimize && flag_ira && (flag_ira_region == IRA_REGION_ALL
- || flag_ira_region == IRA_REGION_MIXED)
+ if (optimize && (flag_ira_region == IRA_REGION_ALL
+ || flag_ira_region == IRA_REGION_MIXED)
&& number_of_loops () <= (unsigned) IRA_MAX_LOOPS_NUM)
/* IRA regional allocation deals with high register pressure
better. So decrease the cost (to do more accurate the cost
diff --git a/gcc/common.opt b/gcc/common.opt
index c636bf45234..20e7d8e202e 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -753,10 +753,6 @@ Common Report Var(flag_ipa_struct_reorg)
Perform structure layout optimizations based
on profiling information.
-fira
-Common Report Var(flag_ira) Init(0)
-Use integrated register allocator.
-
fira-algorithm=
Common Joined RejectNegative
-fira-algorithm=[CB|priority] Set the used IRA algorithm
diff --git a/gcc/config/arm/arm-cores.def b/gcc/config/arm/arm-cores.def
index fe2f2b53792..ca868fe2d0c 100644
--- a/gcc/config/arm/arm-cores.def
+++ b/gcc/config/arm/arm-cores.def
@@ -102,6 +102,7 @@ ARM_CORE("arm1020e", arm1020e, 5TE, FL_LDSCHED, fastmul)
ARM_CORE("arm1022e", arm1022e, 5TE, FL_LDSCHED, fastmul)
ARM_CORE("xscale", xscale, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE, xscale)
ARM_CORE("iwmmxt", iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale)
+ARM_CORE("iwmmxt2", iwmmxt2, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale)
/* V5TEJ Architecture Processors */
ARM_CORE("arm926ej-s", arm926ejs, 5TEJ, FL_LDSCHED, 9e)
diff --git a/gcc/config/arm/arm-tune.md b/gcc/config/arm/arm-tune.md
index beb8f9f4173..ea728dcb67c 100644
--- a/gcc/config/arm/arm-tune.md
+++ b/gcc/config/arm/arm-tune.md
@@ -1,5 +1,5 @@
;; -*- buffer-read-only: t -*-
;; Generated automatically by gentune.sh from arm-cores.def
(define_attr "tune"
- "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,cortexa8,cortexa9,cortexr4,cortexr4f,cortexm3,cortexm1"
+ "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,iwmmxt2,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,cortexa8,cortexa9,cortexr4,cortexr4f,cortexm3,cortexm1"
(const (symbol_ref "arm_tune")))
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 209682bb0fe..ddcdf5a619b 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -665,6 +665,7 @@ static const struct processors all_architectures[] =
{"armv7-m", cortexm3, "7M", FL_CO_PROC | FL_FOR_ARCH7M, NULL},
{"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
{"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
+ {"iwmmxt2", iwmmxt2, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
{NULL, arm_none, NULL, 0 , NULL}
};
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index f83aabac038..fb636f3d540 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -564,6 +564,19 @@ extern int arm_arch_hwdiv;
&& (ALIGN) < BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR) \
? BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR : (ALIGN))
+/* Align definitions of arrays, unions and structures so that
+ initializations and copies can be made more efficient. This is not
+ ABI-changing, so it only affects places where we can see the
+ definition. */
+#define DATA_ALIGNMENT(EXP, ALIGN) \
+ ((((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (EXP) == ARRAY_TYPE \
+ || TREE_CODE (EXP) == UNION_TYPE \
+ || TREE_CODE (EXP) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* Similarly, make sure that objects on the stack are sensibly aligned. */
+#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN)
+
/* Setting STRUCTURE_SIZE_BOUNDARY to 32 produces more efficient code, but the
value set in previous versions of this toolchain was 8, which produces more
compact structures. The command line option -mstructure_size_boundary=<n>
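
As a small illustration of the new DATA_ALIGNMENT/LOCAL_ALIGNMENT definitions above (illustrative code, not part of the patch): aggregates whose natural alignment is below the word size are promoted to word alignment, so their initialization and copies can use word accesses.

  /* Illustrative only: both arrays would naturally be byte-aligned, but
     under the macros above the file-scope array (DATA_ALIGNMENT) and the
     stack array (LOCAL_ALIGNMENT) are given 32-bit alignment.  The source
     and the ABI are unchanged; only the chosen alignment of these
     definitions is.  */
  char banner[6] = "hello";

  void
  copy_banner (char *out)
  {
    char tmp[6];
    __builtin_memcpy (tmp, banner, sizeof tmp);
    __builtin_memcpy (out, tmp, sizeof tmp);
  }
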
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 96e2f3b1bb3..0acf257cb43 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -250,6 +250,75 @@
; initialized by arm_override_options()
(define_attr "ldsched" "no,yes" (const (symbol_ref "arm_ld_sched")))
+;; Classification of NEON instructions for scheduling purposes.
+;; Do not set this attribute and the "type" attribute together in
+;; any one instruction pattern.
+(define_attr "neon_type"
+ "neon_int_1,\
+ neon_int_2,\
+ neon_int_3,\
+ neon_int_4,\
+ neon_int_5,\
+ neon_vqneg_vqabs,\
+ neon_vmov,\
+ neon_vaba,\
+ neon_vsma,\
+ neon_vaba_qqq,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,\
+ neon_mla_qqq_32_qqd_32_scalar,\
+ neon_mul_ddd_16_scalar_32_16_long_scalar,\
+ neon_mul_qqd_32_scalar,\
+ neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,\
+ neon_shift_1,\
+ neon_shift_2,\
+ neon_shift_3,\
+ neon_vshl_ddd,\
+ neon_vqshl_vrshl_vqrshl_qqq,\
+ neon_vsra_vrsra,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vsum,\
+ neon_fp_vmul_ddd,\
+ neon_fp_vmul_qqd,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vmla_ddd_scalar,\
+ neon_fp_vmla_qqq_scalar,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq,\
+ neon_bp_simple,\
+ neon_bp_2cycle,\
+ neon_bp_3cycle,\
+ neon_ldr,\
+ neon_str,\
+ neon_vld1_1_2_regs,\
+ neon_vld1_3_4_regs,\
+ neon_vld2_2_regs_vld1_vld2_all_lanes,\
+ neon_vld2_4_regs,\
+ neon_vld3_vld4,\
+ neon_vst1_1_2_regs_vst2_2_regs,\
+ neon_vst1_3_4_regs,\
+ neon_vst2_4_regs_vst3_vst4,\
+ neon_vst3_vst4,\
+ neon_vld1_vld2_lane,\
+ neon_vld3_vld4_lane,\
+ neon_vst1_vst2_lane,\
+ neon_vst3_vst4_lane,\
+ neon_vld3_vld4_all_lanes,\
+ neon_mcr,\
+ neon_mcr_2_mcrr,\
+ neon_mrc,\
+ neon_mrrc,\
+ neon_ldm_2,\
+ neon_stm_2,\
+ none"
+ (const_string "none"))
+
; condition codes: this one is used by final_prescan_insn to speed up
; conditionalizing instructions. It saves having to scan the rtl to see if
; it uses or alters the condition codes.
@@ -267,13 +336,17 @@
; JUMP_CLOB is used when the condition cannot be represented by a single
; instruction (UNEQ and LTGT). These cannot be predicated.
;
+; UNCONDITIONAL means the instruction cannot be conditionally executed.
+;
; NOCOND means that the condition codes are neither altered nor affect the
; output of this insn
-(define_attr "conds" "use,set,clob,jump_clob,nocond"
+(define_attr "conds" "use,set,clob,jump_clob,unconditional,nocond"
(if_then_else (eq_attr "type" "call")
(const_string "clob")
- (const_string "nocond")))
+ (if_then_else (eq_attr "neon_type" "none")
+ (const_string "nocond")
+ (const_string "unconditional"))))
; Predicable means that the insn can be conditionally executed based on
; an automatically added predicate (additional patterns are generated by
diff --git a/gcc/config/arm/bpabi.h b/gcc/config/arm/bpabi.h
index bc716593c17..bbd58da4d42 100644
--- a/gcc/config/arm/bpabi.h
+++ b/gcc/config/arm/bpabi.h
@@ -55,7 +55,7 @@
/* Tell the assembler to build BPABI binaries. */
#undef SUBTARGET_EXTRA_ASM_SPEC
-#define SUBTARGET_EXTRA_ASM_SPEC "%{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu;:-meabi=4}" TARGET_FIX_V4BX_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC "%{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu;:-meabi=5}" TARGET_FIX_V4BX_SPEC
#ifndef SUBTARGET_EXTRA_LINK_SPEC
#define SUBTARGET_EXTRA_LINK_SPEC ""
diff --git a/gcc/config/arm/linux-atomic.c b/gcc/config/arm/linux-atomic.c
new file mode 100644
index 00000000000..ac0b4d6d14b
--- /dev/null
+++ b/gcc/config/arm/linux-atomic.c
@@ -0,0 +1,280 @@
+/* Linux-specific atomic operations for ARM EABI.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* Kernel helper for compare-and-exchange. */
+typedef int (__kernel_cmpxchg_t) (int oldval, int newval, int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
+
+/* Kernel helper for memory barrier. */
+typedef void (__kernel_dmb_t) (void);
+#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
+
+/* Note: we implement byte, short and int versions of atomic operations using
+ the above kernel helpers, but there is no support for "long long" (64-bit)
+ operations as yet. */
+
+#define HIDDEN __attribute__ ((visibility ("hidden")))
+
+#ifdef __ARMEL__
+#define INVERT_MASK_1 0
+#define INVERT_MASK_2 0
+#else
+#define INVERT_MASK_1 24
+#define INVERT_MASK_2 16
+#endif
+
+#define MASK_1 0xffu
+#define MASK_2 0xffffu
+
+#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_fetch_and_##OP##_4 (int *ptr, int val) \
+ { \
+ int failure, tmp; \
+ \
+ do { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \
+ } while (failure != 0); \
+ \
+ return tmp; \
+ }
+
+FETCH_AND_OP_WORD (add, , +)
+FETCH_AND_OP_WORD (sub, , -)
+FETCH_AND_OP_WORD (or, , |)
+FETCH_AND_OP_WORD (and, , &)
+FETCH_AND_OP_WORD (xor, , ^)
+FETCH_AND_OP_WORD (nand, ~, &)
+
+#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
+#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
+
+/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
+ subword-sized quantities. */
+
+#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
+ TYPE HIDDEN \
+ NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
+ { \
+ int *wordptr = (int *) ((unsigned int) ptr & ~3); \
+ unsigned int mask, shift, oldval, newval; \
+ int failure; \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do { \
+ oldval = *wordptr; \
+ newval = ((PFX_OP ((oldval & mask) >> shift) \
+ INF_OP (unsigned int) val) << shift) & mask; \
+ newval |= oldval & ~mask; \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } while (failure != 0); \
+ \
+ return (RETURN & mask) >> shift; \
+ }
+
+SUBWORD_SYNC_OP (add, , +, short, 2, oldval)
+SUBWORD_SYNC_OP (sub, , -, short, 2, oldval)
+SUBWORD_SYNC_OP (or, , |, short, 2, oldval)
+SUBWORD_SYNC_OP (and, , &, short, 2, oldval)
+SUBWORD_SYNC_OP (xor, , ^, short, 2, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval)
+
+SUBWORD_SYNC_OP (add, , +, char, 1, oldval)
+SUBWORD_SYNC_OP (sub, , -, char, 1, oldval)
+SUBWORD_SYNC_OP (or, , |, char, 1, oldval)
+SUBWORD_SYNC_OP (and, , &, char, 1, oldval)
+SUBWORD_SYNC_OP (xor, , ^, char, 1, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, char, 1, oldval)
+
+#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_##OP##_and_fetch_4 (int *ptr, int val) \
+ { \
+ int tmp, failure; \
+ \
+ do { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \
+ } while (failure != 0); \
+ \
+ return PFX_OP tmp INF_OP val; \
+ }
+
+OP_AND_FETCH_WORD (add, , +)
+OP_AND_FETCH_WORD (sub, , -)
+OP_AND_FETCH_WORD (or, , |)
+OP_AND_FETCH_WORD (and, , &)
+OP_AND_FETCH_WORD (xor, , ^)
+OP_AND_FETCH_WORD (nand, ~, &)
+
+SUBWORD_SYNC_OP (add, , +, short, 2, newval)
+SUBWORD_SYNC_OP (sub, , -, short, 2, newval)
+SUBWORD_SYNC_OP (or, , |, short, 2, newval)
+SUBWORD_SYNC_OP (and, , &, short, 2, newval)
+SUBWORD_SYNC_OP (xor, , ^, short, 2, newval)
+SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval)
+
+SUBWORD_SYNC_OP (add, , +, char, 1, newval)
+SUBWORD_SYNC_OP (sub, , -, char, 1, newval)
+SUBWORD_SYNC_OP (or, , |, char, 1, newval)
+SUBWORD_SYNC_OP (and, , &, char, 1, newval)
+SUBWORD_SYNC_OP (xor, , ^, char, 1, newval)
+SUBWORD_SYNC_OP (nand, ~, &, char, 1, newval)
+
+int HIDDEN
+__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int actual_oldval, fail;
+
+ while (1)
+ {
+ actual_oldval = *ptr;
+
+ if (oldval != actual_oldval)
+ return actual_oldval;
+
+ fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+
+ if (!fail)
+ return oldval;
+ }
+}
+
+#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ int *wordptr = (int *)((unsigned int) ptr & ~3), fail; \
+ unsigned int mask, shift, actual_oldval, actual_newval; \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ while (1) \
+ { \
+ actual_oldval = *wordptr; \
+ \
+ if (((actual_oldval & mask) >> shift) != (unsigned int) oldval) \
+ return (actual_oldval & mask) >> shift; \
+ \
+ actual_newval = (actual_oldval & ~mask) \
+ | (((unsigned int) newval << shift) & mask); \
+ \
+ fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
+ wordptr); \
+ \
+ if (!fail) \
+ return oldval; \
+ } \
+ }
+
+SUBWORD_VAL_CAS (short, 2)
+SUBWORD_VAL_CAS (char, 1)
+
+typedef unsigned char bool;
+
+bool HIDDEN
+__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int failure = __kernel_cmpxchg (oldval, newval, ptr);
+ return (failure == 0);
+}
+
+#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
+ bool HIDDEN \
+ __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ TYPE actual_oldval \
+ = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
+ return (oldval == actual_oldval); \
+ }
+
+SUBWORD_BOOL_CAS (short, 2)
+SUBWORD_BOOL_CAS (char, 1)
+
+void HIDDEN
+__sync_synchronize (void)
+{
+ __kernel_dmb ();
+}
+
+int HIDDEN
+__sync_lock_test_and_set_4 (int *ptr, int val)
+{
+ int failure, oldval;
+
+ do {
+ oldval = *ptr;
+ failure = __kernel_cmpxchg (oldval, val, ptr);
+ } while (failure != 0);
+
+ return oldval;
+}
+
+#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
+ { \
+ int failure; \
+ unsigned int oldval, newval, shift, mask; \
+ int *wordptr = (int *) ((unsigned int) ptr & ~3); \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do { \
+ oldval = *wordptr; \
+ newval = (oldval & ~mask) \
+ | (((unsigned int) val << shift) & mask); \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } while (failure != 0); \
+ \
+ return (oldval & mask) >> shift; \
+ }
+
+SUBWORD_TEST_AND_SET (short, 2)
+SUBWORD_TEST_AND_SET (char, 1)
+
+#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
+ void HIDDEN \
+ __sync_lock_release_##WIDTH (TYPE *ptr) \
+ { \
+ *ptr = 0; \
+ __kernel_dmb (); \
+ }
+
+SYNC_LOCK_RELEASE (int, 4)
+SYNC_LOCK_RELEASE (short, 2)
+SYNC_LOCK_RELEASE (char, 1)
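
As a usage sketch (hypothetical caller, not part of the patch): the routines above implement the GCC __sync builtins for word and sub-word types on ARM GNU/Linux via the kernel cmpxchg/dmb helpers, so plain C such as the following now links against these libgcc objects; 64-bit ("long long") variants remain unsupported, as the file's own comment notes.

/* Hypothetical example calling the __sync builtins implemented by
   linux-atomic.c above (int, short and char widths only).  */
#include <stdio.h>

static short counter;          /* sub-word quantity: uses the SUBWORD_* helpers */

int
main (void)
{
  __sync_fetch_and_add (&counter, 1);              /* fetch-and-op            */
  if (__sync_bool_compare_and_swap (&counter, 1, 5))
    puts ("compare-and-swap succeeded");
  (void) __sync_lock_test_and_set (&counter, 7);   /* acquire-style exchange  */
  __sync_lock_release (&counter);                  /* store 0, then barrier   */
  return 0;
}
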
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 8d10c1e5b42..dbbd209f79c 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -427,76 +427,7 @@
;; neon_type attribute definitions.
(define_attr "vqh_mnem" "vadd,vmin,vmax" (const_string "vadd"))
-;; Classification of NEON instructions for scheduling purposes.
-;; Do not set this attribute and the "type" attribute together in
-;; any one instruction pattern.
-(define_attr "neon_type"
- "neon_int_1,\
- neon_int_2,\
- neon_int_3,\
- neon_int_4,\
- neon_int_5,\
- neon_vqneg_vqabs,\
- neon_vmov,\
- neon_vaba,\
- neon_vsma,\
- neon_vaba_qqq,\
- neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
- neon_mul_qqq_8_16_32_ddd_32,\
- neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,\
- neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
- neon_mla_qqq_8_16,\
- neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,\
- neon_mla_qqq_32_qqd_32_scalar,\
- neon_mul_ddd_16_scalar_32_16_long_scalar,\
- neon_mul_qqd_32_scalar,\
- neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,\
- neon_shift_1,\
- neon_shift_2,\
- neon_shift_3,\
- neon_vshl_ddd,\
- neon_vqshl_vrshl_vqrshl_qqq,\
- neon_vsra_vrsra,\
- neon_fp_vadd_ddd_vabs_dd,\
- neon_fp_vadd_qqq_vabs_qq,\
- neon_fp_vsum,\
- neon_fp_vmul_ddd,\
- neon_fp_vmul_qqd,\
- neon_fp_vmla_ddd,\
- neon_fp_vmla_qqq,\
- neon_fp_vmla_ddd_scalar,\
- neon_fp_vmla_qqq_scalar,\
- neon_fp_vrecps_vrsqrts_ddd,\
- neon_fp_vrecps_vrsqrts_qqq,\
- neon_bp_simple,\
- neon_bp_2cycle,\
- neon_bp_3cycle,\
- neon_ldr,\
- neon_str,\
- neon_vld1_1_2_regs,\
- neon_vld1_3_4_regs,\
- neon_vld2_2_regs_vld1_vld2_all_lanes,\
- neon_vld2_4_regs,\
- neon_vld3_vld4,\
- neon_vst1_1_2_regs_vst2_2_regs,\
- neon_vst1_3_4_regs,\
- neon_vst2_4_regs_vst3_vst4,\
- neon_vst3_vst4,\
- neon_vld1_vld2_lane,\
- neon_vld3_vld4_lane,\
- neon_vst1_vst2_lane,\
- neon_vst3_vst4_lane,\
- neon_vld3_vld4_all_lanes,\
- neon_mcr,\
- neon_mcr_2_mcrr,\
- neon_mrc,\
- neon_mrrc,\
- neon_ldm_2,\
- neon_stm_2,\
- none"
- (const_string "none"))
-
-;; Predicates used for setting the above attribute.
+;; Predicates used for setting neon_type
(define_mode_attr Is_float_mode [(V8QI "false") (V16QI "false")
(V4HI "false") (V8HI "false")
@@ -639,7 +570,8 @@
default: gcc_unreachable ();
}
}
- [(set_attr "length" "<V_slen>,<V_slen>,<V_slen>")])
+ [(set_attr "neon_type" "neon_int_1,neon_stm_2,neon_ldm_2")
+ (set_attr "length" "<V_slen>,<V_slen>,<V_slen>")])
(define_split
[(set (match_operand:EI 0 "s_register_operand" "")
diff --git a/gcc/config/arm/t-linux-eabi b/gcc/config/arm/t-linux-eabi
index 5e8d94d7a28..5c364be94fa 100644
--- a/gcc/config/arm/t-linux-eabi
+++ b/gcc/config/arm/t-linux-eabi
@@ -12,3 +12,5 @@ LIB1ASMFUNCS := $(filter-out _dvmd_tls,$(LIB1ASMFUNCS)) _dvmd_lnx
# Multilib the standard Linux files. Don't include crti.o or crtn.o,
# which are provided by glibc.
EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o
+
+LIB2FUNCS_STATIC_EXTRA += $(srcdir)/config/arm/linux-atomic.c
diff --git a/gcc/config/i386/cygwin.h b/gcc/config/i386/cygwin.h
index f61cacb3e1a..9b2fd7f2b7d 100644
--- a/gcc/config/i386/cygwin.h
+++ b/gcc/config/i386/cygwin.h
@@ -49,9 +49,26 @@ along with GCC; see the file COPYING3. If not see
GCC without making a new CYGWIN.DLL, so we leave it. Profiling is handled
by calling the init function from main. */
-#undef LIBGCC_SPEC
-#define LIBGCC_SPEC \
- "%{mno-cygwin: %{mthreads:-lmingwthrd} -lmingw32} -lgcc \
+#ifdef ENABLE_SHARED_LIBGCC
+#define SHARED_LIBGCC_SPEC " \
+ %{static|static-libgcc:-lgcc -lgcc_eh} \
+ %{!static: \
+ %{!static-libgcc: \
+ %{!shared: \
+ %{!shared-libgcc:-lgcc -lgcc_eh} \
+ %{shared-libgcc:-lgcc_s -lgcc} \
+ } \
+ %{shared:-lgcc_s -lgcc} \
+ } \
+ } "
+#else
+#define SHARED_LIBGCC_SPEC " -lgcc "
+#endif
+
+#undef REAL_LIBGCC_SPEC
+#define REAL_LIBGCC_SPEC \
+ "%{mno-cygwin: %{mthreads:-lmingwthrd} -lmingw32} \
+ " SHARED_LIBGCC_SPEC " \
%{mno-cygwin:-lmoldname -lmingwex -lmsvcrt}"
/* We have to dynamic link to get to the system DLLs. All of libc, libm and
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 0569f0413a8..89a3b17607b 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -1534,7 +1534,7 @@
gcc_unreachable ();
}
- case TYPE_MMXADD:
+ case TYPE_MMX:
return "pxor\t%0, %0";
case TYPE_MMXMOV:
@@ -1552,7 +1552,7 @@
}
[(set (attr "type")
(cond [(eq_attr "alternative" "2")
- (const_string "mmxadd")
+ (const_string "mmx")
(eq_attr "alternative" "3,4,5")
(const_string "mmxmov")
(eq_attr "alternative" "6")
@@ -2370,7 +2370,7 @@
case TYPE_SSELOG1:
return "%vpxor\t%0, %d0";
- case TYPE_MMXADD:
+ case TYPE_MMX:
return "pxor\t%0, %0";
case TYPE_MULTI:
@@ -2391,7 +2391,7 @@
}
[(set (attr "type")
(cond [(eq_attr "alternative" "5")
- (const_string "mmxadd")
+ (const_string "mmx")
(eq_attr "alternative" "6,7,8,9,10")
(const_string "mmxmov")
(eq_attr "alternative" "11")
@@ -15299,7 +15299,7 @@
(define_insn "set_rip_rex64"
[(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "" "")] UNSPEC_SET_RIP))]
+ (unspec:DI [(label_ref (match_operand 1 "" ""))] UNSPEC_SET_RIP))]
"TARGET_64BIT"
"lea{q}\t{%l1(%%rip), %0|%0, %l1[rip]}"
[(set_attr "type" "lea")
@@ -15307,7 +15307,9 @@
(define_insn "set_got_offset_rex64"
[(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "" "")] UNSPEC_SET_GOT_OFFSET))]
+ (unspec:DI
+ [(label_ref (match_operand 1 "" ""))]
+ UNSPEC_SET_GOT_OFFSET))]
"TARGET_64BIT"
"movabs{q}\t{$_GLOBAL_OFFSET_TABLE_-%l1, %0|%0, OFFSET FLAT:_GLOBAL_OFFSET_TABLE_-%l1}"
[(set_attr "type" "imov")
diff --git a/gcc/config/i386/sol2-10.h b/gcc/config/i386/sol2-10.h
index bd5c63781af..6e7f13d8f26 100644
--- a/gcc/config/i386/sol2-10.h
+++ b/gcc/config/i386/sol2-10.h
@@ -1,5 +1,5 @@
/* Solaris 10 configuration.
- Copyright (C) 2004, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC.
This file is part of GCC.
@@ -39,6 +39,15 @@ along with GCC; see the file COPYING3. If not see
#ifndef HAVE_AS_IX86_DIFF_SECT_DELTA
#undef JUMP_TABLES_IN_TEXT_SECTION
#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* The native Solaris assembler cannot handle the SYMBOL-. syntax, but
+ requires SYMBOL@rel/@rel64 instead. */
+#define ASM_OUTPUT_DWARF_PCREL(FILE, SIZE, LABEL) \
+ do { \
+ fputs (integer_asm_op (SIZE, FALSE), FILE); \
+ assemble_name (FILE, LABEL); \
+ fputs (SIZE == 8 ? "@rel64" : "@rel", FILE); \
+ } while (0)
#endif
#undef NO_PROFILE_COUNTERS
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 55b1c22be66..c98528e7eb4 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -13296,7 +13296,7 @@ mips_reorg (void)
mips16_lay_out_constants ();
if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
r10k_insert_cache_barriers ();
- if (flag_delayed_branch)
+ if (optimize > 0 && flag_delayed_branch)
dbr_schedule (get_insns ());
mips_reorg_process_insns ();
if (!TARGET_MIPS16
diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h
index 2f1b04c4f38..4e2ecd3ec4a 100644
--- a/gcc/config/rs6000/rs6000-protos.h
+++ b/gcc/config/rs6000/rs6000-protos.h
@@ -42,6 +42,7 @@ extern void validate_condition_mode (enum rtx_code, enum machine_mode);
extern bool legitimate_constant_pool_address_p (rtx);
extern bool legitimate_indirect_address_p (rtx, int);
extern bool legitimate_indexed_address_p (rtx, int);
+extern bool avoiding_indexed_address_p (enum machine_mode);
extern rtx rs6000_got_register (rtx);
extern rtx find_addr_reg (rtx);
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 6d7327b5ea4..d2ebf628f57 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -1987,6 +1987,13 @@ rs6000_override_options (const char *default_cpu)
rs6000_single_float = rs6000_double_float = 1;
}
+ /* If not explicitly specified via option, decide whether to generate indexed
+ load/store instructions. */
+ if (TARGET_AVOID_XFORM == -1)
+ /* Avoid indexed addressing when targeting Power6 in order to avoid
+ the DERAT mispredict penalty. */
+ TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB);
+
rs6000_init_hard_regno_mode_ok ();
}
@@ -3704,6 +3711,14 @@ legitimate_indexed_address_p (rtx x, int strict)
&& INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
+bool
+avoiding_indexed_address_p (enum machine_mode mode)
+{
+ /* Avoid indexed addressing for modes that have non-indexed
+ load/store instruction forms. */
+ return TARGET_AVOID_XFORM && !ALTIVEC_VECTOR_MODE (mode);
+}
+
inline bool
legitimate_indirect_address_p (rtx x, int strict)
{
@@ -3830,6 +3845,7 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
|| ((mode != DImode && mode != DFmode && mode != DDmode)
|| (TARGET_E500_DOUBLE && mode != DDmode)))
&& (TARGET_POWERPC64 || mode != DImode)
+ && !avoiding_indexed_address_p (mode)
&& mode != TImode
&& mode != TFmode
&& mode != TDmode)
@@ -4441,6 +4457,7 @@ rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
|| (mode != DFmode && mode != DDmode)
|| (TARGET_E500_DOUBLE && mode != DDmode))
&& (TARGET_POWERPC64 || mode != DImode)
+ && !avoiding_indexed_address_p (mode)
&& legitimate_indexed_address_p (x, reg_ok_strict))
return 1;
if (GET_CODE (x) == PRE_MODIFY
@@ -4459,7 +4476,8 @@ rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
&& TARGET_UPDATE
&& legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
&& (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
- || legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict))
+ || (!avoiding_indexed_address_p (mode)
+ && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
&& rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
return 1;
if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
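
For context, a hypothetical C fragment (not from the patch) of the kind affected by the new -mavoid-indexed-addresses logic: when TARGET_AVOID_XFORM is on (defaulted above for Power6), rs6000_legitimate_address now rejects reg+reg addresses for ordinary scalar modes, so one would expect the compiler to form the sum explicitly and use a base+displacement access; Altivec vector modes are exempt, per avoiding_indexed_address_p.

/* Hypothetical illustration: p[i] naturally wants an indexed (X-form)
   load; with -mcpu=power6 the new default avoids that form to
   sidestep the DERAT mispredict penalty, so a D-form access after an
   explicit add is the expected expansion instead.  */
double
element (const double *p, long i)
{
  return p[i];
}
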
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index a5a6ec17b26..b6f41814eba 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -10055,7 +10055,9 @@
(match_operand:DI 2 "reg_or_aligned_short_operand" "r,I"))))
(set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
(plus:DI (match_dup 1) (match_dup 2)))]
- "TARGET_POWERPC64 && TARGET_UPDATE"
+ "TARGET_POWERPC64 && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (DImode)
+ || !gpc_reg_operand (operands[2], DImode))"
"@
ldux %3,%0,%2
ldu %3,%2(%0)"
@@ -10067,7 +10069,11 @@
(match_operand:DI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
- "TARGET_POWERPC64 && TARGET_UPDATE"
+ "TARGET_POWERPC64 && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (Pmode)
+ || !gpc_reg_operand (operands[2], Pmode)
+ || (REG_P (operands[0])
+ && REGNO (operands[0]) == STACK_POINTER_REGNUM))"
"@
stdux %3,%0,%2
stdu %3,%2(%0)"
@@ -10079,7 +10085,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
{lux|lwzux} %3,%0,%2
{lu|lwzu} %3,%2(%0)"
@@ -10092,7 +10100,8 @@
(match_operand:DI 2 "gpc_reg_operand" "r")))))
(set (match_operand:DI 0 "gpc_reg_operand" "=b")
(plus:DI (match_dup 1) (match_dup 2)))]
- "TARGET_POWERPC64 && rs6000_gen_cell_microcode"
+ "TARGET_POWERPC64 && rs6000_gen_cell_microcode
+ && !avoiding_indexed_address_p (DImode)"
"lwaux %3,%0,%2"
[(set_attr "type" "load_ext_ux")])
@@ -10102,7 +10111,11 @@
(match_operand:SI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode)
+ || (REG_P (operands[0])
+ && REGNO (operands[0]) == STACK_POINTER_REGNUM))"
"@
{stux|stwux} %3,%0,%2
{stu|stwu} %3,%2(%0)"
@@ -10114,7 +10127,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
lhzux %3,%0,%2
lhzu %3,%2(%0)"
@@ -10127,7 +10142,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I")))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
lhzux %3,%0,%2
lhzu %3,%2(%0)"
@@ -10140,7 +10157,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I")))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE && rs6000_gen_cell_microcode"
+ "TARGET_UPDATE && rs6000_gen_cell_microcode
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
lhaux %3,%0,%2
lhau %3,%2(%0)"
@@ -10152,7 +10171,9 @@
(match_operand:HI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
sthux %3,%0,%2
sthu %3,%2(%0)"
@@ -10164,7 +10185,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
lbzux %3,%0,%2
lbzu %3,%2(%0)"
@@ -10177,7 +10200,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I")))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
lbzux %3,%0,%2
lbzu %3,%2(%0)"
@@ -10189,7 +10214,9 @@
(match_operand:QI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_UPDATE"
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
stbux %3,%0,%2
stbu %3,%2(%0)"
@@ -10201,7 +10228,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT && TARGET_UPDATE"
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
lfsux %3,%0,%2
lfsu %3,%2(%0)"
@@ -10213,7 +10242,9 @@
(match_operand:SF 3 "gpc_reg_operand" "f,f"))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT && TARGET_UPDATE"
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
stfsux %3,%0,%2
stfsu %3,%2(%0)"
@@ -10225,7 +10256,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE"
+ "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
{lux|lwzux} %3,%0,%2
{lu|lwzu} %3,%2(%0)"
@@ -10237,7 +10270,9 @@
(match_operand:SF 3 "gpc_reg_operand" "r,r"))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE"
+ "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
{stux|stwux} %3,%0,%2
{stu|stwu} %3,%2(%0)"
@@ -10249,7 +10284,9 @@
(match_operand:SI 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_UPDATE"
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
lfdux %3,%0,%2
lfdu %3,%2(%0)"
@@ -10261,7 +10298,9 @@
(match_operand:DF 3 "gpc_reg_operand" "f,f"))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_UPDATE"
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
"@
stfdux %3,%0,%2
stfdu %3,%2(%0)"
diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt
index 8a40fc3539b..ec5373eb877 100644
--- a/gcc/config/rs6000/rs6000.opt
+++ b/gcc/config/rs6000/rs6000.opt
@@ -119,6 +119,10 @@ mupdate
Target Report RejectNegative InverseMask(NO_UPDATE, UPDATE)
Generate load/store with update instructions
+mavoid-indexed-addresses
+Target Report Var(TARGET_AVOID_XFORM) Init(-1)
+Avoid generation of indexed load/store instructions when possible
+
mno-fused-madd
Target Report RejectNegative Mask(NO_FUSED_MADD)
Do not generate fused multiply/add instructions
diff --git a/gcc/config/s390/s390.md b/gcc/config/s390/s390.md
index e8b50830d58..9c21977d506 100644
--- a/gcc/config/s390/s390.md
+++ b/gcc/config/s390/s390.md
@@ -558,7 +558,8 @@
"@
tmh\t%0,%i1
tml\t%0,%i1"
- [(set_attr "op_type" "RI")])
+ [(set_attr "op_type" "RI")
+ (set_attr "z10prop" "z10_super,z10_super")])
(define_insn "*tm<mode>_full"
[(set (reg CC_REGNUM)
@@ -566,7 +567,8 @@
(match_operand:HQI 1 "immediate_operand" "n")))]
"s390_match_ccmode (insn, s390_tm_ccmode (constm1_rtx, operands[1], true))"
"tml\t%0,<max_uint>"
- [(set_attr "op_type" "RI")])
+ [(set_attr "op_type" "RI")
+ (set_attr "z10prop" "z10_super")])
;
@@ -604,7 +606,7 @@
lt<g>r\t%2,%0
lt<g>\t%2,%0"
[(set_attr "op_type" "RR<E>,RXY")
- (set_attr "z10prop" "z10_fr_E1,z10_fr_A3") ])
+ (set_attr "z10prop" "z10_fr_E1,z10_fwd_A3") ])
; ltr, lt, ltgr, ltg
(define_insn "*tst<mode>_cconly_extimm"
@@ -617,7 +619,7 @@
lt<g>r\t%0,%0
lt<g>\t%2,%0"
[(set_attr "op_type" "RR<E>,RXY")
- (set_attr "z10prop" "z10_fr_E1,z10_fr_A3")])
+ (set_attr "z10prop" "z10_fr_E1,z10_fwd_A3")])
(define_insn "*tstdi"
[(set (reg CC_REGNUM)
@@ -715,7 +717,7 @@
cliy\t%S0,0
tml\t%0,255"
[(set_attr "op_type" "SI,SIY,RI")
- (set_attr "z10prop" "z10_super,z10_super,*")])
+ (set_attr "z10prop" "z10_super,z10_super,z10_super")])
(define_insn "*tst<mode>"
[(set (reg CC_REGNUM)
@@ -856,7 +858,8 @@
"s390_match_ccmode(insn, CCURmode) && TARGET_Z10"
"clhrl\t%0,%1"
[(set_attr "op_type" "RIL")
- (set_attr "type" "larl")])
+ (set_attr "type" "larl")
+ (set_attr "z10prop" "z10_super")])
; clhrl, clghrl
(define_insn "*cmp<GPR:mode>_ccu_zerohi_rldi"
@@ -1003,7 +1006,7 @@
[(set_attr "op_type" "RRE")
(set_attr "type" "fsimp<mode>")])
-; cxtr, cxbr, cdbr, cebr, cxb, cdb, ceb, cxbtr, cdbtr
+; cxtr, cxbr, cdbr, cebr, cdb, ceb, cxbtr, cdbtr
(define_insn "*cmp<mode>_ccs"
[(set (reg CC_REGNUM)
(compare (match_operand:FP 0 "register_operand" "f,f")
@@ -1527,7 +1530,7 @@
"larl\t%0,%1"
[(set_attr "op_type" "RIL")
(set_attr "type" "larl")
- (set_attr "z10prop" "z10_super_A1")])
+ (set_attr "z10prop" "z10_fwd_A1")])
(define_insn "*movsi_zarch"
[(set (match_operand:SI 0 "nonimmediate_operand"
@@ -1593,7 +1596,7 @@
z10_fr_E1,
z10_fwd_A3,
z10_fwd_A3,
- z10_super,
+ z10_rec,
z10_rec,
*,
*,
@@ -1628,7 +1631,7 @@
(set_attr "z10prop" "z10_fwd_A1,
z10_fr_E1,
z10_fwd_A3,
- z10_super,
+ z10_rec,
*,
*,
*,
@@ -1766,7 +1769,7 @@
z10_super_E1,
z10_super_E1,
z10_super_E1,
- z10_super,
+ z10_rec,
z10_rec,
z10_rec,
z10_super")])
@@ -1822,7 +1825,7 @@
z10_fwd_A1,
z10_super_E1,
z10_super_E1,
- z10_super,
+ z10_rec,
z10_rec,
z10_super,
z10_super")])
@@ -1849,7 +1852,7 @@
ic\t%0,%1
icy\t%0,%1"
[(set_attr "op_type" "RX,RXY")
- (set_attr "z10prop" "z10_super_E1,z10_super")])
+ (set_attr "z10prop" "z10_super_E1,z10_super_E1")])
;
; movstricthi instruction pattern(s).
@@ -2167,7 +2170,7 @@
z10_fr_E1,
z10_fwd_A3,
z10_fwd_A3,
- z10_super,
+ z10_rec,
z10_rec")])
;
@@ -2188,7 +2191,7 @@
ly\t%1,%0"
[(set_attr "op_type" "RR,RI,RRE,RX,RXY,RX,RXY")
(set_attr "type" "lr,*,*,store,store,load,load")
- (set_attr "z10prop" "z10_fr_E1,*,*,z10_super,z10_rec,z10_fwd_A3,z10_fwd_A3")])
+ (set_attr "z10prop" "z10_fr_E1,z10_super,*,z10_rec,z10_rec,z10_fwd_A3,z10_fwd_A3")])
;
; Block move (MVC) patterns.
@@ -3153,7 +3156,8 @@
(clobber (reg:CC CC_REGNUM))]
"TARGET_64BIT"
"icmh\t%0,%2,%S1"
- [(set_attr "op_type" "RSY")])
+ [(set_attr "op_type" "RSY")
+ (set_attr "z10prop" "z10_super")])
(define_insn "*sethighpartdi_31"
[(set (match_operand:DI 0 "register_operand" "=d,d")
@@ -3696,7 +3700,7 @@
[(set_attr "op_type" "RXY,RRE,RIL")
(set_attr "type" "*,*,larl")
(set_attr "cpu_facility" "*,*,z10")
- (set_attr "z10prop" "z10_fwd_A3")])
+ (set_attr "z10prop" "z10_super_E1,z10_fwd_A3,z10_fwd_A3")])
; llhr, llcr, llghr, llgcr, llh, llc, llgh, llgc
(define_insn "*zero_extend<HQI:mode><GPR:mode>2_extimm"
@@ -4873,7 +4877,7 @@
; sub(tf|df|sf|td|dd)3 instruction pattern(s).
;
-; sxbr, sdbr, sebr, sxb, sdb, seb, sxtr, sdtr
+; sxbr, sdbr, sebr, sdb, seb, sxtr, sdtr
(define_insn "sub<mode>3"
[(set (match_operand:FP 0 "register_operand" "=f, f")
(minus:FP (match_operand:FP 1 "register_operand" "<f0>,0")
@@ -4886,7 +4890,7 @@
[(set_attr "op_type" "<RRer>,RXE")
(set_attr "type" "fsimp<mode>")])
-; sxbr, sdbr, sebr, sxb, sdb, seb, sxtr, sdtr
+; sxbr, sdbr, sebr, sdb, seb, sxtr, sdtr
(define_insn "*sub<mode>3_cc"
[(set (reg CC_REGNUM)
(compare (minus:FP (match_operand:FP 1 "nonimmediate_operand" "<f0>,0")
@@ -4901,7 +4905,7 @@
[(set_attr "op_type" "<RRer>,RXE")
(set_attr "type" "fsimp<mode>")])
-; sxbr, sdbr, sebr, sxb, sdb, seb, sxtr, sdtr
+; sxbr, sdbr, sebr, sdb, seb, sxtr, sdtr
(define_insn "*sub<mode>3_cconly"
[(set (reg CC_REGNUM)
(compare (minus:FP (match_operand:FP 1 "nonimmediate_operand" "<f0>,0")
@@ -6630,7 +6634,8 @@
(neg:DI (sign_extend:DI (match_dup 1))))]
"TARGET_64BIT && s390_match_ccmode (insn, CCAmode)"
"lcgfr\t%0,%1"
- [(set_attr "op_type" "RRE")])
+ [(set_attr "op_type" "RRE")
+ (set_attr "z10prop" "z10_c")])
(define_insn "*negdi2_sign"
[(set (match_operand:DI 0 "register_operand" "=d")
@@ -6638,7 +6643,8 @@
(clobber (reg:CC CC_REGNUM))]
"TARGET_64BIT"
"lcgfr\t%0,%1"
- [(set_attr "op_type" "RRE")])
+ [(set_attr "op_type" "RRE")
+ (set_attr "z10prop" "z10_c")])
; lcr, lcgr
(define_insn "*neg<mode>2_cc"
@@ -6774,7 +6780,8 @@
(abs:DI (sign_extend:DI (match_dup 1))))]
"TARGET_64BIT && s390_match_ccmode (insn, CCAmode)"
"lpgfr\t%0,%1"
- [(set_attr "op_type" "RRE")])
+ [(set_attr "op_type" "RRE")
+ (set_attr "z10prop" "z10_c")])
(define_insn "*absdi2_sign"
[(set (match_operand:DI 0 "register_operand" "=d")
@@ -6782,7 +6789,8 @@
(clobber (reg:CC CC_REGNUM))]
"TARGET_64BIT"
"lpgfr\t%0,%1"
- [(set_attr "op_type" "RRE")])
+ [(set_attr "op_type" "RRE")
+ (set_attr "z10prop" "z10_c")])
; lpr, lpgr
(define_insn "*abs<mode>2_cc"
@@ -6890,7 +6898,8 @@
(neg:DI (abs:DI (sign_extend:DI (match_dup 1)))))]
"TARGET_64BIT && s390_match_ccmode (insn, CCAmode)"
"lngfr\t%0,%1"
- [(set_attr "op_type" "RRE")])
+ [(set_attr "op_type" "RRE")
+ (set_attr "z10prop" "z10_c")])
(define_insn "*negabsdi2_sign"
[(set (match_operand:DI 0 "register_operand" "=d")
@@ -6899,7 +6908,8 @@
(clobber (reg:CC CC_REGNUM))]
"TARGET_64BIT"
"lngfr\t%0,%1"
- [(set_attr "op_type" "RRE")])
+ [(set_attr "op_type" "RRE")
+ (set_attr "z10prop" "z10_c")])
; lnr, lngr
(define_insn "*negabs<mode>2_cc"
@@ -7516,7 +7526,7 @@
c<g>it%C0\t%1,%h2"
[(set_attr "op_type" "RRF,RIE")
(set_attr "type" "branch")
- (set_attr "z10prop" "z10_c,*")])
+ (set_attr "z10prop" "z10_super_c,z10_super")])
; clrt, clgrt, clfit, clgit
(define_insn "*cmp_and_trap_unsigned_int<mode>"
@@ -7530,7 +7540,7 @@
cl<gf>it%C0\t%1,%x2"
[(set_attr "op_type" "RRF,RIE")
(set_attr "type" "branch")
- (set_attr "z10prop" "z10_c,*")])
+ (set_attr "z10prop" "z10_super_c,z10_super")])
;;
;;- Loop instructions.
@@ -7594,7 +7604,7 @@
[(set_attr "op_type" "RI")
; Strictly speaking, the z10 properties are valid for brct only, however, it does not
; hurt us in the (rare) case of ahi.
- (set_attr "z10prop" "z10_super")
+ (set_attr "z10prop" "z10_super_E1")
(set_attr "type" "branch")
(set (attr "length")
(if_then_else (lt (abs (minus (pc) (match_dup 0))) (const_int 60000))
@@ -7636,7 +7646,7 @@
[(set_attr "op_type" "RI")
; Strictly speaking, the z10 properties are valid for brct only, however, it does not
; hurt us in the (rare) case of ahi.
- (set_attr "z10prop" "z10_super")
+ (set_attr "z10prop" "z10_super_E1")
(set_attr "type" "branch")
(set (attr "length")
(if_then_else (eq (symbol_ref "flag_pic") (const_int 0))
@@ -7667,7 +7677,8 @@
(if_then_else (match_operand 0 "register_operand" "")
(const_string "RR") (const_string "RX")))
(set_attr "type" "branch")
- (set_attr "atype" "agen")])
+ (set_attr "atype" "agen")
+ (set_attr "z10prop" "z10_cobra")])
(define_insn_and_split "doloop_di"
[(set (pc)
@@ -7705,7 +7716,7 @@
[(set_attr "op_type" "RI")
; Strictly speaking, the z10 properties are valid for brct only, however, it does not
; hurt us in the (rare) case of ahi.
- (set_attr "z10prop" "z10_super")
+ (set_attr "z10prop" "z10_super_E1")
(set_attr "type" "branch")
(set (attr "length")
(if_then_else (lt (abs (minus (pc) (match_dup 0))) (const_int 60000))
@@ -7772,8 +7783,7 @@
(if_then_else (match_operand 0 "register_operand" "")
(const_string "RR") (const_string "RX")))
(set_attr "type" "branch")
- (set_attr "atype" "agen")
- (set_attr "z10prop" "z10_super")])
+ (set_attr "atype" "agen")])
;
; casesi instruction pattern(s).
@@ -8129,6 +8139,7 @@
l\t%0,%1%J2
ly\t%0,%1%J2"
[(set_attr "op_type" "RX,RXY")
+ (set_attr "type" "load")
(set_attr "z10prop" "z10_fwd_A3,z10_fwd_A3")])
(define_insn "*bras_tls"
@@ -8468,7 +8479,8 @@
[(const_int 0)]
""
"lr\t0,0"
- [(set_attr "op_type" "RR")])
+ [(set_attr "op_type" "RR")
+ (set_attr "z10prop" "z10_fr_E1")])
(define_insn "nop1"
[(const_int 1)]
@@ -8536,7 +8548,7 @@
"larl\t%0,%1"
[(set_attr "op_type" "RIL")
(set_attr "type" "larl")
- (set_attr "z10prop" "z10_super_A1")])
+ (set_attr "z10prop" "z10_fwd_A1")])
(define_insn "main_pool"
[(set (match_operand 0 "register_operand" "=a")
@@ -8564,7 +8576,7 @@
"larl\t%0,%1"
[(set_attr "op_type" "RIL")
(set_attr "type" "larl")
- (set_attr "z10prop" "z10_super_A1")])
+ (set_attr "z10prop" "z10_fwd_A1")])
(define_insn "pool"
[(unspec_volatile [(match_operand 0 "const_int_operand" "n")] UNSPECV_POOL)]
@@ -8729,3 +8741,19 @@
[(set_attr "type" "load,larl")
(set_attr "op_type" "RXY,RIL")
(set_attr "z10prop" "z10_super")])
+
+
+;
+; Byte swap instructions
+;
+
+(define_insn "bswap<mode>2"
+ [(set (match_operand:GPR 0 "register_operand" "=d, d")
+ (bswap:GPR (match_operand:GPR 1 "nonimmediate_operand" " d,RT")))]
+ ""
+ "@
+ lrv<g>r\t%0,%1
+ lrv<g>\t%0,%1"
+ [(set_attr "type" "*,load")
+ (set_attr "op_type" "RRE,RXY")
+ (set_attr "z10prop" "z10_super")])
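
Illustrative only (not part of the patch): the new bswap<mode>2 patterns give the generic byte-swap builtins a direct expansion, so C like the following should now be able to use lrvr/lrv (and lrvgr/lrvg for 64-bit on z/Architecture) rather than a shift-and-mask sequence.

/* Hypothetical example exercising the new s390 bswap<mode>2 patterns.  */
unsigned int
swap32 (unsigned int x)
{
  return __builtin_bswap32 (x);
}

unsigned long long
swap64 (unsigned long long x)
{
  return __builtin_bswap64 (x);
}
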
diff --git a/gcc/config/sparc/linux.h b/gcc/config/sparc/linux.h
index 9014ae54ca6..4d8db56518f 100644
--- a/gcc/config/sparc/linux.h
+++ b/gcc/config/sparc/linux.h
@@ -102,10 +102,6 @@ along with GCC; see the file COPYING3. If not see
"%{V} %{v:%{!V:-V}} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s \
%{fpic|fPIC|fpie|fPIE:-K PIC} %(asm_cpu) %(asm_relax)"
-/* Same as sparc.h */
-#undef DBX_REGISTER_NUMBER
-#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
-
#undef ASM_OUTPUT_ALIGNED_LOCAL
#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
do { \
diff --git a/gcc/config/sparc/linux64.h b/gcc/config/sparc/linux64.h
index 4971048a31d..8155c1af8cb 100644
--- a/gcc/config/sparc/linux64.h
+++ b/gcc/config/sparc/linux64.h
@@ -239,10 +239,6 @@ along with GCC; see the file COPYING3. If not see
%{mlittle-endian:-EL} \
%(asm_cpu) %(asm_arch) %(asm_relax)"
-/* Same as sparc.h */
-#undef DBX_REGISTER_NUMBER
-#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
-
#undef ASM_OUTPUT_ALIGNED_LOCAL
#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
do { \
diff --git a/gcc/config/sparc/sysv4.h b/gcc/config/sparc/sysv4.h
index 586c2399ae6..1c60debf850 100644
--- a/gcc/config/sparc/sysv4.h
+++ b/gcc/config/sparc/sysv4.h
@@ -93,22 +93,6 @@ do { ASM_OUTPUT_ALIGN ((FILE), Pmode == SImode ? 2 : 3); \
fprintf (FILE, "\n"); \
} while (0)
-/* Define how the SPARC registers should be numbered for Dwarf output.
- The numbering provided here should be compatible with the native
- svr4 SDB debugger in the SPARC/svr4 reference port. The numbering
- is as follows:
-
- Assembly name gcc internal regno Dwarf regno
- ----------------------------------------------------------
- g0-g7 0-7 0-7
- o0-o7 8-15 8-15
- l0-l7 16-23 16-23
- i0-i7 24-31 24-31
- f0-f31 32-63 40-71
-*/
-
-#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 32 ? (REGNO) : (REGNO) + 8)
-
/* A set of symbol definitions for assembly pseudo-ops which will
get us switched to various sections of interest. These are used
in all places where we simply want to switch to a section, and
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index b2ca4cf3ec2..afa84bea4eb 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,34 @@
+2009-01-29 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * typeck.c (invalid_nonstatic_memfn_p): Use
+ DECL_NONSTATIC_MEMBER_FUNCTION_P.
+
+2009-01-27 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/37554
+ * call.c (build_over_call): If convert_for_arg_passing returns
+ error_mark_node unconditionally return it.
+
+2009-01-22 Adam Nemet <anemet@caviumnetworks.com>
+
+ * class.c (check_field_decls): Also inherit packed for bitfields
+ regardless of their type.
+
+2009-01-22 Dodji Seketeli <dodji@redhat.com>
+
+ PR c++/38930
+ * decl2.c (grokfield): Reverting changes of PR c++/26693
+ (save_template_attributes): Likewise.
+ * decl.c (grokdeclarator): Likewise.
+ * name-lookup.c (pushdecl_maybe_friend): Likewise.
+ * cp-tree.h (MEMBER_TYPES_NEEDING_ACCESS_CHECK): Likewise.
+ (append_type_to_template_for_access_check): Likewise.
+ * semantics.c (check_accessibility_of_qualified_id): Likewise.
+	* pt.c (instantiate_class_template, instantiate_template): Likewise.
+ (tsubst): Likewise.
+ (resolve_type_name_type): Likewise.
+ (append_type_to_template_for_access_check): Likewise.
+
2009-01-21 Dodji Seketeli <dodji@redhat.com>
PR c++/26693
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 09dc57d7a25..f13e3bdb8e2 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -5276,7 +5276,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
(conv, TREE_VALUE (arg), fn, i - is_method, complain);
val = convert_for_arg_passing (type, val);
- if ((complain == tf_none) && val == error_mark_node)
+ if (val == error_mark_node)
return error_mark_node;
else
argarray[j++] = val;
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 8d326f296dc..f56edc392a1 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -2974,7 +2974,8 @@ check_field_decls (tree t, tree *access_decls,
x);
cant_pack = 1;
}
- else if (TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)
+ else if (DECL_C_BIT_FIELD (x)
+ || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)
DECL_PACKED (x) = 1;
}
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index b740700e183..186ec9ab6ad 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -3179,14 +3179,6 @@ more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL \
&& !DECL_TEMPLATE_TEMPLATE_PARM_P (NODE))
-/* The chained list of some types that are referenced in templates.
- These types are those which need to be access checked at
- template instantiation time. For the time being, only typedef-ed types defined
- as class members are put here at parsing time.
- Other types for which access check could be required at template instantiation
- time could be added later. */
-#define MEMBER_TYPES_NEEDING_ACCESS_CHECK(NODE) DECL_ACCESS (NODE)
-
/* Nonzero if NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_CLASS_TEMPLATE_P (NODE))
@@ -4547,7 +4539,6 @@ extern tree check_explicit_specialization (tree, tree, int, int);
extern tree make_auto (void);
extern tree do_auto_deduction (tree, tree, tree);
extern tree type_uses_auto (tree);
-extern void append_type_to_template_for_access_check (tree, tree, tree);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern tree process_template_parm (tree, tree, bool, bool);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 2163e397cd2..8476959bff4 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -8729,7 +8729,6 @@ grokdeclarator (const cp_declarator *declarator,
decl = build_lang_decl (TYPE_DECL, unqualified_id, type);
else
decl = build_decl (TYPE_DECL, unqualified_id, type);
-
if (id_declarator && declarator->u.id.qualifying_scope) {
error ("%Jtypedef name may not be a nested-name-specifier", decl);
TREE_TYPE (decl) = error_mark_node;
@@ -8764,11 +8763,12 @@ grokdeclarator (const cp_declarator *declarator,
&& TYPE_ANONYMOUS_P (type)
&& cp_type_quals (type) == TYPE_UNQUALIFIED)
{
+ tree oldname = TYPE_NAME (type);
tree t;
/* Replace the anonymous name with the real name everywhere. */
for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
- if (ANON_AGGRNAME_P (TYPE_IDENTIFIER (t)))
+ if (TYPE_NAME (t) == oldname)
TYPE_NAME (t) = decl;
if (TYPE_LANG_SPECIFIC (type))
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index 22192a001ba..c8887257ece 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -804,9 +804,6 @@ grokfield (const cp_declarator *declarator,
DECL_NONLOCAL (value) = 1;
DECL_CONTEXT (value) = current_class_type;
- if (declspecs->specs[(int)ds_typedef])
- set_underlying_type (value);
-
if (processing_template_decl)
value = push_template_decl (value);
@@ -1128,6 +1125,19 @@ save_template_attributes (tree *attr_p, tree *decl_p)
if (!late_attrs)
return;
+ /* Give this type a name so we know to look it up again at instantiation
+ time. */
+ if (TREE_CODE (*decl_p) == TYPE_DECL
+ && DECL_ORIGINAL_TYPE (*decl_p) == NULL_TREE)
+ {
+ tree oldt = TREE_TYPE (*decl_p);
+ tree newt = build_variant_type_copy (oldt);
+ DECL_ORIGINAL_TYPE (*decl_p) = oldt;
+ TREE_TYPE (*decl_p) = newt;
+ TYPE_NAME (newt) = *decl_p;
+ TREE_USED (newt) = TREE_USED (*decl_p);
+ }
+
if (DECL_P (*decl_p))
q = &DECL_ATTRIBUTES (*decl_p);
else
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index 2c69cfe68a6..f8d0204f099 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -847,20 +847,28 @@ pushdecl_maybe_friend (tree x, bool is_friend)
/* If declaring a type as a typedef, copy the type (unless we're
at line 0), and install this TYPE_DECL as the new type's typedef
- name. See the extensive comment of set_underlying_type (). */
+ name. See the extensive comment in ../c-decl.c (pushdecl). */
if (TREE_CODE (x) == TYPE_DECL)
{
tree type = TREE_TYPE (x);
-
- if (DECL_IS_BUILTIN (x)
- || (TREE_TYPE (x) != error_mark_node
- && TYPE_NAME (type) != x
- /* We don't want to copy the type when all we're
- doing is making a TYPE_DECL for the purposes of
- inlining. */
- && (!TYPE_NAME (type)
- || TYPE_NAME (type) != DECL_ABSTRACT_ORIGIN (x))))
- set_underlying_type (x);
+ if (DECL_IS_BUILTIN (x))
+ {
+ if (TYPE_NAME (type) == 0)
+ TYPE_NAME (type) = x;
+ }
+ else if (type != error_mark_node && TYPE_NAME (type) != x
+ /* We don't want to copy the type when all we're
+ doing is making a TYPE_DECL for the purposes of
+ inlining. */
+ && (!TYPE_NAME (type)
+ || TYPE_NAME (type) != DECL_ABSTRACT_ORIGIN (x)))
+ {
+ DECL_ORIGINAL_TYPE (x) = type;
+ type = build_variant_type_copy (type);
+ TYPE_STUB_DECL (type) = TYPE_STUB_DECL (DECL_ORIGINAL_TYPE (x));
+ TYPE_NAME (type) = x;
+ TREE_TYPE (x) = type;
+ }
if (type != error_mark_node
&& TYPE_NAME (type)
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 36edb449e00..f6809f2bdf7 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -7387,31 +7387,6 @@ instantiate_class_template (tree type)
&& DECL_TEMPLATE_INFO (t))
tsubst_default_arguments (t);
- /* Some types referenced from within the template code need to be access
- checked at template instantiation time, i.e now. These types were
- added to the template at parsing time. Let's get those and perfom
- the acces checks then. */
- for (t = MEMBER_TYPES_NEEDING_ACCESS_CHECK (templ); t; t = TREE_CHAIN (t))
- {
- tree type_decl = TREE_PURPOSE (t);
- tree type_scope = TREE_VALUE (t);
-
- if (!type_decl || !type_scope || !CLASS_TYPE_P (type_scope))
- continue;
-
- if (uses_template_parms (type_decl))
- type_decl = tsubst (type_decl, args, tf_error, NULL_TREE);
-
- if (uses_template_parms (type_scope))
- type_scope = tsubst (type_scope, args, tf_error, NULL_TREE);
-
- gcc_assert (type_decl && type_decl != error_mark_node
- && type_scope && type_scope != error_mark_node);
-
- perform_or_defer_access_check (TYPE_BINFO (type_scope), type_decl, type_decl);
- }
-
- perform_deferred_access_checks ();
pop_nested_class ();
pop_from_top_level ();
pop_deferring_access_checks ();
@@ -11894,7 +11869,6 @@ instantiate_template (tree tmpl, tree targ_ptr, tsubst_flags_t complain)
tree fndecl;
tree gen_tmpl;
tree spec;
- tree t;
HOST_WIDE_INT saved_processing_template_decl;
if (tmpl == error_mark_node)
@@ -11973,24 +11947,6 @@ instantiate_template (tree tmpl, tree targ_ptr, tsubst_flags_t complain)
/* Now we know the specialization, compute access previously
deferred. */
push_access_scope (fndecl);
-
- /* Some types referenced from within the template code need to be access
- checked at template instantiation time, i.e now. These types were
- added to the template at parsing time. Let's get those and perfom
- the acces checks then. */
- for (t = MEMBER_TYPES_NEEDING_ACCESS_CHECK (tmpl); t; t = TREE_CHAIN (t))
- {
- tree type_decl = TREE_PURPOSE (t);
- tree type_scope = TREE_VALUE (t);
-
- if (!type_decl || !type_scope || !CLASS_TYPE_P (type_scope))
- continue;
-
- if (uses_template_parms (type_decl))
- type_decl = tsubst (type_decl, targ_ptr, tf_error, NULL_TREE);
-
- perform_or_defer_access_check (TYPE_BINFO (type_scope), type_decl, type_decl);
- }
perform_deferred_access_checks ();
pop_access_scope (fndecl);
pop_deferring_access_checks ();
@@ -16677,15 +16633,7 @@ resolve_typename_type (tree type, bool only_current_p)
gcc_assert (TREE_CODE (type) == TYPENAME_TYPE);
scope = TYPE_CONTEXT (type);
- /* Usually the non-qualified identifier of a TYPENAME_TYPE is
- TYPE_IDENTIFIER (type). But when 'type' is a typedef variant of
- a TYPENAME_TYPE node, then TYPE_NAME (type) is set to the TYPE_DECL representing
- the typedef. In that case TYPE_IDENTIFIER (type) is not the non-qualified
- identifier of the TYPENAME_TYPE anymore.
- So by getting the TYPE_IDENTIFIER of the _main declaration_ of the
- TYPENAME_TYPE instead, we avoid messing up with a possible
- typedef variant case. */
- name = TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (type));
+ name = TYPE_IDENTIFIER (type);
/* If the SCOPE is itself a TYPENAME_TYPE, then we need to resolve
it first before we can figure out what NAME refers to. */
@@ -17010,45 +16958,4 @@ type_uses_auto (tree type)
return NULL_TREE;
}
-/* Append TYPE_DECL to the template TMPL.
- TMPL is eiter a class type or a FUNCTION_DECL associated
- to a TEMPLATE_DECL.
- At TMPL instanciation time, TYPE_DECL will be checked to see
- if it can be accessed through SCOPE. */
-void
-append_type_to_template_for_access_check (tree templ,
- tree type_decl,
- tree scope)
-{
- tree node, templ_decl;
-
- gcc_assert (templ
- && get_template_info (templ)
- && TI_TEMPLATE (get_template_info (templ))
- && type_decl
- && (TREE_CODE (type_decl) == TYPE_DECL));
-
- templ_decl = TI_TEMPLATE (get_template_info (templ));
- gcc_assert (templ_decl);
-
- /* Make sure we don't append the type to the template twice.
- If this appears to be too slow, the
- MEMBER_TYPE_NEEDING_ACCESS_CHECK property
- of templ should be a hash table instead. */
- for (node = MEMBER_TYPES_NEEDING_ACCESS_CHECK (templ_decl);
- node;
- node = TREE_CHAIN (node))
- {
- tree decl = TREE_PURPOSE (node);
- tree type_scope = TREE_VALUE (node);
-
- if (decl == type_decl && type_scope == scope)
- return;
- }
-
- MEMBER_TYPES_NEEDING_ACCESS_CHECK (templ_decl) =
- tree_cons (type_decl, scope,
- MEMBER_TYPES_NEEDING_ACCESS_CHECK (templ_decl));
-}
-
#include "gt-cp-pt.h"
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 528a0c5de85..c9f0641f5f8 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -1529,30 +1529,6 @@ check_accessibility_of_qualified_id (tree decl,
tree scope;
tree qualifying_type = NULL_TREE;
- /* If we are parsing a template declaration and if decl is a typedef,
- add it to a list tied to the template.
- At template instantiation time, that list will be walked and
- access check performed. */
- if (is_typedef_decl (decl))
- {
- /* This the scope through which type_decl is accessed.
- It will be useful information later to do access check for
- type_decl usage. */
- tree scope = nested_name_specifier ? nested_name_specifier : DECL_CONTEXT (decl);
- tree templ_info = NULL;
- tree cs = current_scope ();
-
- if (cs && (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL))
- templ_info = get_template_info (cs);
-
- if (templ_info
- && TI_TEMPLATE (templ_info)
- && scope
- && CLASS_TYPE_P (scope)
- && !currently_open_class (scope))
- append_type_to_template_for_access_check (current_scope (), decl, scope);
- }
-
/* If we're not checking, return immediately. */
if (deferred_access_no_check)
return;
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index 6c69256aa39..bca72ce452c 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -1,6 +1,6 @@
/* Build expressions with type checking for C++ compiler.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
Hacked by Michael Tiemann (tiemann@cygnus.com)
@@ -1494,7 +1494,7 @@ cxx_sizeof_or_alignof_expr (tree e, enum tree_code op, bool complain)
bool
invalid_nonstatic_memfn_p (const_tree expr, tsubst_flags_t complain)
{
- if (TREE_CODE (TREE_TYPE (expr)) == METHOD_TYPE)
+ if (DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
{
if (complain & tf_error)
error ("invalid use of non-static member function");
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 264e88a862d..c2c13819015 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -3824,6 +3824,12 @@ struct foo
@};
@end smallexample
+@emph{Note:} The 4.1, 4.2 and 4.3 series of GCC ignore the
+@code{packed} attribute on bit-fields of type @code{char}. This has
+been fixed in GCC 4.4 but the change can lead to differences in the
+structure layout. See the documentation of
+@option{-Wpacked-bitfield-compat} for more information.
+
@item section ("@var{section-name}")
@cindex @code{section} variable attribute
Normally, the compiler places the objects it generates in sections like
@@ -4846,6 +4852,8 @@ asm ("sysint" : "=r" (result) : "0" (p1), "r" (p2));
In the above example, beware that a register that is call-clobbered by
the target ABI will be overwritten by any function call in the
assignment, including library calls for arithmetic operators.
+Also, a register may be clobbered when generating some operations,
+such as variable shift, memory copy, or memory move on x86.
Assuming it is a call-clobbered register, this may happen to @code{r0}
above by the assignment to @code{p2}. If you have to use such a
register, use temporary variables for expressions between the register
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 25bd8e276c9..bb6bf35cada 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -249,7 +249,7 @@ Objective-C and Objective-C++ Dialects}.
-Wmissing-format-attribute -Wmissing-include-dirs @gol
-Wmissing-noreturn -Wno-mudflap @gol
-Wno-multichar -Wnonnull -Wno-overflow @gol
--Woverlength-strings -Wpacked -Wpadded @gol
+-Woverlength-strings -Wpacked -Wpacked-bitfield-compat -Wpadded @gol
-Wparentheses -Wpedantic-ms-format -Wno-pedantic-ms-format @gol
-Wpointer-arith -Wno-pointer-to-int-cast @gol
-Wredundant-decls @gol
@@ -722,6 +722,7 @@ See RS/6000 and PowerPC Options.
-msoft-float -mhard-float -mmultiple -mno-multiple @gol
-msingle-float -mdouble-float -msimple-fpu @gol
-mstring -mno-string -mupdate -mno-update @gol
+-mavoid-indexed-addresses -mno-avoid-indexed-addresses @gol
-mfused-madd -mno-fused-madd -mbit-align -mno-bit-align @gol
-mstrict-align -mno-strict-align -mrelocatable @gol
-mno-relocatable -mrelocatable-lib -mno-relocatable-lib @gol
@@ -3979,6 +3980,27 @@ struct bar @{
@end group
@end smallexample
+@item -Wpacked-bitfield-compat
+@opindex Wpacked-bitfield-compat
+@opindex Wno-packed-bitfield-compat
+The 4.1, 4.2 and 4.3 series of GCC ignore the @code{packed} attribute
+on bit-fields of type @code{char}. This has been fixed in GCC 4.4 but
+the change can lead to differences in the structure layout. GCC
+informs you when the offset of such a field has changed in GCC 4.4.
+For example there is no longer a 4-bit padding between field @code{a}
+and @code{b} in this structure:
+
+@smallexample
+struct foo
+@{
+ char a:4;
+ char b:8;
+@} __attribute__ ((packed));
+@end smallexample
+
+This warning is enabled by default. Use
+@option{-Wno-packed-bitfield-compat} to disable this warning.
+
@item -Wpadded
@opindex Wpadded
@opindex Wno-padded
@@ -4528,172 +4550,275 @@ preprocessing.
Debug dumps can be enabled with a @option{-fdump-rtl} switch or some
@option{-d} option @var{letters}. Here are the possible
-letters for use in @var{letters} and @var{pass}, and their meanings:
+letters for use in @var{pass} and @var{letters}, and their meanings:
@table @gcctabopt
-@item -dA
-@opindex dA
-Annotate the assembler output with miscellaneous debugging information.
+
+@item -fdump-rtl-alignments
+@opindex fdump-rtl-alignments
+Dump after branch alignments have been computed.
+
+@item -fdump-rtl-asmcons
+@opindex fdump-rtl-asmcons
+Dump after fixing rtl statements that have unsatisfied in/out constraints.
+
+@item -fdump-rtl-auto_inc_dec
+@opindex fdump-rtl-auto_inc_dec
+Dump after auto-inc-dec discovery. This pass is only run on
+architectures that have auto inc or auto dec instructions.
+
+@item -fdump-rtl-barriers
+@opindex fdump-rtl-barriers
+Dump after cleaning up the barrier instructions.
+
+@item -fdump-rtl-bbpart
+@opindex fdump-rtl-bbpart
+Dump after partitioning hot and cold basic blocks.
@item -fdump-rtl-bbro
@opindex fdump-rtl-bbro
-Dump after block reordering, to @file{@var{file}.148r.bbro}.
+Dump after block reordering.
+
+@item -fdump-rtl-btl1
+@itemx -fdump-rtl-btl2
+@opindex fdump-rtl-btl1
+@opindex fdump-rtl-btl2
+@option{-fdump-rtl-btl1} and @option{-fdump-rtl-btl2} enable dumping
+after the two branch
+target load optimization passes.
+
+@item -fdump-rtl-bypass
+@opindex fdump-rtl-bypass
+Dump after jump bypassing and control flow optimizations.
@item -fdump-rtl-combine
@opindex fdump-rtl-combine
-Dump after the RTL instruction combination pass, to the file
-@file{@var{file}.129r.combine}.
+Dump after the RTL instruction combination pass.
+
+@item -fdump-rtl-compgotos
+@opindex fdump-rtl-compgotos
+Dump after duplicating the computed gotos.
@item -fdump-rtl-ce1
@itemx -fdump-rtl-ce2
+@itemx -fdump-rtl-ce3
@opindex fdump-rtl-ce1
@opindex fdump-rtl-ce2
-@option{-fdump-rtl-ce1} enable dumping after the
-first if conversion, to the file @file{@var{file}.117r.ce1}.
-@option{-fdump-rtl-ce2} enable dumping after the second if
-conversion, to the file @file{@var{file}.130r.ce2}.
+@opindex fdump-rtl-ce3
+@option{-fdump-rtl-ce1}, @option{-fdump-rtl-ce2}, and
+@option{-fdump-rtl-ce3} enable dumping after the three
+if conversion passes.
+
+@item -fdump-rtl-cprop_hardreg
+@opindex fdump-rtl-cprop_hardreg
+Dump after hard register copy propagation.
+
+@item -fdump-rtl-csa
+@opindex fdump-rtl-csa
+Dump after combining stack adjustments.
+
+@item -fdump-rtl-cse1
+@itemx -fdump-rtl-cse2
+@opindex fdump-rtl-cse1
+@opindex fdump-rtl-cse2
+@option{-fdump-rtl-cse1} and @option{-fdump-rtl-cse2} enable dumping after
+the two common subexpression elimination passes.
+
+@item -fdump-rtl-dce
+@opindex fdump-rtl-dce
+Dump after the standalone dead code elimination passes.
-@item -fdump-rtl-btl
@itemx -fdump-rtl-dbr
-@opindex fdump-rtl-btl
@opindex fdump-rtl-dbr
-@option{-fdump-rtl-btl} enable dumping after branch
-target load optimization, to @file{@var{file}.31.btl}.
-@option{-fdump-rtl-dbr} enable dumping after delayed branch
-scheduling, to @file{@var{file}.36.dbr}.
+Dump after delayed branch scheduling.
-@item -dD
-@opindex dD
-Dump all macro definitions, at the end of preprocessing, in addition to
-normal output.
-
-@item -fdump-rtl-ce3
-@opindex fdump-rtl-ce3
-Dump after the third if conversion, to @file{@var{file}.146r.ce3}.
-
-@item -fdump-rtl-cfg
-@itemx -fdump-rtl-life
-@opindex fdump-rtl-cfg
-@opindex fdump-rtl-life
-@option{-fdump-rtl-cfg} enable dumping after control
-and data flow analysis, to @file{@var{file}.116r.cfg}.
-@option{-fdump-rtl-cfg} enable dumping dump after life analysis,
-to @file{@var{file}.128r.life1} and @file{@var{file}.135r.life2}.
-
-@item -fdump-rtl-greg
-@opindex fdump-rtl-greg
-Dump after global register allocation, to @file{@var{file}.139r.greg}.
-
-@item -fdump-rtl-gcse
-@itemx -fdump-rtl-bypass
-@opindex fdump-rtl-gcse
-@opindex fdump-rtl-bypass
-@option{-fdump-rtl-gcse} enable dumping after GCSE, to
-@file{@var{file}.114r.gcse}. @option{-fdump-rtl-bypass}
-enable dumping after jump bypassing and control flow optimizations, to
-@file{@var{file}.115r.bypass}.
+@item -fdump-rtl-dse1
+@itemx -fdump-rtl-dse2
+@opindex fdump-rtl-dse1
+@opindex fdump-rtl-dse2
+@option{-fdump-rtl-dse1} and @option{-fdump-rtl-dse2} enable dumping after
+the two dead store elimination passes.
@item -fdump-rtl-eh
@opindex fdump-rtl-eh
-Dump after finalization of EH handling code, to @file{@var{file}.02.eh}.
+Dump after finalization of EH handling code.
-@item -fdump-rtl-sibling
-@opindex fdump-rtl-sibling
-Dump after sibling call optimizations, to @file{@var{file}.106r.sibling}.
+@item -fdump-rtl-eh_ranges
+@opindex fdump-rtl-eh_ranges
+Dump after conversion of EH handling range regions.
+
+@item -fdump-rtl-expand
+@opindex fdump-rtl-expand
+Dump after RTL generation.
+
+@item -fdump-rtl-fwprop1
+@itemx -fdump-rtl-fwprop2
+@opindex fdump-rtl-fwprop1
+@opindex fdump-rtl-fwprop2
+@option{-fdump-rtl-fwprop1} and @option{-fdump-rtl-fwprop2} enable
+dumping after the two forward propagation passes.
+
+@item -fdump-rtl-gcse1
+@itemx -fdump-rtl-gcse2
+@opindex fdump-rtl-gcse1
+@opindex fdump-rtl-gcse2
+@option{-fdump-rtl-gcse1} and @option{-fdump-rtl-gcse2} enable dumping
+after global common subexpression elimination.
+
+@item -fdump-rtl-init-regs
+@opindex fdump-rtl-init-regs
+Dump after the initialization of the registers.
+
+@item -fdump-rtl-initvals
+@opindex fdump-rtl-initvals
+Dump after the computation of the initial value sets.
+
+@item -fdump-rtl-into_cfglayout
+@opindex fdump-rtl-into_cfglayout
+Dump after converting to cfglayout mode.
+
+@item -fdump-rtl-ira
+@opindex fdump-rtl-ira
+Dump after iterated register allocation.
@item -fdump-rtl-jump
@opindex fdump-rtl-jump
-Dump after the first jump optimization, to @file{@var{file}.112r.jump}.
-
-@item -fdump-rtl-stack
-@opindex fdump-rtl-stack
-Dump after conversion from GCC's "flat register file" registers to the
-x87's stack-like registers, to @file{@var{file}.152r.stack}.
-
-@item -fdump-rtl-lreg
-@opindex fdump-rtl-lreg
-Dump after local register allocation, to @file{@var{file}.138r.lreg}.
+Dump after the second jump optimization.
@item -fdump-rtl-loop2
@opindex fdump-rtl-loop2
-@option{-fdump-rtl-loop2} enables dumping after the
-loop optimization pass, to @file{@var{file}.119r.loop2},
-@file{@var{file}.120r.loop2_init},
-@file{@var{file}.121r.loop2_invariant}, and
-@file{@var{file}.125r.loop2_done}.
-
-@item -fdump-rtl-sms
-@opindex fdump-rtl-sms
-Dump after modulo scheduling, to @file{@var{file}.136r.sms}.
+@option{-fdump-rtl-loop2} enables dumping after the rtl
+loop optimization passes.
@item -fdump-rtl-mach
@opindex fdump-rtl-mach
-Dump after performing the machine dependent reorganization pass, to
-@file{@var{file}.155r.mach} if that pass exists.
+Dump after performing the machine dependent reorganization pass, if that
+pass exists.
+
+@item -fdump-rtl-mode_sw
+@opindex fdump-rtl-mode_sw
+Dump after removing redundant mode switches.
@item -fdump-rtl-rnreg
@opindex fdump-rtl-rnreg
-Dump after register renumbering, to @file{@var{file}.147r.rnreg}.
+Dump after register renumbering.
-@item -fdump-rtl-regmove
-@opindex fdump-rtl-regmove
-Dump after the register move pass, to @file{@var{file}.132r.regmove}.
+@item -fdump-rtl-outof_cfglayout
+@opindex fdump-rtl-outof_cfglayout
+Dump after converting from cfglayout mode.
+
+@item -fdump-rtl-peephole2
+@opindex fdump-rtl-peephole2
+Dump after the peephole pass.
@item -fdump-rtl-postreload
@opindex fdump-rtl-postreload
-Dump after post-reload optimizations, to @file{@var{file}.24.postreload}.
+Dump after post-reload optimizations.
-@item -fdump-rtl-expand
-@opindex fdump-rtl-expand
-Dump after RTL generation, to @file{@var{file}.104r.expand}.
+@item -fdump-rtl-pro_and_epilogue
+@opindex fdump-rtl-pro_and_epilogue
+Dump after generating the function prologues and epilogues.
-@item -fdump-rtl-sched2
-@opindex fdump-rtl-sched2
-Dump after the second scheduling pass, to @file{@var{file}.149r.sched2}.
-
-@item -fdump-rtl-cse
-@opindex fdump-rtl-cse
-Dump after CSE (including the jump optimization that sometimes follows
-CSE), to @file{@var{file}.113r.cse}.
+@item -fdump-rtl-regmove
+@opindex fdump-rtl-regmove
+Dump after the register move pass.
@item -fdump-rtl-sched1
+@itemx -fdump-rtl-sched2
@opindex fdump-rtl-sched1
-Dump after the first scheduling pass, to @file{@var{file}.136r.sched1}.
+@opindex fdump-rtl-sched2
+@option{-fdump-rtl-sched1} and @option{-fdump-rtl-sched2} enable dumping
+after the basic block scheduling passes.
-@item -fdump-rtl-cse2
-@opindex fdump-rtl-cse2
-Dump after the second CSE pass (including the jump optimization that
-sometimes follows CSE), to @file{@var{file}.127r.cse2}.
+@item -fdump-rtl-see
+@opindex fdump-rtl-see
+Dump after sign extension elimination.
-@item -fdump-rtl-tracer
-@opindex fdump-rtl-tracer
-Dump after running tracer, to @file{@var{file}.118r.tracer}.
+@item -fdump-rtl-seqabstr
+@opindex fdump-rtl-seqabstr
+Dump after common sequence discovery.
-@item -fdump-rtl-vpt
-@itemx -fdump-rtl-vartrack
-@opindex fdump-rtl-vpt
-@opindex fdump-rtl-vartrack
-@option{-fdump-rtl-vpt} enable dumping after the value
-profile transformations, to @file{@var{file}.10.vpt}.
-@option{-fdump-rtl-vartrack} enable dumping after variable tracking,
-to @file{@var{file}.154r.vartrack}.
+@item -fdump-rtl-shorten
+@opindex fdump-rtl-shorten
+Dump after shortening branches.
-@item -fdump-rtl-flow2
-@opindex fdump-rtl-flow2
-Dump after the second flow pass, to @file{@var{file}.142r.flow2}.
+@item -fdump-rtl-sibling
+@opindex fdump-rtl-sibling
+Dump after sibling call optimizations.
+
+@item -fdump-rtl-split1
+@itemx -fdump-rtl-split2
+@itemx -fdump-rtl-split3
+@itemx -fdump-rtl-split4
+@itemx -fdump-rtl-split5
+@opindex fdump-rtl-split1
+@opindex fdump-rtl-split2
+@opindex fdump-rtl-split3
+@opindex fdump-rtl-split4
+@opindex fdump-rtl-split5
+@option{-fdump-rtl-split1}, @option{-fdump-rtl-split2},
+@option{-fdump-rtl-split3}, @option{-fdump-rtl-split4} and
+@option{-fdump-rtl-split5} enable dumping after five rounds of
+instruction splitting.
-@item -fdump-rtl-peephole2
-@opindex fdump-rtl-peephole2
-Dump after the peephole pass, to @file{@var{file}.145r.peephole2}.
+@item -fdump-rtl-sms
+@opindex fdump-rtl-sms
+Dump after modulo scheduling. This pass is only run on some
+architectures.
+
+@item -fdump-rtl-stack
+@opindex fdump-rtl-stack
+Dump after conversion from GCC's "flat register file" registers to the
+x87's stack-like registers. This pass is only run on x86 variants.
+
+@item -fdump-rtl-subreg1
+@itemx -fdump-rtl-subreg2
+@opindex fdump-rtl-subreg1
+@opindex fdump-rtl-subreg2
+@option{-fdump-rtl-subreg1} and @option{-fdump-rtl-subreg2} enable dumping after
+the two subreg expansion passes.
+
+@item -fdump-rtl-unshare
+@opindex fdump-rtl-unshare
+Dump after all rtl has been unshared.
+
+@item -fdump-rtl-vartrack
+@opindex fdump-rtl-vartrack
+Dump after variable tracking.
+
+@item -fdump-rtl-vregs
+@opindex fdump-rtl-vregs
+Dump after converting virtual registers to hard registers.
@item -fdump-rtl-web
@opindex fdump-rtl-web
-Dump after live range splitting, to @file{@var{file}.126r.web}.
+Dump after live range splitting.
+
+@item -fdump-rtl-regclass
+@itemx -fdump-rtl-subregs_of_mode_init
+@itemx -fdump-rtl-subregs_of_mode_finish
+@itemx -fdump-rtl-dfinit
+@itemx -fdump-rtl-dfinish
+@opindex fdump-rtl-regclass
+@opindex fdump-rtl-subregs_of_mode_init
+@opindex fdump-rtl-subregs_of_mode_finish
+@opindex fdump-rtl-dfinit
+@opindex fdump-rtl-dfinish
+These dumps are defined but always produce empty files.
@item -fdump-rtl-all
@opindex fdump-rtl-all
Produce all the dumps listed above.
+@item -dA
+@opindex dA
+Annotate the assembler output with miscellaneous debugging information.
+
+@item -dD
+@opindex dD
+Dump all macro definitions, at the end of preprocessing, in addition to
+normal output.
+
@item -dH
@opindex dH
Produce a core dump whenever an error occurs.
@@ -5473,8 +5598,9 @@ Attempt to merge identical constants and identical variables.
This option implies @option{-fmerge-constants}. In addition to
@option{-fmerge-constants} this considers e.g.@: even constant initialized
arrays or initialized constant variables with integral or floating point
-types. Languages like C or C++ require each non-automatic variable to
-have distinct location, so using this option will result in non-conforming
+types. Languages like C or C++ require each variable, including multiple
+instances of the same variable in recursive calls, to have distinct locations,
+so using this option will result in non-conforming
behavior.
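As an illustrative sketch of the sentence about recursive calls (not part
of this patch; the function name @code{same_object} is made up), code of
this shape can observe the non-conforming behavior:

@smallexample
int
same_object (const int *outer)
@{
  const int table[4] = @{ 1, 2, 3, 4 @};
  if (outer)
    /* A conforming implementation must return 0, because each
       recursive activation has a distinct table.  With
       -fmerge-all-constants both activations may refer to one
       shared, statically allocated object, so this can yield 1.  */
    return outer == table;
  return same_object (table);
@}
@end smallexample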
@item -fmodulo-sched
@@ -8893,7 +9019,7 @@ assembly code. Permissible names are: @samp{arm2}, @samp{arm250},
@samp{cortex-a8}, @samp{cortex-a9},
@samp{cortex-r4}, @samp{cortex-r4f}, @samp{cortex-m3},
@samp{cortex-m1},
-@samp{xscale}, @samp{iwmmxt}, @samp{ep9312}.
+@samp{xscale}, @samp{iwmmxt}, @samp{iwmmxt2}, @samp{ep9312}.
@item -mtune=@var{name}
@opindex mtune
@@ -8917,7 +9043,7 @@ of the @option{-mcpu=} option. Permissible names are: @samp{armv2},
@samp{armv6}, @samp{armv6j},
@samp{armv6t2}, @samp{armv6z}, @samp{armv6zk}, @samp{armv6-m},
@samp{armv7}, @samp{armv7-a}, @samp{armv7-r}, @samp{armv7-m},
-@samp{iwmmxt}, @samp{ep9312}.
+@samp{iwmmxt}, @samp{iwmmxt2}, @samp{ep9312}.
@item -mfpu=@var{name}
@itemx -mfpe=@var{number}
@@ -13797,6 +13923,16 @@ stack pointer is updated and the address of the previous frame is
stored, which means code that walks the stack frame across interrupts or
signals may get corrupted data.
+@item -mavoid-indexed-addresses
+@itemx -mno-avoid-indexed-addresses
+@opindex mavoid-indexed-addresses
+@opindex mno-avoid-indexed-addresses
+Generate code that tries to avoid (not avoid) the use of indexed load
+or store instructions. These instructions can incur a performance
+penalty on Power6 processors in certain situations, such as when
+stepping through large arrays that cross a 16M boundary. This option
+is enabled by default when targeting Power6 and disabled otherwise.
+
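As an illustrative sketch of the kind of code the option is aimed at (not
part of this patch; the function name @code{sum} is made up, and the
comment describes the intended effect rather than guaranteed code
generation):

@smallexample
/* Stepping linearly through a large array.  With
   -mavoid-indexed-addresses the compiler tries to address the
   elements with non-indexed (base plus displacement) forms instead
   of indexed (base plus index register) loads.  */
double
sum (const double *a, long n)
@{
  double s = 0.0;
  for (long i = 0; i < n; i++)
    s += a[i];
  return s;
@}
@end smallexample

Such code would typically be compiled with @option{-mcpu=power6}, where
the option is on by default, or with the option toggled explicitly.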
@item -mfused-madd
@itemx -mno-fused-madd
@opindex mfused-madd
diff --git a/gcc/doc/passes.texi b/gcc/doc/passes.texi
index 9004dd763ec..6d32b07efe7 100644
--- a/gcc/doc/passes.texi
+++ b/gcc/doc/passes.texi
@@ -826,24 +826,12 @@ them on the stack. This is done in several subpasses:
@itemize @bullet
@item
-Register class preferencing. The RTL code is scanned to find out
-which register class is best for each pseudo register. The source
-file is @file{regclass.c}.
+Register move optimizations. This pass makes some simple RTL code
+transformations which improve the subsequent register allocation. The
+source file is @file{regmove.c}.
@item
-Local register allocation. This pass allocates hard registers to
-pseudo registers that are used only within one basic block. Because
-the basic block is linear, it can use fast and powerful techniques to
-do a decent job. The source is located in @file{local-alloc.c}.
-
-@item
-Global register allocation. This pass allocates hard registers for
-the remaining pseudo registers (those whose life spans are not
-contained in one basic block). The pass is located in @file{global.c}.
-
-@item
-The optional integrated register allocator (@acronym{IRA}). It can be
-used instead of the local and global allocator. It is called
+The integrated register allocator (@acronym{IRA}). It is called
integrated because coalescing, register live range splitting, and hard
register preferencing are done on-the-fly during coloring. It also
has better integration with the reload pass. Pseudo-registers spilled
diff --git a/gcc/except.c b/gcc/except.c
index 77a3049ba5e..8a44f259715 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -3332,7 +3332,7 @@ struct rtl_opt_pass pass_convert_to_eh_region_ranges =
{
{
RTL_PASS,
- "eh-ranges", /* name */
+ "eh_ranges", /* name */
NULL, /* gate */
convert_to_eh_region_ranges, /* execute */
NULL, /* sub */
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index eb7d7c91210..da4d50d5e94 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -8628,6 +8628,24 @@ fold_unary (enum tree_code code, tree type, tree op0)
} /* switch (code) */
}
+
+/* If the operation was a conversion do _not_ mark a resulting constant
+ with TREE_OVERFLOW if the original constant was not. These conversions
+ have implementation defined behavior and retaining the TREE_OVERFLOW
+ flag here would confuse later passes such as VRP. */
+tree
+fold_unary_ignore_overflow (enum tree_code code, tree type, tree op0)
+{
+ tree res = fold_unary (code, type, op0);
+ if (res
+ && TREE_CODE (res) == INTEGER_CST
+ && TREE_CODE (op0) == INTEGER_CST
+ && CONVERT_EXPR_CODE_P (code))
+ TREE_OVERFLOW (res) = TREE_OVERFLOW (op0);
+
+ return res;
+}
+
/* Fold a binary expression of code CODE and type TYPE with operands
OP0 and OP1, containing either a MIN-MAX or a MAX-MIN combination.
Return the folded expression if folding is successful. Otherwise,
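An aside on the comment above fold_unary_ignore_overflow: the conversions
in question are implementation-defined rather than undefined in C, which
is why the folded constant should not be marked with TREE_OVERFLOW.  A
stand-alone sketch (ordinary user code, not GCC-internal code) of such a
conversion:

#include <stdio.h>

int
main (void)
{
  long big = 100000L;
  /* Converting a value that does not fit into a narrower signed type
     is implementation-defined (commonly reduction modulo 2^16 when
     short is 16 bits wide), not undefined behavior.  */
  short s = (short) big;
  printf ("%d\n", (int) s);
  return 0;
}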
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index a744290e67d..1186064d029 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,16 @@
+2009-01-28 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/38852
+ PR fortran/39006
+ * trans-intrinsic.c (gfc_conv_intrinsic_bound): Use the array
+ descriptor ubound for UBOUND, when the array lbound == 1.
+
+2009-01-27 Daniel Kraft <d@domob.eu>
+
+ PR fortran/38883
+ * trans-stmt.c (gfc_conv_elemental_dependencies): Create temporary
+ for the real type needed to make it work for subcomponent-references.
+
2009-01-21 Daniel Kraft <d@domob.eu>
* trans-stmt.c (gfc_conv_elemental_dependencies): Cleaned up comment.
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index e3941c55414..50b429368b1 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -972,12 +972,17 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
cond4 = fold_build2 (LT_EXPR, boolean_type_node, stride,
gfc_index_zero_node);
- cond4 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, cond4, cond2);
if (upper)
{
+ tree cond5;
cond = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, cond3, cond4);
+ cond5 = fold_build2 (EQ_EXPR, boolean_type_node, gfc_index_one_node, lbound);
+ cond5 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, cond4, cond5);
+
+ cond = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, cond, cond5);
+
se->expr = fold_build3 (COND_EXPR, gfc_array_index_type, cond,
ubound, gfc_index_zero_node);
}
diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c
index 82ecca80ac2..42f0ac438ab 100644
--- a/gcc/fortran/trans-stmt.c
+++ b/gcc/fortran/trans-stmt.c
@@ -213,7 +213,6 @@ gfc_conv_elemental_dependencies (gfc_se * se, gfc_se * loopse,
gfc_ss_info *info;
gfc_symbol *fsym;
int n;
- stmtblock_t block;
tree data;
tree offset;
tree size;
@@ -252,7 +251,7 @@ gfc_conv_elemental_dependencies (gfc_se * se, gfc_se * loopse,
&& gfc_check_fncall_dependency (e, fsym->attr.intent,
sym, arg0, check_variable))
{
- tree initial;
+ tree initial, temptype;
stmtblock_t temp_post;
/* Make a local loopinfo for the temporary creation, so that
@@ -278,24 +277,31 @@ gfc_conv_elemental_dependencies (gfc_se * se, gfc_se * loopse,
else
initial = NULL_TREE;
- /* Generate the temporary. Merge the block so that the
- declarations are put at the right binding level. Cleaning up the
- temporary should be the very last thing done, so we add the code to
- a new block and add it to se->post as last instructions. */
+ /* Find the type of the temporary to create; we don't use the type
+ of e itself as this breaks for subcomponent-references in e (where
+ the type of e is that of the final reference, but parmse.expr's
+ type corresponds to the full derived-type). */
+ /* TODO: Fix this somehow so we don't need a temporary of the whole
+ array but instead only the components referenced. */
+ temptype = TREE_TYPE (parmse.expr); /* Pointer to descriptor. */
+ gcc_assert (TREE_CODE (temptype) == POINTER_TYPE);
+ temptype = TREE_TYPE (temptype);
+ temptype = gfc_get_element_type (temptype);
+
+ /* Generate the temporary. Cleaning up the temporary should be the
+ very last thing done, so we add the code to a new block and add it
+ to se->post as last instructions. */
size = gfc_create_var (gfc_array_index_type, NULL);
data = gfc_create_var (pvoid_type_node, NULL);
- gfc_start_block (&block);
gfc_init_block (&temp_post);
- tmp = gfc_typenode_for_spec (&e->ts);
tmp = gfc_trans_create_temp_array (&se->pre, &temp_post,
- &tmp_loop, info, tmp,
+ &tmp_loop, info, temptype,
initial,
false, true, false,
&arg->expr->where);
gfc_add_modify (&se->pre, size, tmp);
tmp = fold_convert (pvoid_type_node, info->data);
gfc_add_modify (&se->pre, data, tmp);
- gfc_merge_block_scope (&block);
/* Calculate the offset for the temporary. */
offset = gfc_index_zero_node;
@@ -315,7 +321,7 @@ gfc_conv_elemental_dependencies (gfc_se * se, gfc_se * loopse,
tmp = build_call_expr (gfor_fndecl_in_unpack, 2, parmse.expr, data);
gfc_add_expr_to_block (&se->post, tmp);
- gfc_add_block_to_block (&se->pre, &parmse.pre);
+ /* parmse.pre is already added above. */
gfc_add_block_to_block (&se->post, &parmse.post);
gfc_add_block_to_block (&se->post, &temp_post);
}
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 5aef12875aa..be0c1ac00e5 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -3526,7 +3526,8 @@ gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
if (valid_const_initializer
&& num_nonzero_elements > 1
&& TREE_READONLY (object)
- && TREE_CODE (object) == VAR_DECL)
+ && TREE_CODE (object) == VAR_DECL
+ && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
{
if (notify_temp_creation)
return GS_ERROR;
diff --git a/gcc/global.c b/gcc/global.c
deleted file mode 100644
index abf070d91c2..00000000000
--- a/gcc/global.c
+++ /dev/null
@@ -1,1849 +0,0 @@
-/* Allocate registers for pseudo-registers that span basic blocks.
- Copyright (C) 1987, 1988, 1991, 1994, 1996, 1997, 1998,
- 1999, 2000, 2002, 2003, 2004, 2005, 2006, 2007
- Free Software Foundation, Inc.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 3, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "machmode.h"
-#include "hard-reg-set.h"
-#include "rtl.h"
-#include "tm_p.h"
-#include "flags.h"
-#include "regs.h"
-#include "function.h"
-#include "insn-config.h"
-#include "recog.h"
-#include "reload.h"
-#include "output.h"
-#include "toplev.h"
-#include "tree-pass.h"
-#include "timevar.h"
-#include "df.h"
-#include "vecprim.h"
-#include "dbgcnt.h"
-#include "ra.h"
-#include "ira.h"
-
-/* This pass of the compiler performs global register allocation.
- It assigns hard register numbers to all the pseudo registers
- that were not handled in local_alloc. Assignments are recorded
- in the vector reg_renumber, not by changing the rtl code.
- (Such changes are made by final). The entry point is
- the function global_alloc.
-
- After allocation is complete, the reload pass is run as a subroutine
- of this pass, so that when a pseudo reg loses its hard reg due to
- spilling it is possible to make a second attempt to find a hard
- reg for it. The reload pass is independent in other respects
- and it is run even when stupid register allocation is in use.
-
- 1. Assign allocation-numbers (allocnos) to the pseudo-registers
- still needing allocations and to the pseudo-registers currently
- allocated by local-alloc which may be spilled by reload.
- Set up tables reg_allocno and allocno_reg to map
- reg numbers to allocnos and vice versa.
- max_allocno gets the number of allocnos in use.
-
- 2. Allocate a max_allocno by max_allocno compressed triangular conflict
- bit matrix (a triangular bit matrix with portions removed for which we
- can guarantee there are no conflicts, example: two local pseudos that
- live in different basic blocks) and clear it. This is called "conflict".
- Note that for triangular bit matrices, there are two possible equations
- for computing the bit number for two allocnos: LOW and HIGH (LOW < HIGH):
-
- 1) BITNUM = f(HIGH) + LOW, where
- f(HIGH) = (HIGH * (HIGH - 1)) / 2
-
- 2) BITNUM = f(LOW) + HIGH, where
- f(LOW) = LOW * (max_allocno - LOW) + (LOW * (LOW - 1)) / 2 - LOW - 1
-
- We use the second (and less common) equation as this gives us better
- cache locality for local allocnos that are live within the same basic
- block. Also note that f(HIGH) and f(LOW) can be precalculated for all
- values of HIGH and LOW, so all that is necessary to compute the bit
- number for two allocnos LOW and HIGH is a load followed by an addition.
-
- Allocate a max_allocno by FIRST_PSEUDO_REGISTER conflict matrix for
- conflicts between allocnos and explicit hard register use (which
- includes use of pseudo-registers allocated by local_alloc). This
- is the hard_reg_conflicts inside each allocno.
-
- 3. For each basic block, walk backward through the block, recording
- which pseudo-registers and which hardware registers are live.
- Build the conflict matrix between the pseudo-registers and another of
- pseudo-registers versus hardware registers.
-
- 4. For each basic block, walk backward through the block, recording
- the preferred hardware registers for each pseudo-register.
-
- 5. Sort a table of the allocnos into order of desirability of the variables.
-
- 6. Allocate the variables in that order; each if possible into
- a preferred register, else into another register. */
-
-/* A vector of the integers from 0 to max_allocno-1,
- sorted in the order of first-to-be-allocated first. */
-
-static int *allocno_order;
-
-/* Set of registers that global-alloc isn't supposed to use. */
-
-static HARD_REG_SET no_global_alloc_regs;
-
-/* Set of registers used so far. */
-
-static HARD_REG_SET regs_used_so_far;
-
-/* Number of refs to each hard reg, as used by local alloc.
- It is zero for a reg that contains global pseudos or is explicitly used. */
-
-static int local_reg_n_refs[FIRST_PSEUDO_REGISTER];
-
-/* Frequency of uses of given hard reg. */
-static int local_reg_freq[FIRST_PSEUDO_REGISTER];
-
-/* Guess at live length of each hard reg, as used by local alloc.
- This is actually the sum of the live lengths of the specific regs. */
-
-static int local_reg_live_length[FIRST_PSEUDO_REGISTER];
-
-/* Set to 1 a bit in a vector TABLE of HARD_REG_SETs, for vector
- element I, and hard register number J. */
-
-#define SET_REGBIT(TABLE, I, J) SET_HARD_REG_BIT (allocno[I].TABLE, J)
-
-/* Return true if *LOC contains an asm. */
-
-static int
-insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
-{
- if ( !*loc)
- return 0;
- if (GET_CODE (*loc) == ASM_OPERANDS)
- return 1;
- return 0;
-}
-
-
-/* Return true if INSN contains an ASM. */
-
-static int
-insn_contains_asm (rtx insn)
-{
- return for_each_rtx (&insn, insn_contains_asm_1, NULL);
-}
-
-
-static void
-compute_regs_asm_clobbered (char *regs_asm_clobbered)
-{
- basic_block bb;
-
- memset (regs_asm_clobbered, 0, sizeof (char) * FIRST_PSEUDO_REGISTER);
-
- FOR_EACH_BB (bb)
- {
- rtx insn;
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- df_ref *def_rec;
- if (insn_contains_asm (insn))
- for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- unsigned int dregno = DF_REF_REGNO (def);
- if (dregno < FIRST_PSEUDO_REGISTER)
- {
- unsigned int i;
- enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (def));
- unsigned int end = dregno
- + hard_regno_nregs[dregno][mode] - 1;
- for (i = dregno; i <= end; ++i)
- regs_asm_clobbered[i] = 1;
- }
- }
- }
- }
-}
-
-
-/* All registers that can be eliminated. */
-
-HARD_REG_SET eliminable_regset;
-
-static int regno_compare (const void *, const void *);
-static int allocno_compare (const void *, const void *);
-static void expand_preferences (void);
-static void prune_preferences (void);
-static void set_preferences (void);
-static void find_reg (int, HARD_REG_SET, int, int, int);
-static void dump_conflicts (FILE *);
-
-
-/* Look through the list of eliminable registers. Set ELIM_SET to the
- set of registers which may be eliminated. Set NO_GLOBAL_SET to the
- set of registers which may not be used across blocks.
-
- This will normally be called with ELIM_SET as the file static
- variable eliminable_regset, and NO_GLOBAL_SET as the file static
- variable NO_GLOBAL_ALLOC_REGS.
-
- It also initializes global flag frame_pointer_needed. */
-
-static void
-compute_regsets (HARD_REG_SET *elim_set,
- HARD_REG_SET *no_global_set)
-{
-
-/* Like regs_ever_live, but 1 if a reg is set or clobbered from an asm.
- Unlike regs_ever_live, elements of this array corresponding to
- eliminable regs like the frame pointer are set if an asm sets them. */
- char *regs_asm_clobbered = XALLOCAVEC (char, FIRST_PSEUDO_REGISTER);
-
-#ifdef ELIMINABLE_REGS
- static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
- size_t i;
-#endif
-
- /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
- sp for alloca. So we can't eliminate the frame pointer in that
- case. At some point, we should improve this by emitting the
- sp-adjusting insns for this case. */
- int need_fp
- = (! flag_omit_frame_pointer
- || (cfun->calls_alloca && EXIT_IGNORE_STACK)
- || crtl->accesses_prior_frames
- || crtl->stack_realign_needed
- || FRAME_POINTER_REQUIRED);
-
- frame_pointer_needed = need_fp;
-
- max_regno = max_reg_num ();
- compact_blocks ();
-
- max_allocno = 0;
-
- /* A machine may have certain hard registers that
- are safe to use only within a basic block. */
-
- CLEAR_HARD_REG_SET (*no_global_set);
- CLEAR_HARD_REG_SET (*elim_set);
-
- compute_regs_asm_clobbered (regs_asm_clobbered);
- /* Build the regset of all eliminable registers and show we can't use those
- that we already know won't be eliminated. */
-#ifdef ELIMINABLE_REGS
- for (i = 0; i < ARRAY_SIZE (eliminables); i++)
- {
- bool cannot_elim
- = (! CAN_ELIMINATE (eliminables[i].from, eliminables[i].to)
- || (eliminables[i].to == STACK_POINTER_REGNUM
- && need_fp
- && (! SUPPORTS_STACK_ALIGNMENT
- || ! stack_realign_fp)));
-
- if (!regs_asm_clobbered[eliminables[i].from])
- {
- SET_HARD_REG_BIT (*elim_set, eliminables[i].from);
-
- if (cannot_elim)
- SET_HARD_REG_BIT (*no_global_set, eliminables[i].from);
- }
- else if (cannot_elim)
- error ("%s cannot be used in asm here",
- reg_names[eliminables[i].from]);
- else
- df_set_regs_ever_live (eliminables[i].from, true);
- }
-#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
- if (!regs_asm_clobbered[HARD_FRAME_POINTER_REGNUM])
- {
- SET_HARD_REG_BIT (*elim_set, HARD_FRAME_POINTER_REGNUM);
- if (need_fp)
- SET_HARD_REG_BIT (*no_global_set, HARD_FRAME_POINTER_REGNUM);
- }
- else if (need_fp)
- error ("%s cannot be used in asm here",
- reg_names[HARD_FRAME_POINTER_REGNUM]);
- else
- df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
-#endif
-
-#else
- if (!regs_asm_clobbered[FRAME_POINTER_REGNUM])
- {
- SET_HARD_REG_BIT (*elim_set, FRAME_POINTER_REGNUM);
- if (need_fp)
- SET_HARD_REG_BIT (*no_global_set, FRAME_POINTER_REGNUM);
- }
- else if (need_fp)
- error ("%s cannot be used in asm here", reg_names[FRAME_POINTER_REGNUM]);
- else
- df_set_regs_ever_live (FRAME_POINTER_REGNUM, true);
-#endif
-}
-
-/* Perform allocation of pseudo-registers not allocated by local_alloc.
-
- Return value is nonzero if reload failed
- and we must not do any more for this function. */
-
-static int
-global_alloc (void)
-{
- int retval;
- size_t i;
- int max_blk;
- int *num_allocnos_per_blk;
-
- compute_regsets (&eliminable_regset, &no_global_alloc_regs);
-
- /* Track which registers have already been used. Start with registers
- explicitly in the rtl, then registers allocated by local register
- allocation. */
-
- CLEAR_HARD_REG_SET (regs_used_so_far);
-#ifdef LEAF_REGISTERS
- /* If we are doing the leaf function optimization, and this is a leaf
- function, it means that the registers that take work to save are those
- that need a register window. So prefer the ones that can be used in
- a leaf function. */
- {
- const char *cheap_regs;
- const char *const leaf_regs = LEAF_REGISTERS;
-
- if (only_leaf_regs_used () && leaf_function_p ())
- cheap_regs = leaf_regs;
- else
- cheap_regs = call_used_regs;
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (df_regs_ever_live_p (i) || cheap_regs[i])
- SET_HARD_REG_BIT (regs_used_so_far, i);
- }
-#else
- /* We consider registers that do not have to be saved over calls as if
- they were already used since there is no cost in using them. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (df_regs_ever_live_p (i) || call_used_regs[i])
- SET_HARD_REG_BIT (regs_used_so_far, i);
-#endif
-
- for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
- if (reg_renumber[i] >= 0)
- SET_HARD_REG_BIT (regs_used_so_far, reg_renumber[i]);
-
- /* Establish mappings from register number to allocation number
- and vice versa. In the process, count the allocnos. */
-
- reg_allocno = XNEWVEC (int, max_regno);
-
- /* Initially fill the reg_allocno array with regno's... */
- max_blk = 0;
- max_allocno = 0;
- for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
- /* Note that reg_live_length[i] < 0 indicates a "constant" reg
- that we are supposed to refrain from putting in a hard reg.
- -2 means do make an allocno but don't allocate it. */
- if (REG_N_REFS (i) != 0 && REG_LIVE_LENGTH (i) != -1
- /* Don't allocate pseudos that cross calls,
- if this function receives a nonlocal goto. */
- && (! cfun->has_nonlocal_label
- || REG_N_CALLS_CROSSED (i) == 0))
- {
- int blk = regno_basic_block (i);
- reg_allocno[max_allocno++] = i;
- if (blk > max_blk)
- max_blk = blk;
- gcc_assert (REG_LIVE_LENGTH (i));
- }
-
- allocno = XCNEWVEC (struct allocno, max_allocno);
- partial_bitnum = XNEWVEC (HOST_WIDE_INT, max_allocno);
- num_allocnos_per_blk = XCNEWVEC (int, max_blk + 1);
-
- /* ...so we can sort them in the order we want them to receive
- their allocnos. */
- qsort (reg_allocno, max_allocno, sizeof (int), regno_compare);
-
- for (i = 0; i < (size_t) max_allocno; i++)
- {
- int regno = reg_allocno[i];
- int blk = regno_basic_block (regno);
- num_allocnos_per_blk[blk]++;
- allocno[i].reg = regno;
- allocno[i].size = PSEUDO_REGNO_SIZE (regno);
- allocno[i].calls_crossed += REG_N_CALLS_CROSSED (regno);
- allocno[i].freq_calls_crossed += REG_FREQ_CALLS_CROSSED (regno);
- allocno[i].throwing_calls_crossed
- += REG_N_THROWING_CALLS_CROSSED (regno);
- allocno[i].n_refs += REG_N_REFS (regno);
- allocno[i].freq += REG_FREQ (regno);
- if (allocno[i].live_length < REG_LIVE_LENGTH (regno))
- allocno[i].live_length = REG_LIVE_LENGTH (regno);
- }
-
- /* The "global" block must contain all allocnos. */
- num_allocnos_per_blk[0] = max_allocno;
-
- /* Now reinitialize the reg_allocno array in terms of the
- optimized regno to allocno mapping we created above. */
- for (i = 0; i < (size_t) max_regno; i++)
- reg_allocno[i] = -1;
-
- max_bitnum = 0;
- for (i = 0; i < (size_t) max_allocno; i++)
- {
- int regno = allocno[i].reg;
- int blk = regno_basic_block (regno);
- int row_size = --num_allocnos_per_blk[blk];
- reg_allocno[regno] = (int) i;
- partial_bitnum[i] = (row_size > 0) ? max_bitnum - ((int) i + 1) : -1;
- max_bitnum += row_size;
- }
-
-#ifdef ENABLE_CHECKING
- gcc_assert (max_bitnum <=
- (((HOST_WIDE_INT) max_allocno *
- ((HOST_WIDE_INT) max_allocno - 1)) / 2));
-#endif
-
- if (dump_file)
- {
- HOST_WIDE_INT num_bits, num_bytes, actual_bytes;
-
- fprintf (dump_file, "## max_blk: %d\n", max_blk);
- fprintf (dump_file, "## max_regno: %d\n", max_regno);
- fprintf (dump_file, "## max_allocno: %d\n", max_allocno);
-
- num_bits = max_bitnum;
- num_bytes = CEIL (num_bits, 8);
- actual_bytes = num_bytes;
- fprintf (dump_file, "## Compressed triangular bitmatrix size: ");
- fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC " bits, ", num_bits);
- fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC " bytes\n", num_bytes);
-
- num_bits = ((HOST_WIDE_INT) max_allocno *
- ((HOST_WIDE_INT) max_allocno - 1)) / 2;
- num_bytes = CEIL (num_bits, 8);
- fprintf (dump_file, "## Standard triangular bitmatrix size: ");
- fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC " bits, ", num_bits);
- fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC " bytes [%.2f%%]\n",
- num_bytes, 100.0 * ((double) actual_bytes / (double) num_bytes));
-
- num_bits = (HOST_WIDE_INT) max_allocno * (HOST_WIDE_INT) max_allocno;
- num_bytes = CEIL (num_bits, 8);
- fprintf (dump_file, "## Square bitmatrix size: ");
- fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC " bits, ", num_bits);
- fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC " bytes [%.2f%%]\n",
- num_bytes, 100.0 * ((double) actual_bytes / (double) num_bytes));
- }
-
- /* Calculate amount of usage of each hard reg by pseudos
- allocated by local-alloc. This is to see if we want to
- override it. */
- memset (local_reg_live_length, 0, sizeof local_reg_live_length);
- memset (local_reg_n_refs, 0, sizeof local_reg_n_refs);
- memset (local_reg_freq, 0, sizeof local_reg_freq);
- for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
- if (reg_renumber[i] >= 0)
- {
- int regno = reg_renumber[i];
- int endregno = end_hard_regno (PSEUDO_REGNO_MODE (i), regno);
- int j;
-
- for (j = regno; j < endregno; j++)
- {
- local_reg_n_refs[j] += REG_N_REFS (i);
- local_reg_freq[j] += REG_FREQ (i);
- local_reg_live_length[j] += REG_LIVE_LENGTH (i);
- }
- }
-
- /* We can't override local-alloc for a reg used not just by local-alloc. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (df_regs_ever_live_p (i))
- local_reg_n_refs[i] = 0, local_reg_freq[i] = 0;
-
- if (dump_file)
- {
- for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
- {
- fprintf (dump_file, "%d REG_N_REFS=%d, REG_FREQ=%d, REG_LIVE_LENGTH=%d\n",
- (int)i, REG_N_REFS (i), REG_FREQ (i), REG_LIVE_LENGTH (i));
- }
- fprintf (dump_file, "regs_ever_live =");
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (df_regs_ever_live_p (i))
- fprintf (dump_file, " %d", (int)i);
- fprintf (dump_file, "\n");
- }
-
- conflicts = NULL;
- adjacency = NULL;
- adjacency_pool = NULL;
-
- /* If there is work to be done (at least one reg to allocate),
- perform global conflict analysis and allocate the regs. */
-
- if (max_allocno > 0)
- {
- /* We used to use alloca here, but the size of what it would try to
- allocate would occasionally cause it to exceed the stack limit and
- cause unpredictable core dumps. Some examples were > 2Mb in size. */
- conflicts = XCNEWVEC (HOST_WIDEST_FAST_INT,
- CEIL(max_bitnum, HOST_BITS_PER_WIDEST_FAST_INT));
-
- adjacency = XCNEWVEC (adjacency_t *, max_allocno);
- adjacency_pool = create_alloc_pool ("global_alloc adjacency list pool",
- sizeof (adjacency_t), 1024);
-
- /* Scan all the insns and compute the conflicts among allocnos
- and between allocnos and hard regs. */
-
- global_conflicts ();
-
- /* There is just too much going on in the register allocators to
- keep things up to date. At the end we have to rescan anyway
- because things change when the reload_completed flag is set.
- So we just turn off scanning and we will rescan by hand.
-
- However, we needed to do the rescanning before this point to
- get the new insns scanned inserted by local_alloc scanned for
- global_conflicts. */
- df_set_flags (DF_NO_INSN_RESCAN);
-
- /* Eliminate conflicts between pseudos and eliminable registers. If
- the register is not eliminated, the pseudo won't really be able to
- live in the eliminable register, so the conflict doesn't matter.
- If we do eliminate the register, the conflict will no longer exist.
- So in either case, we can ignore the conflict. Likewise for
- preferences. */
-
- set_preferences ();
-
- for (i = 0; i < (size_t) max_allocno; i++)
- {
- AND_COMPL_HARD_REG_SET (allocno[i].hard_reg_conflicts,
- eliminable_regset);
- AND_COMPL_HARD_REG_SET (allocno[i].hard_reg_copy_preferences,
- eliminable_regset);
- AND_COMPL_HARD_REG_SET (allocno[i].hard_reg_preferences,
- eliminable_regset);
- }
-
- /* Try to expand the preferences by merging them between allocnos. */
-
- expand_preferences ();
-
- /* Determine the order to allocate the remaining pseudo registers. */
-
- allocno_order = XNEWVEC (int, max_allocno);
- for (i = 0; i < (size_t) max_allocno; i++)
- allocno_order[i] = i;
-
- /* Default the size to 1, since allocno_compare uses it to divide by.
- Also convert allocno_live_length of zero to -1. A length of zero
- can occur when all the registers for that allocno have reg_live_length
- equal to -2. In this case, we want to make an allocno, but not
- allocate it. So avoid the divide-by-zero and set it to a low
- priority. */
-
- for (i = 0; i < (size_t) max_allocno; i++)
- {
- if (allocno[i].size == 0)
- allocno[i].size = 1;
- if (allocno[i].live_length == 0)
- allocno[i].live_length = -1;
- }
-
- qsort (allocno_order, max_allocno, sizeof (int), allocno_compare);
-
- prune_preferences ();
-
- if (dump_file)
- dump_conflicts (dump_file);
-
- /* Try allocating them, one by one, in that order,
- except for parameters marked with reg_live_length[regno] == -2. */
-
- for (i = 0; i < (size_t) max_allocno; i++)
- if (reg_renumber[allocno[allocno_order[i]].reg] < 0
- && REG_LIVE_LENGTH (allocno[allocno_order[i]].reg) >= 0)
- {
- if (!dbg_cnt (global_alloc_at_reg))
- break;
- /* If we have more than one register class,
- first try allocating in the class that is cheapest
- for this pseudo-reg. If that fails, try any reg. */
- if (N_REG_CLASSES > 1)
- {
- find_reg (allocno_order[i], 0, 0, 0, 0);
- if (reg_renumber[allocno[allocno_order[i]].reg] >= 0)
- continue;
- }
- if (reg_alternate_class (allocno[allocno_order[i]].reg) != NO_REGS)
- find_reg (allocno_order[i], 0, 1, 0, 0);
- }
-
- free (allocno_order);
- free (conflicts);
- }
-
- /* Do the reloads now while the allocno data still exists, so that we can
- try to assign new hard regs to any pseudo regs that are spilled. */
-
-#if 0 /* We need to eliminate regs even if there is no rtl code,
- for the sake of debugging information. */
- if (n_basic_blocks > NUM_FIXED_BLOCKS)
-#endif
- {
- build_insn_chain ();
- retval = reload (get_insns (), 1);
- }
-
- /* Clean up. */
- free (reg_allocno);
- free (num_allocnos_per_blk);
- free (partial_bitnum);
- free (allocno);
- if (adjacency != NULL)
- {
- free_alloc_pool (adjacency_pool);
- free (adjacency);
- }
-
- return retval;
-}
-
-/* Sort predicate for ordering the regnos. We want the regno to allocno
- mapping to have the property that all "global" regnos (ie, regnos that
- are referenced in more than one basic block) have smaller allocno values
- than "local" regnos (ie, regnos referenced in only one basic block).
- In addition, for two basic blocks "i" and "j" with i < j, all regnos
- local to basic block i should have smaller allocno values than regnos
- local to basic block j.
- Returns -1 (1) if *v1p should be allocated before (after) *v2p. */
-
-static int
-regno_compare (const void *v1p, const void *v2p)
-{
- int regno1 = *(const int *)v1p;
- int regno2 = *(const int *)v2p;
- int blk1 = REG_BASIC_BLOCK (regno1);
- int blk2 = REG_BASIC_BLOCK (regno2);
-
- /* Prefer lower numbered basic blocks. Note that global and unknown
- blocks have negative values, giving them high precedence. */
- if (blk1 - blk2)
- return blk1 - blk2;
-
- /* If both regs are referenced from the same block, sort by regno. */
- return regno1 - regno2;
-}
-
-/* Sort predicate for ordering the allocnos.
- Returns -1 (1) if *v1 should be allocated before (after) *v2. */
-
-static int
-allocno_compare (const void *v1p, const void *v2p)
-{
- int v1 = *(const int *)v1p, v2 = *(const int *)v2p;
- /* Note that the quotient will never be bigger than
- the value of floor_log2 times the maximum number of
- times a register can occur in one insn (surely less than 100)
- weighted by the frequency (maximally REG_FREQ_MAX).
- Multiplying this by 10000/REG_FREQ_MAX can't overflow. */
- int pri1
- = (((double) (floor_log2 (allocno[v1].n_refs) * allocno[v1].freq)
- / allocno[v1].live_length)
- * (10000 / REG_FREQ_MAX) * allocno[v1].size);
- int pri2
- = (((double) (floor_log2 (allocno[v2].n_refs) * allocno[v2].freq)
- / allocno[v2].live_length)
- * (10000 / REG_FREQ_MAX) * allocno[v2].size);
- if (pri2 - pri1)
- return pri2 - pri1;
-
- /* If regs are equally good, sort by allocno,
- so that the results of qsort leave nothing to chance. */
- return v1 - v2;
-}
-
-/* Expand the preference information by looking for cases where one allocno
- dies in an insn that sets an allocno. If those two allocnos don't conflict,
- merge any preferences between those allocnos. */
-
-static void
-expand_preferences (void)
-{
- rtx insn;
- rtx link;
- rtx set;
-
- /* We only try to handle the most common cases here. Most of the cases
- where this wins are reg-reg copies. */
-
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- if (INSN_P (insn)
- && (set = single_set (insn)) != 0
- && REG_P (SET_DEST (set))
- && reg_allocno[REGNO (SET_DEST (set))] >= 0)
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == REG_DEAD
- && REG_P (XEXP (link, 0))
- && reg_allocno[REGNO (XEXP (link, 0))] >= 0
- && ! conflict_p (reg_allocno[REGNO (SET_DEST (set))],
- reg_allocno[REGNO (XEXP (link, 0))]))
- {
- int a1 = reg_allocno[REGNO (SET_DEST (set))];
- int a2 = reg_allocno[REGNO (XEXP (link, 0))];
-
- if (XEXP (link, 0) == SET_SRC (set))
- {
- IOR_HARD_REG_SET (allocno[a1].hard_reg_copy_preferences,
- allocno[a2].hard_reg_copy_preferences);
- IOR_HARD_REG_SET (allocno[a2].hard_reg_copy_preferences,
- allocno[a1].hard_reg_copy_preferences);
- }
-
- IOR_HARD_REG_SET (allocno[a1].hard_reg_preferences,
- allocno[a2].hard_reg_preferences);
- IOR_HARD_REG_SET (allocno[a2].hard_reg_preferences,
- allocno[a1].hard_reg_preferences);
- IOR_HARD_REG_SET (allocno[a1].hard_reg_full_preferences,
- allocno[a2].hard_reg_full_preferences);
- IOR_HARD_REG_SET (allocno[a2].hard_reg_full_preferences,
- allocno[a1].hard_reg_full_preferences);
- }
-}
-
-
-/* Try to set a preference for an allocno to a hard register.
- We are passed DEST and SRC which are the operands of a SET. It is known
- that SRC is a register. If SRC or the first operand of SRC is a register,
- try to set a preference. If one of the two is a hard register and the other
- is a pseudo-register, mark the preference.
-
- Note that we are not as aggressive as local-alloc in trying to tie a
- pseudo-register to a hard register. */
-
-static void
-set_preference (rtx dest, rtx src)
-{
- unsigned int src_regno, dest_regno, end_regno;
- /* Amount to add to the hard regno for SRC, or subtract from that for DEST,
- to compensate for subregs in SRC or DEST. */
- int offset = 0;
- unsigned int i;
- int copy = 1;
-
- if (GET_RTX_FORMAT (GET_CODE (src))[0] == 'e')
- src = XEXP (src, 0), copy = 0;
-
- /* Get the reg number for both SRC and DEST.
- If neither is a reg, give up. */
-
- if (REG_P (src))
- src_regno = REGNO (src);
- else if (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src)))
- {
- src_regno = REGNO (SUBREG_REG (src));
-
- if (REGNO (SUBREG_REG (src)) < FIRST_PSEUDO_REGISTER)
- offset += subreg_regno_offset (REGNO (SUBREG_REG (src)),
- GET_MODE (SUBREG_REG (src)),
- SUBREG_BYTE (src),
- GET_MODE (src));
- else
- offset += (SUBREG_BYTE (src)
- / REGMODE_NATURAL_SIZE (GET_MODE (src)));
- }
- else
- return;
-
- if (REG_P (dest))
- dest_regno = REGNO (dest);
- else if (GET_CODE (dest) == SUBREG && REG_P (SUBREG_REG (dest)))
- {
- dest_regno = REGNO (SUBREG_REG (dest));
-
- if (REGNO (SUBREG_REG (dest)) < FIRST_PSEUDO_REGISTER)
- offset -= subreg_regno_offset (REGNO (SUBREG_REG (dest)),
- GET_MODE (SUBREG_REG (dest)),
- SUBREG_BYTE (dest),
- GET_MODE (dest));
- else
- offset -= (SUBREG_BYTE (dest)
- / REGMODE_NATURAL_SIZE (GET_MODE (dest)));
- }
- else
- return;
-
- /* Convert either or both to hard reg numbers. */
-
- if (reg_renumber[src_regno] >= 0)
- src_regno = reg_renumber[src_regno];
-
- if (reg_renumber[dest_regno] >= 0)
- dest_regno = reg_renumber[dest_regno];
-
- /* Now if one is a hard reg and the other is a global pseudo
- then give the other a preference. */
-
- if (dest_regno < FIRST_PSEUDO_REGISTER && src_regno >= FIRST_PSEUDO_REGISTER
- && reg_allocno[src_regno] >= 0)
- {
- dest_regno -= offset;
- if (dest_regno < FIRST_PSEUDO_REGISTER)
- {
- if (copy)
- SET_REGBIT (hard_reg_copy_preferences,
- reg_allocno[src_regno], dest_regno);
-
- SET_REGBIT (hard_reg_preferences,
- reg_allocno[src_regno], dest_regno);
- end_regno = end_hard_regno (GET_MODE (dest), dest_regno);
- for (i = dest_regno; i < end_regno; i++)
- SET_REGBIT (hard_reg_full_preferences, reg_allocno[src_regno], i);
- }
- }
-
- if (src_regno < FIRST_PSEUDO_REGISTER && dest_regno >= FIRST_PSEUDO_REGISTER
- && reg_allocno[dest_regno] >= 0)
- {
- src_regno += offset;
- if (src_regno < FIRST_PSEUDO_REGISTER)
- {
- if (copy)
- SET_REGBIT (hard_reg_copy_preferences,
- reg_allocno[dest_regno], src_regno);
-
- SET_REGBIT (hard_reg_preferences,
- reg_allocno[dest_regno], src_regno);
- end_regno = end_hard_regno (GET_MODE (src), src_regno);
- for (i = src_regno; i < end_regno; i++)
- SET_REGBIT (hard_reg_full_preferences, reg_allocno[dest_regno], i);
- }
- }
-}
-
-/* Helper function for set_preferences. */
-static void
-set_preferences_1 (rtx reg, const_rtx setter, void *data ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (reg) == SUBREG)
- reg = SUBREG_REG (reg);
-
- if (!REG_P (reg))
- return;
-
- gcc_assert (setter);
- if (GET_CODE (setter) != CLOBBER)
- set_preference (reg, SET_SRC (setter));
-}
-
-/* Scan all of the insns and initialize the preferences. */
-
-static void
-set_preferences (void)
-{
- basic_block bb;
- rtx insn;
- FOR_EACH_BB (bb)
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- if (!INSN_P (insn))
- continue;
-
- note_stores (PATTERN (insn), set_preferences_1, NULL);
- }
-}
-
-
-
-/* Prune the preferences for global registers to exclude registers that cannot
- be used.
-
- Compute `regs_someone_prefers', which is a bitmask of the hard registers
- that are preferred by conflicting registers of lower priority. If possible,
- we will avoid using these registers. */
-
-static void
-prune_preferences (void)
-{
- int i;
- int num;
- int *allocno_to_order = XNEWVEC (int, max_allocno);
-
- /* Scan least most important to most important.
- For each allocno, remove from preferences registers that cannot be used,
- either because of conflicts or register type. Then compute all registers
- preferred by each lower-priority register that conflicts. */
-
- for (i = max_allocno - 1; i >= 0; i--)
- {
- HARD_REG_SET temp;
-
- num = allocno_order[i];
- allocno_to_order[num] = i;
- COPY_HARD_REG_SET (temp, allocno[num].hard_reg_conflicts);
-
- if (allocno[num].calls_crossed == 0)
- IOR_HARD_REG_SET (temp, fixed_reg_set);
- else
- IOR_HARD_REG_SET (temp, call_used_reg_set);
-
- IOR_COMPL_HARD_REG_SET
- (temp,
- reg_class_contents[(int) reg_preferred_class (allocno[num].reg)]);
-
- AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_preferences, temp);
- AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_copy_preferences, temp);
- AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_full_preferences, temp);
- }
-
- for (i = max_allocno - 1; i >= 0; i--)
- {
- /* Merge in the preferences of lower-priority registers (they have
- already been pruned). If we also prefer some of those registers,
- don't exclude them unless we are of a smaller size (in which case
- we want to give the lower-priority allocno the first chance for
- these registers). */
- HARD_REG_SET temp, temp2;
- int allocno2;
- adjacency_iter ai;
-
- num = allocno_order[i];
-
- CLEAR_HARD_REG_SET (temp);
- CLEAR_HARD_REG_SET (temp2);
-
- FOR_EACH_CONFLICT (num, allocno2, ai)
- {
- if (allocno_to_order[allocno2] > i)
- {
- if (allocno[allocno2].size <= allocno[num].size)
- IOR_HARD_REG_SET (temp,
- allocno[allocno2].hard_reg_full_preferences);
- else
- IOR_HARD_REG_SET (temp2,
- allocno[allocno2].hard_reg_full_preferences);
- }
- }
-
- AND_COMPL_HARD_REG_SET (temp, allocno[num].hard_reg_full_preferences);
- IOR_HARD_REG_SET (temp, temp2);
- COPY_HARD_REG_SET (allocno[num].regs_someone_prefers, temp);
- }
- free (allocno_to_order);
-}
-
-/* Assign a hard register to allocno NUM; look for one that is the beginning
- of a long enough stretch of hard regs none of which conflicts with ALLOCNO.
- The registers marked in PREFREGS are tried first.
-
- LOSERS, if nonzero, is a HARD_REG_SET indicating registers that cannot
- be used for this allocation.
-
- If ALT_REGS_P is zero, consider only the preferred class of ALLOCNO's reg.
- Otherwise ignore that preferred class and use the alternate class.
-
- If ACCEPT_CALL_CLOBBERED is nonzero, accept a call-clobbered hard reg that
- will have to be saved and restored at calls.
-
- RETRYING is nonzero if this is called from retry_global_alloc.
-
- If we find one, record it in reg_renumber.
- If not, do nothing. */
-
-static void
-find_reg (int num, HARD_REG_SET losers, int alt_regs_p, int accept_call_clobbered, int retrying)
-{
- int i, best_reg, pass;
- HARD_REG_SET used, used1, used2;
-
- enum reg_class rclass = (alt_regs_p
- ? reg_alternate_class (allocno[num].reg)
- : reg_preferred_class (allocno[num].reg));
- enum machine_mode mode = PSEUDO_REGNO_MODE (allocno[num].reg);
-
- if (accept_call_clobbered)
- COPY_HARD_REG_SET (used1, call_fixed_reg_set);
- else if (allocno[num].calls_crossed == 0)
- COPY_HARD_REG_SET (used1, fixed_reg_set);
- else
- COPY_HARD_REG_SET (used1, call_used_reg_set);
-
- /* Some registers should not be allocated in global-alloc. */
- IOR_HARD_REG_SET (used1, no_global_alloc_regs);
- if (losers)
- IOR_HARD_REG_SET (used1, losers);
-
- IOR_COMPL_HARD_REG_SET (used1, reg_class_contents[(int) rclass]);
-
-#ifdef EH_RETURN_DATA_REGNO
- if (allocno[num].no_eh_reg)
- {
- unsigned int j;
- for (j = 0; ; ++j)
- {
- unsigned int regno = EH_RETURN_DATA_REGNO (j);
- if (regno == INVALID_REGNUM)
- break;
- SET_HARD_REG_BIT (used1, regno);
- }
- }
-#endif
-
- COPY_HARD_REG_SET (used2, used1);
-
- IOR_HARD_REG_SET (used1, allocno[num].hard_reg_conflicts);
-
-#ifdef CANNOT_CHANGE_MODE_CLASS
- cannot_change_mode_set_regs (&used1, mode, allocno[num].reg);
-#endif
-
- /* Try each hard reg to see if it fits. Do this in two passes.
- In the first pass, skip registers that are preferred by some other pseudo
- to give it a better chance of getting one of those registers. Only if
- we can't get a register when excluding those do we take one of them.
- However, we never allocate a register for the first time in pass 0. */
-
- COPY_HARD_REG_SET (used, used1);
- IOR_COMPL_HARD_REG_SET (used, regs_used_so_far);
- IOR_HARD_REG_SET (used, allocno[num].regs_someone_prefers);
-
- best_reg = -1;
- for (i = FIRST_PSEUDO_REGISTER, pass = 0;
- pass <= 1 && i >= FIRST_PSEUDO_REGISTER;
- pass++)
- {
- if (pass == 1)
- COPY_HARD_REG_SET (used, used1);
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
-#ifdef REG_ALLOC_ORDER
- int regno = reg_alloc_order[i];
-#else
- int regno = i;
-#endif
- if (! TEST_HARD_REG_BIT (used, regno)
- && HARD_REGNO_MODE_OK (regno, mode)
- && (allocno[num].calls_crossed == 0
- || accept_call_clobbered
- || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
- {
- int j;
- int lim = end_hard_regno (mode, regno);
- for (j = regno + 1;
- (j < lim
- && ! TEST_HARD_REG_BIT (used, j));
- j++);
- if (j == lim)
- {
- best_reg = regno;
- break;
- }
-#ifndef REG_ALLOC_ORDER
- i = j; /* Skip starting points we know will lose */
-#endif
- }
- }
- }
-
- /* See if there is a preferred register with the same class as the register
- we allocated above. Making this restriction prevents register
- preferencing from creating worse register allocation.
-
- Remove from the preferred registers and conflicting registers. Note that
- additional conflicts may have been added after `prune_preferences' was
- called.
-
- First do this for those register with copy preferences, then all
- preferred registers. */
-
- AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_copy_preferences, used);
- if (!hard_reg_set_empty_p (allocno[num].hard_reg_copy_preferences)
- && best_reg >= 0)
- {
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (allocno[num].hard_reg_copy_preferences, i)
- && HARD_REGNO_MODE_OK (i, mode)
- && (allocno[num].calls_crossed == 0
- || accept_call_clobbered
- || ! HARD_REGNO_CALL_PART_CLOBBERED (i, mode))
- && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg)
- || reg_class_subset_p (REGNO_REG_CLASS (i),
- REGNO_REG_CLASS (best_reg))
- || reg_class_subset_p (REGNO_REG_CLASS (best_reg),
- REGNO_REG_CLASS (i))))
- {
- int j;
- int lim = end_hard_regno (mode, i);
- for (j = i + 1;
- (j < lim
- && ! TEST_HARD_REG_BIT (used, j)
- && (REGNO_REG_CLASS (j)
- == REGNO_REG_CLASS (best_reg + (j - i))
- || reg_class_subset_p (REGNO_REG_CLASS (j),
- REGNO_REG_CLASS (best_reg + (j - i)))
- || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)),
- REGNO_REG_CLASS (j))));
- j++);
- if (j == lim)
- {
- best_reg = i;
- goto no_prefs;
- }
- }
- }
-
- AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_preferences, used);
- if (!hard_reg_set_empty_p (allocno[num].hard_reg_preferences)
- && best_reg >= 0)
- {
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (allocno[num].hard_reg_preferences, i)
- && HARD_REGNO_MODE_OK (i, mode)
- && (allocno[num].calls_crossed == 0
- || accept_call_clobbered
- || ! HARD_REGNO_CALL_PART_CLOBBERED (i, mode))
- && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg)
- || reg_class_subset_p (REGNO_REG_CLASS (i),
- REGNO_REG_CLASS (best_reg))
- || reg_class_subset_p (REGNO_REG_CLASS (best_reg),
- REGNO_REG_CLASS (i))))
- {
- int j;
- int lim = end_hard_regno (mode, i);
- for (j = i + 1;
- (j < lim
- && ! TEST_HARD_REG_BIT (used, j)
- && (REGNO_REG_CLASS (j)
- == REGNO_REG_CLASS (best_reg + (j - i))
- || reg_class_subset_p (REGNO_REG_CLASS (j),
- REGNO_REG_CLASS (best_reg + (j - i)))
- || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)),
- REGNO_REG_CLASS (j))));
- j++);
- if (j == lim)
- {
- best_reg = i;
- break;
- }
- }
- }
- no_prefs:
-
- /* If we haven't succeeded yet, try with caller-saves.
- We need not check to see if the current function has nonlocal
- labels because we don't put any pseudos that are live over calls in
- registers in that case. */
-
- if (flag_caller_saves && best_reg < 0)
- {
- /* Did not find a register. If it would be profitable to
- allocate a call-clobbered register and save and restore it
- around calls, do that. Don't do this if it crosses any calls
- that might throw. */
- if (! accept_call_clobbered
- && allocno[num].calls_crossed != 0
- && allocno[num].throwing_calls_crossed == 0
- && CALLER_SAVE_PROFITABLE (optimize_function_for_size_p (cfun) ? allocno[num].n_refs : allocno[num].freq,
- optimize_function_for_size_p (cfun) ? allocno[num].calls_crossed
- : allocno[num].freq_calls_crossed))
- {
- HARD_REG_SET new_losers;
- if (! losers)
- CLEAR_HARD_REG_SET (new_losers);
- else
- COPY_HARD_REG_SET (new_losers, losers);
-
- IOR_HARD_REG_SET(new_losers, losing_caller_save_reg_set);
- find_reg (num, new_losers, alt_regs_p, 1, retrying);
- if (reg_renumber[allocno[num].reg] >= 0)
- {
- caller_save_needed = 1;
- return;
- }
- }
- }
-
- /* If we haven't succeeded yet,
- see if some hard reg that conflicts with us
- was utilized poorly by local-alloc.
- If so, kick out the regs that were put there by local-alloc
- so we can use that hard reg instead. */
- if (best_reg < 0 && !retrying
- /* Let's not bother with multi-reg allocnos. */
- && allocno[num].size == 1
- && REG_BASIC_BLOCK (allocno[num].reg) == REG_BLOCK_GLOBAL)
- {
- /* Count from the end, to find the least-used ones first. */
- for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
- {
-#ifdef REG_ALLOC_ORDER
- int regno = reg_alloc_order[i];
-#else
- int regno = i;
-#endif
-
- if (local_reg_n_refs[regno] != 0
- /* Don't use a reg no good for this pseudo. */
- && ! TEST_HARD_REG_BIT (used2, regno)
- && HARD_REGNO_MODE_OK (regno, mode)
- /* The code below assumes that we need only a single
- register, but the check of allocno[num].size above
- was not enough. Sometimes we need more than one
- register for a single-word value. */
- && hard_regno_nregs[regno][mode] == 1
- && (allocno[num].calls_crossed == 0
- || accept_call_clobbered
- || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
-#ifdef CANNOT_CHANGE_MODE_CLASS
- && ! invalid_mode_change_p (regno, REGNO_REG_CLASS (regno),
- mode)
-#endif
-#ifdef STACK_REGS
- && (!allocno[num].no_stack_reg
- || regno < FIRST_STACK_REG || regno > LAST_STACK_REG)
-#endif
- )
- {
- /* We explicitly evaluate the divide results into temporary
- variables so as to avoid excess precision problems that occur
- on an i386-unknown-sysv4.2 (unixware) host. */
-
- double tmp1 = ((double) local_reg_freq[regno] * local_reg_n_refs[regno]
- / local_reg_live_length[regno]);
- double tmp2 = ((double) allocno[num].freq * allocno[num].n_refs
- / allocno[num].live_length);
-
- if (tmp1 < tmp2)
- {
- /* Hard reg REGNO was used less in total by local regs
- than it would be used by this one allocno! */
- int k;
- if (dump_file)
- {
- fprintf (dump_file, "Regno %d better for global %d, ",
- regno, allocno[num].reg);
- fprintf (dump_file, "fr:%d, ll:%d, nr:%d ",
- allocno[num].freq, allocno[num].live_length,
- allocno[num].n_refs);
- fprintf (dump_file, "(was: fr:%d, ll:%d, nr:%d)\n",
- local_reg_freq[regno],
- local_reg_live_length[regno],
- local_reg_n_refs[regno]);
- }
-
- for (k = 0; k < max_regno; k++)
- if (reg_renumber[k] >= 0)
- {
- int r = reg_renumber[k];
- int endregno
- = end_hard_regno (PSEUDO_REGNO_MODE (k), r);
-
- if (regno >= r && regno < endregno)
- {
- if (dump_file)
- fprintf (dump_file,
- "Local Reg %d now on stack\n", k);
- reg_renumber[k] = -1;
- }
- }
-
- best_reg = regno;
- break;
- }
- }
- }
- }
-
- /* Did we find a register? */
-
- if (best_reg >= 0)
- {
- int lim, j;
- HARD_REG_SET this_reg;
- adjacency_iter ai;
-
- /* Yes. Record it as the hard register of this pseudo-reg. */
- reg_renumber[allocno[num].reg] = best_reg;
-
- /* Make a set of the hard regs being allocated. */
- CLEAR_HARD_REG_SET (this_reg);
- lim = end_hard_regno (mode, best_reg);
- for (j = best_reg; j < lim; j++)
- {
- SET_HARD_REG_BIT (this_reg, j);
- SET_HARD_REG_BIT (regs_used_so_far, j);
- /* This is no longer a reg used just by local regs. */
- local_reg_n_refs[j] = 0;
- local_reg_freq[j] = 0;
- }
- /* For each other pseudo-reg conflicting with this one,
- mark it as conflicting with the hard regs this one occupies. */
- FOR_EACH_CONFLICT (num, j, ai)
- {
- IOR_HARD_REG_SET (allocno[j].hard_reg_conflicts, this_reg);
- }
- }
-}
-
-/* Called from `reload' to look for a hard reg to put pseudo reg REGNO in.
- Perhaps it had previously seemed not worth a hard reg,
- or perhaps its old hard reg has been commandeered for reloads.
- FORBIDDEN_REGS indicates certain hard regs that may not be used, even if
- they do not appear to be allocated.
- If FORBIDDEN_REGS is zero, no regs are forbidden. */
-
-void
-retry_global_alloc (int regno, HARD_REG_SET forbidden_regs)
-{
- int alloc_no = reg_allocno[regno];
- if (alloc_no >= 0)
- {
- /* If we have more than one register class,
- first try allocating in the class that is cheapest
- for this pseudo-reg. If that fails, try any reg. */
- if (N_REG_CLASSES > 1)
- find_reg (alloc_no, forbidden_regs, 0, 0, 1);
- if (reg_renumber[regno] < 0
- && reg_alternate_class (regno) != NO_REGS)
- find_reg (alloc_no, forbidden_regs, 1, 0, 1);
-
- /* If we found a register, modify the RTL for the register to
- show the hard register, and mark that register live. */
- if (reg_renumber[regno] >= 0)
- {
- SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
- mark_home_live (regno);
- }
- }
-}
-
-/* Indicate that hard register number FROM was eliminated and replaced with
- an offset from hard register number TO. The status of hard registers live
- at the start of a basic block is updated by replacing a use of FROM with
- a use of TO. */
-
-void
-mark_elimination (int from, int to)
-{
- basic_block bb;
-
- FOR_EACH_BB (bb)
- {
- /* We don't use LIVE info in IRA. */
- regset r = (flag_ira ? DF_LR_IN (bb) : DF_LIVE_IN (bb));
- if (REGNO_REG_SET_P (r, from))
- {
- CLEAR_REGNO_REG_SET (r, from);
- SET_REGNO_REG_SET (r, to);
- }
- }
-}
-
-/* Print chain C to FILE. */
-
-static void
-print_insn_chain (FILE *file, struct insn_chain *c)
-{
- fprintf (file, "insn=%d, ", INSN_UID(c->insn));
- bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
- bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
-}
-
-
-/* Print all reload_insn_chains to FILE. */
-
-static void
-print_insn_chains (FILE *file)
-{
- struct insn_chain *c;
- for (c = reload_insn_chain; c ; c = c->next)
- print_insn_chain (file, c);
-}
-
-/* Return true if pseudo REGNO should be added to the live_throughout
- or dead_or_set sets of the insn chains for reload consideration. */
-
-static bool
-pseudo_for_reload_consideration_p (int regno)
-{
- /* Consider spilled pseudos too for IRA because they still have a
- chance to get hard registers during reload when IRA is used. */
- return (reg_renumber[regno] >= 0
- || (flag_ira && ira_conflicts_p && flag_ira_share_spill_slots));
-}
-
-/* Walk the insns of the current function and build reload_insn_chain,
- and record register life information. */
-
-void
-build_insn_chain (void)
-{
- unsigned int i;
- struct insn_chain **p = &reload_insn_chain;
- basic_block bb;
- struct insn_chain *c = NULL;
- struct insn_chain *next = NULL;
- bitmap live_relevant_regs = BITMAP_ALLOC (NULL);
- bitmap elim_regset = BITMAP_ALLOC (NULL);
- /* live_subregs is a vector used to keep accurate information about
- which hardregs are live in multiword pseudos. live_subregs and
- live_subregs_used are indexed by pseudo number. The live_subregs
- entry for a particular pseudo is only used if the corresponding
- element is nonzero in live_subregs_used. The value in
- live_subregs_used is the number of bytes that the pseudo can
- occupy. */
- sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
- int *live_subregs_used = XNEWVEC (int, max_regno);
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (eliminable_regset, i))
- bitmap_set_bit (elim_regset, i);
- FOR_EACH_BB_REVERSE (bb)
- {
- bitmap_iterator bi;
- rtx insn;
-
- CLEAR_REG_SET (live_relevant_regs);
- memset (live_subregs_used, 0, max_regno * sizeof (int));
-
- EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), 0, i, bi)
- {
- if (i >= FIRST_PSEUDO_REGISTER)
- break;
- bitmap_set_bit (live_relevant_regs, i);
- }
-
- EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), FIRST_PSEUDO_REGISTER, i, bi)
- {
- if (pseudo_for_reload_consideration_p (i))
- bitmap_set_bit (live_relevant_regs, i);
- }
-
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- if (!NOTE_P (insn) && !BARRIER_P (insn))
- {
- unsigned int uid = INSN_UID (insn);
- df_ref *def_rec;
- df_ref *use_rec;
-
- c = new_insn_chain ();
- c->next = next;
- next = c;
- *p = c;
- p = &c->prev;
-
- c->insn = insn;
- c->block = bb->index;
-
- if (INSN_P (insn))
- for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- unsigned int regno = DF_REF_REGNO (def);
-
- /* Ignore may clobbers because these are generated
- from calls. However, every other kind of def is
- added to dead_or_set. */
- if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
- {
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- if (!fixed_regs[regno])
- bitmap_set_bit (&c->dead_or_set, regno);
- }
- else if (pseudo_for_reload_consideration_p (regno))
- bitmap_set_bit (&c->dead_or_set, regno);
- }
-
- if ((regno < FIRST_PSEUDO_REGISTER
- || reg_renumber[regno] >= 0
- || (flag_ira && ira_conflicts_p))
- && (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
- {
- rtx reg = DF_REF_REG (def);
-
- /* We can model subregs, but not if they are
- wrapped in ZERO_EXTRACTS. */
- if (GET_CODE (reg) == SUBREG
- && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
- {
- unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start
- + GET_MODE_SIZE (GET_MODE (reg));
-
- ra_init_live_subregs (bitmap_bit_p (live_relevant_regs,
- regno),
- live_subregs,
- live_subregs_used,
- regno, reg);
-
- if (!DF_REF_FLAGS_IS_SET
- (def, DF_REF_STRICT_LOW_PART))
- {
- /* Expand the range to cover entire words.
- Bytes added here are "don't care". */
- start = start / UNITS_PER_WORD * UNITS_PER_WORD;
- last = ((last + UNITS_PER_WORD - 1)
- / UNITS_PER_WORD * UNITS_PER_WORD);
- }
-
- /* Ignore the paradoxical bits. */
- if ((int)last > live_subregs_used[regno])
- last = live_subregs_used[regno];
-
- while (start < last)
- {
- RESET_BIT (live_subregs[regno], start);
- start++;
- }
-
- if (sbitmap_empty_p (live_subregs[regno]))
- {
- live_subregs_used[regno] = 0;
- bitmap_clear_bit (live_relevant_regs, regno);
- }
- else
- /* Set live_relevant_regs here because
- that bit has to be true to get us to
- look at the live_subregs fields. */
- bitmap_set_bit (live_relevant_regs, regno);
- }
- else
- {
- /* DF_REF_PARTIAL is generated for
- subregs, STRICT_LOW_PART, and
- ZERO_EXTRACT. We handle the subreg
- case above so here we have to keep from
- modeling the def as a killing def. */
- if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
- {
- bitmap_clear_bit (live_relevant_regs, regno);
- live_subregs_used[regno] = 0;
- }
- }
- }
- }
-
- bitmap_and_compl_into (live_relevant_regs, elim_regset);
- bitmap_copy (&c->live_throughout, live_relevant_regs);
-
- if (INSN_P (insn))
- for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
- {
- df_ref use = *use_rec;
- unsigned int regno = DF_REF_REGNO (use);
- rtx reg = DF_REF_REG (use);
-
- /* DF_REF_READ_WRITE on a use means that this use
- is fabricated from a def that is a partial set
- to a multiword reg. Here, we only model the
- subreg case that is not wrapped in ZERO_EXTRACT
- precisely so we do not need to look at the
- fabricated use. */
- if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
- && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
- && DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
- continue;
-
- /* Add the last use of each var to dead_or_set. */
- if (!bitmap_bit_p (live_relevant_regs, regno))
- {
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- if (!fixed_regs[regno])
- bitmap_set_bit (&c->dead_or_set, regno);
- }
- else if (pseudo_for_reload_consideration_p (regno))
- bitmap_set_bit (&c->dead_or_set, regno);
- }
-
- if (regno < FIRST_PSEUDO_REGISTER
- || pseudo_for_reload_consideration_p (regno))
- {
- if (GET_CODE (reg) == SUBREG
- && !DF_REF_FLAGS_IS_SET (use,
- DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT))
- {
- unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start
- + GET_MODE_SIZE (GET_MODE (reg));
-
- ra_init_live_subregs (bitmap_bit_p (live_relevant_regs,
- regno),
- live_subregs,
- live_subregs_used,
- regno, reg);
-
- /* Ignore the paradoxical bits. */
- if ((int)last > live_subregs_used[regno])
- last = live_subregs_used[regno];
-
- while (start < last)
- {
- SET_BIT (live_subregs[regno], start);
- start++;
- }
- }
- else
- /* Resetting the live_subregs_used is
- effectively saying do not use the subregs
- because we are reading the whole
- pseudo. */
- live_subregs_used[regno] = 0;
- bitmap_set_bit (live_relevant_regs, regno);
- }
- }
- }
- }
-
- /* FIXME!! The following code is a disaster. Reload needs to see the
- labels and jump tables that are just hanging out in between
- the basic blocks. See pr33676. */
- insn = BB_HEAD (bb);
-
- /* Skip over the barriers and cruft. */
- while (insn && (BARRIER_P (insn) || NOTE_P (insn)
- || BLOCK_FOR_INSN (insn) == bb))
- insn = PREV_INSN (insn);
-
- /* We add anything except barriers and notes here; the focus is
- to get the labels and jump tables into the
- reload_insn_chain. */
- while (insn)
- {
- if (!NOTE_P (insn) && !BARRIER_P (insn))
- {
- if (BLOCK_FOR_INSN (insn))
- break;
-
- c = new_insn_chain ();
- c->next = next;
- next = c;
- *p = c;
- p = &c->prev;
-
- /* The block makes no sense here, but it is what the old
- code did. */
- c->block = bb->index;
- c->insn = insn;
- bitmap_copy (&c->live_throughout, live_relevant_regs);
- }
- insn = PREV_INSN (insn);
- }
- }
-
- for (i = 0; i < (unsigned int) max_regno; i++)
- if (live_subregs[i])
- free (live_subregs[i]);
-
- reload_insn_chain = c;
- *p = NULL;
-
- free (live_subregs);
- free (live_subregs_used);
- BITMAP_FREE (live_relevant_regs);
- BITMAP_FREE (elim_regset);
-
- if (dump_file)
- print_insn_chains (dump_file);
-}
-
-/* Print debugging trace information if -dg switch is given,
- showing the information on which the allocation decisions are based. */
-
-static void
-dump_conflicts (FILE *file)
-{
- int i;
- int regno;
- int has_preferences;
- int nregs;
- nregs = 0;
- for (i = 0; i < max_allocno; i++)
- {
- if (reg_renumber[allocno[allocno_order[i]].reg] >= 0)
- continue;
- nregs++;
- }
- fprintf (file, ";; %d regs to allocate:", nregs);
- for (regno = 0; regno < max_regno; regno++)
- if ((i = reg_allocno[regno]) >= 0)
- {
- int j;
- if (reg_renumber[allocno[allocno_order[i]].reg] >= 0)
- continue;
- fprintf (file, " %d", allocno[allocno_order[i]].reg);
- for (j = 0; j < max_regno; j++)
- if (reg_allocno[j] == allocno_order[i]
- && j != allocno[allocno_order[i]].reg)
- fprintf (file, "+%d", j);
- if (allocno[allocno_order[i]].size != 1)
- fprintf (file, " (%d)", allocno[allocno_order[i]].size);
- }
- fprintf (file, "\n");
-
- for (regno = 0; regno < max_regno; regno++)
- if ((i = reg_allocno[regno]) >= 0)
- {
- int j;
- adjacency_iter ai;
- fprintf (file, ";; %d conflicts:", allocno[i].reg);
- FOR_EACH_CONFLICT (i, j, ai)
- {
- fprintf (file, " %d", allocno[j].reg);
- }
- for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
- if (TEST_HARD_REG_BIT (allocno[i].hard_reg_conflicts, j)
- && !fixed_regs[j])
- fprintf (file, " %d", j);
- fprintf (file, "\n");
-
- has_preferences = 0;
- for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
- if (TEST_HARD_REG_BIT (allocno[i].hard_reg_preferences, j))
- has_preferences = 1;
-
- if (!has_preferences)
- continue;
- fprintf (file, ";; %d preferences:", allocno[i].reg);
- for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
- if (TEST_HARD_REG_BIT (allocno[i].hard_reg_preferences, j))
- fprintf (file, " %d", j);
- fprintf (file, "\n");
- }
- fprintf (file, "\n");
-}
-
-void
-dump_global_regs (FILE *file)
-{
- int i, j;
-
- fprintf (file, ";; Register dispositions:\n");
- for (i = FIRST_PSEUDO_REGISTER, j = 0; i < max_regno; i++)
- if (reg_renumber[i] >= 0)
- {
- fprintf (file, "%d in %d ", i, reg_renumber[i]);
- if (++j % 6 == 0)
- fprintf (file, "\n");
- }
-
- fprintf (file, "\n\n;; Hard regs used: ");
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (df_regs_ever_live_p (i))
- fprintf (file, " %d", i);
- fprintf (file, "\n\n");
-}
-
-
-static bool
-gate_handle_global_alloc (void)
-{
- return ! flag_ira;
-}
-
-/* Run the old register allocator. Return TRUE if we must exit
- rest_of_compilation upon return. */
-static unsigned int
-rest_of_handle_global_alloc (void)
-{
- bool failure;
-
- /* If optimizing, allocate remaining pseudo-regs. Do the reload
- pass fixing up any insns that are invalid. */
- if (optimize && dbg_cnt (global_alloc_at_func))
- failure = global_alloc ();
- else
- {
- /* There is just too much going on in the register allocators to
- keep things up to date. At the end we have to rescan anyway
- because things change when the reload_completed flag is set.
- So we just turn off scanning and we will rescan by hand. */
- df_set_flags (DF_NO_INSN_RESCAN);
- compute_regsets (&eliminable_regset, &no_global_alloc_regs);
- build_insn_chain ();
- df_set_flags (DF_NO_INSN_RESCAN);
- failure = reload (get_insns (), 0);
- }
-
- if (dump_enabled_p (pass_global_alloc.pass.static_pass_number))
- {
- timevar_push (TV_DUMP);
- dump_global_regs (dump_file);
- timevar_pop (TV_DUMP);
- }
-
- /* FIXME: This appears on the surface to be the wrong thing to do.
- So much of the compiler checks reload_completed to see whether it
- is running after reload that this seems doomed to failure.
- We should be returning a value that says that we have found
- errors so that nothing but the cleanup passes are run
- afterwards. */
- gcc_assert (reload_completed || failure);
- reload_completed = !failure;
-
- /* The world has changed so much that at this point we might as well
- just rescan everything. Note that df_rescan_all_insns is not
- going to help here because it does not touch the artificial uses
- and defs. */
- df_finish_pass (true);
- if (optimize > 1)
- df_live_add_problem ();
- df_scan_alloc (NULL);
- df_scan_blocks ();
-
- if (optimize)
- df_analyze ();
-
- regstat_free_n_sets_and_refs ();
- regstat_free_ri ();
- return 0;
-}
-
-struct rtl_opt_pass pass_global_alloc =
-{
- {
- RTL_PASS,
- "greg", /* name */
- gate_handle_global_alloc, /* gate */
- rest_of_handle_global_alloc, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_GLOBAL_ALLOC, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_rtl_sharing
- | TODO_ggc_collect /* todo_flags_finish */
- }
-};
-
diff --git a/gcc/graphite.c b/gcc/graphite.c
index f169f725290..3cb24b86b16 100644
--- a/gcc/graphite.c
+++ b/gcc/graphite.c
@@ -2178,6 +2178,7 @@ graphite_verify (void)
verify_dominators (CDI_DOMINATORS);
verify_dominators (CDI_POST_DOMINATORS);
verify_ssa (false);
+ verify_loop_closed_ssa ();
#endif
}
@@ -5229,7 +5230,8 @@ scop_adjust_phis_for_liveouts (scop_p scop, basic_block bb, edge false_e,
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
- unsigned i, false_i;
+ unsigned i;
+ unsigned false_i = 0;
gimple phi = gsi_stmt (si);
if (!is_gimple_reg (PHI_RESULT (phi)))
@@ -5376,9 +5378,9 @@ compute_cloog_iv_types (struct clast_stmt *stmt)
}
/* GIMPLE Loop Generator: generates loops from STMT in GIMPLE form for
- the given SCOP. */
+ the given SCOP. Return true if code generation succeeded. */
-static void
+static bool
gloog (scop_p scop, struct clast_stmt *stmt)
{
edge new_scop_exit_edge = NULL;
@@ -5387,6 +5389,19 @@ gloog (scop_p scop, struct clast_stmt *stmt)
loop_p context_loop;
ifsese if_region = NULL;
+ /* To maintain the loop closed SSA form, we have to keep the phi
+ nodes after the last loop in the scop. */
+ if (loop_depth (SESE_EXIT (SCOP_REGION (scop))->dest->loop_father)
+ != loop_depth (SESE_EXIT (SCOP_REGION (scop))->src->loop_father))
+ {
+ basic_block bb = SESE_EXIT (SCOP_REGION (scop))->dest;
+ SESE_EXIT (SCOP_REGION (scop)) = split_block_after_labels (bb);
+ bitmap_set_bit (SCOP_BBS_B (scop), bb->index);
+ pointer_set_insert (SESE_REGION_BBS (SCOP_REGION (scop)), bb);
+ }
+
+ recompute_all_dominators ();
+ graphite_verify ();
if_region = move_sese_in_condition (SCOP_REGION (scop));
sese_build_livein_liveouts (SCOP_REGION (scop));
scop_insert_phis_for_liveouts (SCOP_REGION (scop),
@@ -5412,6 +5427,7 @@ gloog (scop_p scop, struct clast_stmt *stmt)
recompute_all_dominators ();
graphite_verify ();
+ return true;
}
/* Returns the number of data references in SCOP. */
@@ -6040,6 +6056,7 @@ graphite_transform_loops (void)
{
int i;
scop_p scop;
+ bool transform_done = false;
if (number_of_loops () <= 1)
return;
@@ -6098,7 +6115,7 @@ graphite_transform_loops (void)
}
if (graphite_apply_transformations (scop))
- gloog (scop, find_transform (scop));
+ transform_done = gloog (scop, find_transform (scop));
#ifdef ENABLE_CHECKING
else
{
@@ -6109,7 +6126,9 @@ graphite_transform_loops (void)
}
/* Cleanup. */
- cleanup_tree_cfg ();
+ if (transform_done)
+ cleanup_tree_cfg ();
+
free_scops (current_scops);
cloog_finalize ();
free_original_copy_tables ();
diff --git a/gcc/hard-reg-set.h b/gcc/hard-reg-set.h
index 7e17caba9df..2b0d079006a 100644
--- a/gcc/hard-reg-set.h
+++ b/gcc/hard-reg-set.h
@@ -601,9 +601,6 @@ extern char call_really_used_regs[];
extern HARD_REG_SET call_used_reg_set;
-/* Registers that we don't want to caller save. */
-extern HARD_REG_SET losing_caller_save_reg_set;
-
/* Indexed by hard register number, contains 1 for registers that are
fixed use -- i.e. in fixed_regs -- or a function value return register
or TARGET_STRUCT_VALUE_RTX or STATIC_CHAIN_REGNUM. These are the
diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index f1adb1da06a..fcad642ae12 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -2537,6 +2537,7 @@ coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
int i, j, n, last_coalesced_allocno_num;
ira_allocno_t allocno, a;
bool merged_p = false;
+ bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
slot_coalesced_allocnos_live_ranges
= (allocno_live_range_t *) ira_allocate (sizeof (allocno_live_range_t)
@@ -2550,6 +2551,7 @@ coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
{
allocno = spilled_coalesced_allocnos[i];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
+ || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
|| (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
&& (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
|| ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
@@ -2559,6 +2561,7 @@ coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
a = spilled_coalesced_allocnos[j];
n = ALLOCNO_TEMP (a);
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
+ && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
&& (ALLOCNO_REGNO (a) >= ira_reg_equiv_len
|| (! ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
&& ira_reg_equiv_const[ALLOCNO_REGNO (a)] == NULL_RTX))
@@ -2959,7 +2962,7 @@ ira_reuse_stack_slot (int regno, unsigned int inherent_size,
bitmap_iterator bi;
struct ira_spilled_reg_stack_slot *slot = NULL;
- ira_assert (flag_ira && inherent_size == PSEUDO_REGNO_BYTES (regno)
+ ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
&& inherent_size <= total_size
&& ALLOCNO_HARD_REGNO (allocno) < 0);
if (! flag_ira_share_spill_slots)
@@ -3071,7 +3074,7 @@ ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
int slot_num;
ira_allocno_t allocno;
- ira_assert (flag_ira && PSEUDO_REGNO_BYTES (regno) <= total_size);
+ ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
allocno = ira_regno_allocno_map[regno];
slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
if (slot_num == -1)
diff --git a/gcc/ira.c b/gcc/ira.c
index 5830bae1634..8dee2298e18 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -316,6 +316,7 @@ along with GCC; see the file COPYING3. If not see
#include "timevar.h"
#include "tree-pass.h"
#include "output.h"
+#include "except.h"
#include "reload.h"
#include "errors.h"
#include "integrate.h"
@@ -349,6 +350,10 @@ int ira_reg_cost, ira_mem_cost;
int ira_load_cost, ira_store_cost, ira_shuffle_cost;
int ira_move_loops_num, ira_additional_jumps_num;
+/* All registers that can be eliminated. */
+
+HARD_REG_SET eliminable_regset;
+
/* Map: hard regs X modes -> set of hard registers for storing value
of given mode starting with given hard register. */
HARD_REG_SET ira_reg_mode_hard_regset[FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES];
@@ -1527,6 +1532,14 @@ find_reg_equiv_invariant_const (void)
+/* Vector of substitutions of register numbers,
+ used to map pseudo regs into hardware regs.
+ This is set up as a result of register allocation.
+ Element N is the hard reg assigned to pseudo reg N,
+ or is -1 if no hard reg was assigned.
+ If N is a hard reg number, element N is N. */
+short *reg_renumber;
+
/* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
the allocation found by IRA. */
static void
@@ -1824,6 +1837,1207 @@ too_high_register_pressure_p (void)
+/* Indicate that hard register number FROM was eliminated and replaced with
+ an offset from hard register number TO. The status of hard registers live
+ at the start of a basic block is updated by replacing a use of FROM with
+ a use of TO. */
+
+void
+mark_elimination (int from, int to)
+{
+ basic_block bb;
+
+ FOR_EACH_BB (bb)
+ {
+ /* We don't use LIVE info in IRA. */
+ regset r = DF_LR_IN (bb);
+
+ if (REGNO_REG_SET_P (r, from))
+ {
+ CLEAR_REGNO_REG_SET (r, from);
+ SET_REGNO_REG_SET (r, to);
+ }
+ }
+}
+
+
+
+struct equivalence
+{
+ /* Set when an attempt should be made to replace a register
+ with the associated src_p entry. */
+ char replace;
+ /* Set when a REG_EQUIV note is found or created. Use to
+ keep track of what memory accesses might be created later,
+ e.g. by reload. */
+ rtx replacement;
+ rtx *src_p;
+ /* Loop depth is used to recognize equivalences which appear
+ to be present within the same loop (or in an inner loop). */
+ int loop_depth;
+ /* The list of each instruction which initializes this register. */
+ rtx init_insns;
+ /* Nonzero if this had a preexisting REG_EQUIV note. */
+ int is_arg_equivalence;
+};
+
+/* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
+ structure for that register. */
+static struct equivalence *reg_equiv;
+
+/* Used for communication between the following two functions: contains
+ a MEM that we wish to ensure remains unchanged. */
+static rtx equiv_mem;
+
+/* Set nonzero if EQUIV_MEM is modified. */
+static int equiv_mem_modified;
+
+/* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
+ Called via note_stores. */
+static void
+validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
+ void *data ATTRIBUTE_UNUSED)
+{
+ if ((REG_P (dest)
+ && reg_overlap_mentioned_p (dest, equiv_mem))
+ || (MEM_P (dest)
+ && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p)))
+ equiv_mem_modified = 1;
+}
+
+/* Verify that no store between START and the death of REG invalidates
+ MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
+ by storing into an overlapping memory location, or with a non-const
+ CALL_INSN.
+
+ Return 1 if MEMREF remains valid. */
+static int
+validate_equiv_mem (rtx start, rtx reg, rtx memref)
+{
+ rtx insn;
+ rtx note;
+
+ equiv_mem = memref;
+ equiv_mem_modified = 0;
+
+ /* If the memory reference has side effects or is volatile, it isn't a
+ valid equivalence. */
+ if (side_effects_p (memref))
+ return 0;
+
+ for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn))
+ {
+ if (! INSN_P (insn))
+ continue;
+
+ if (find_reg_note (insn, REG_DEAD, reg))
+ return 1;
+
+ if (CALL_P (insn) && ! MEM_READONLY_P (memref)
+ && ! RTL_CONST_OR_PURE_CALL_P (insn))
+ return 0;
+
+ note_stores (PATTERN (insn), validate_equiv_mem_from_store, NULL);
+
+ /* If a register mentioned in MEMREF is modified via an
+ auto-increment, we lose the equivalence. Do the same if one
+ dies; although we could extend the life, it doesn't seem worth
+ the trouble. */
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if ((REG_NOTE_KIND (note) == REG_INC
+ || REG_NOTE_KIND (note) == REG_DEAD)
+ && REG_P (XEXP (note, 0))
+ && reg_overlap_mentioned_p (XEXP (note, 0), memref))
+ return 0;
+ }
+
+ return 0;
+}
+
+/* Returns zero if X is known to be invariant. */
+static int
+equiv_init_varies_p (rtx x)
+{
+ RTX_CODE code = GET_CODE (x);
+ int i;
+ const char *fmt;
+
+ switch (code)
+ {
+ case MEM:
+ return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
+
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST_FIXED:
+ case CONST_VECTOR:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case REG:
+ return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
+
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 1;
+
+ /* Fall through. */
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ if (equiv_init_varies_p (XEXP (x, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (equiv_init_varies_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Returns nonzero if X (used to initialize register REGNO) is movable.
+ X is only movable if the registers it uses have equivalent initializations
+ which appear to be within the same loop (or in an inner loop) and are
+ movable, or if they are not candidates for local_alloc and don't vary. */
+static int
+equiv_init_movable_p (rtx x, int regno)
+{
+ int i, j;
+ const char *fmt;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case SET:
+ return equiv_init_movable_p (SET_SRC (x), regno);
+
+ case CC0:
+ case CLOBBER:
+ return 0;
+
+ case PRE_INC:
+ case PRE_DEC:
+ case POST_INC:
+ case POST_DEC:
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ return 0;
+
+ case REG:
+ return (reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
+ && reg_equiv[REGNO (x)].replace)
+ || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS && ! rtx_varies_p (x, 0));
+
+ case UNSPEC_VOLATILE:
+ return 0;
+
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 0;
+
+ /* Fall through. */
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ switch (fmt[i])
+ {
+ case 'e':
+ if (! equiv_init_movable_p (XEXP (x, i), regno))
+ return 0;
+ break;
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+
+/* TRUE if X uses any registers for which reg_equiv[REGNO].replace is true. */
+static int
+contains_replace_regs (rtx x)
+{
+ int i, j;
+ const char *fmt;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case CONST_FIXED:
+ case CONST_VECTOR:
+ case PC:
+ case CC0:
+ case HIGH:
+ return 0;
+
+ case REG:
+ return reg_equiv[REGNO (x)].replace;
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ switch (fmt[i])
+ {
+ case 'e':
+ if (contains_replace_regs (XEXP (x, i)))
+ return 1;
+ break;
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (contains_replace_regs (XVECEXP (x, i, j)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* TRUE if X references a memory location that would be affected by a store
+ to MEMREF. */
+static int
+memref_referenced_p (rtx memref, rtx x)
+{
+ int i, j;
+ const char *fmt;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case CONST_FIXED:
+ case CONST_VECTOR:
+ case PC:
+ case CC0:
+ case HIGH:
+ case LO_SUM:
+ return 0;
+
+ case REG:
+ return (reg_equiv[REGNO (x)].replacement
+ && memref_referenced_p (memref,
+ reg_equiv[REGNO (x)].replacement));
+
+ case MEM:
+ if (true_dependence (memref, VOIDmode, x, rtx_varies_p))
+ return 1;
+ break;
+
+ case SET:
+ /* If we are setting a MEM, it doesn't count (its address does), but any
+ other SET_DEST that has a MEM in it is referencing the MEM. */
+ if (MEM_P (SET_DEST (x)))
+ {
+ if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
+ return 1;
+ }
+ else if (memref_referenced_p (memref, SET_DEST (x)))
+ return 1;
+
+ return memref_referenced_p (memref, SET_SRC (x));
+
+ default:
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ switch (fmt[i])
+ {
+ case 'e':
+ if (memref_referenced_p (memref, XEXP (x, i)))
+ return 1;
+ break;
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (memref_referenced_p (memref, XVECEXP (x, i, j)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* TRUE if some insn in the range (START, END] references a memory location
+ that would be affected by a store to MEMREF. */
+static int
+memref_used_between_p (rtx memref, rtx start, rtx end)
+{
+ rtx insn;
+
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ {
+ if (!INSN_P (insn))
+ continue;
+
+ if (memref_referenced_p (memref, PATTERN (insn)))
+ return 1;
+
+ /* Nonconst functions may access memory. */
+ if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Mark REG as having no known equivalence.
+ Some instructions might have been processed before and furnished
+ with REG_EQUIV notes for this register; these notes will have to be
+ removed.
+ STORE is the piece of RTL that does the non-constant / conflicting
+ assignment - a SET, CLOBBER or REG_INC note. It is currently not used,
+ but needs to be there because this function is called from note_stores. */
+static void
+no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED)
+{
+ int regno;
+ rtx list;
+
+ if (!REG_P (reg))
+ return;
+ regno = REGNO (reg);
+ list = reg_equiv[regno].init_insns;
+ if (list == const0_rtx)
+ return;
+ reg_equiv[regno].init_insns = const0_rtx;
+ reg_equiv[regno].replacement = NULL_RTX;
+ /* This doesn't matter for equivalences made for argument registers; we
+ should keep their initialization insns. */
+ if (reg_equiv[regno].is_arg_equivalence)
+ return;
+ reg_equiv_init[regno] = NULL_RTX;
+ for (; list; list = XEXP (list, 1))
+ {
+ rtx insn = XEXP (list, 0);
+ remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
+ }
+}
+
+/* Nonzero if we recorded an equivalence for a LABEL_REF. */
+static int recorded_label_ref;
+
+/* Find registers that are equivalent to a single value throughout the
+ compilation (either because they can be referenced in memory or are set once
+ from a single constant). Lower their priority for a register.
+
+ If such a register is only referenced once, try substituting its value
+ into the using insn. If it succeeds, we can eliminate the register
+ completely.
+
+ Initialize the REG_EQUIV_INIT array of initializing insns.
+
+ Return non-zero if jump label rebuilding should be done. */
+static int
+update_equiv_regs (void)
+{
+ rtx insn;
+ basic_block bb;
+ int loop_depth;
+ bitmap cleared_regs;
+
+ /* We need to keep track of whether or not we recorded a LABEL_REF so
+ that we know if the jump optimizer needs to be rerun. */
+ recorded_label_ref = 0;
+
+ reg_equiv = XCNEWVEC (struct equivalence, max_regno);
+ reg_equiv_init = GGC_CNEWVEC (rtx, max_regno);
+ reg_equiv_init_size = max_regno;
+
+ init_alias_analysis ();
+
+ /* Scan the insns and find which registers have equivalences. Do this
+ in a separate scan of the insns because (due to -fcse-follow-jumps)
+ a register can be set below its use. */
+ FOR_EACH_BB (bb)
+ {
+ loop_depth = bb->loop_depth;
+
+ for (insn = BB_HEAD (bb);
+ insn != NEXT_INSN (BB_END (bb));
+ insn = NEXT_INSN (insn))
+ {
+ rtx note;
+ rtx set;
+ rtx dest, src;
+ int regno;
+
+ if (! INSN_P (insn))
+ continue;
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_INC)
+ no_equiv (XEXP (note, 0), note, NULL);
+
+ set = single_set (insn);
+
+ /* If this insn contains more (or less) than a single SET,
+ only mark all destinations as having no known equivalence. */
+ if (set == 0)
+ {
+ note_stores (PATTERN (insn), no_equiv, NULL);
+ continue;
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int i;
+
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx part = XVECEXP (PATTERN (insn), 0, i);
+ if (part != set)
+ note_stores (part, no_equiv, NULL);
+ }
+ }
+
+ dest = SET_DEST (set);
+ src = SET_SRC (set);
+
+ /* See if this is setting up the equivalence between an argument
+ register and its stack slot. */
+ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+ if (note)
+ {
+ gcc_assert (REG_P (dest));
+ regno = REGNO (dest);
+
+ /* Note that we don't want to clear reg_equiv_init even if there
+ are multiple sets of this register. */
+ reg_equiv[regno].is_arg_equivalence = 1;
+
+ /* Record for reload that this is an equivalencing insn. */
+ if (rtx_equal_p (src, XEXP (note, 0)))
+ reg_equiv_init[regno]
+ = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[regno]);
+
+ /* Continue normally in case this is a candidate for
+ replacements. */
+ }
+
+ if (!optimize)
+ continue;
+
+ /* We only handle the case of a pseudo register being set
+ once, or always to the same value. */
+ /* ??? The mn10200 port breaks if we add equivalences for
+ values that need an ADDRESS_REGS register and set them equivalent
+ to a MEM of a pseudo. The actual problem is in the over-conservative
+ handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
+ calculate_needs, but we traditionally work around this problem
+ here by rejecting equivalences when the destination is in a register
+ that's likely spilled. This is fragile, of course, since the
+ preferred class of a pseudo depends on all instructions that set
+ or use it. */
+
+ if (!REG_P (dest)
+ || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
+ || reg_equiv[regno].init_insns == const0_rtx
+ || (CLASS_LIKELY_SPILLED_P (reg_preferred_class (regno))
+ && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
+ {
+ /* This might be setting a SUBREG of a pseudo, a pseudo that is
+ also set somewhere else to a constant. */
+ note_stores (set, no_equiv, NULL);
+ continue;
+ }
+
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ /* cse sometimes generates function invariants, but doesn't put a
+ REG_EQUAL note on the insn. Since this note would be redundant,
+ there's no point creating it earlier than here. */
+ if (! note && ! rtx_varies_p (src, 0))
+ note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
+
+ /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
+ since it represents a function call. */
+ if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
+ note = NULL_RTX;
+
+ if (DF_REG_DEF_COUNT (regno) != 1
+ && (! note
+ || rtx_varies_p (XEXP (note, 0), 0)
+ || (reg_equiv[regno].replacement
+ && ! rtx_equal_p (XEXP (note, 0),
+ reg_equiv[regno].replacement))))
+ {
+ no_equiv (dest, set, NULL);
+ continue;
+ }
+ /* Record this insn as initializing this register. */
+ reg_equiv[regno].init_insns
+ = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
+
+ /* If this register is known to be equal to a constant, record that
+ it is always equivalent to the constant. */
+ if (DF_REG_DEF_COUNT (regno) == 1
+ && note && ! rtx_varies_p (XEXP (note, 0), 0))
+ {
+ rtx note_value = XEXP (note, 0);
+ remove_note (insn, note);
+ set_unique_reg_note (insn, REG_EQUIV, note_value);
+ }
+
+ /* If this insn introduces a "constant" register, decrease the priority
+ of that register. Record this insn if the register is only used once
+ more and the equivalence value is the same as our source.
+
+ The latter condition is checked for two reasons: First, it is an
+ indication that it may be more efficient to actually emit the insn
+ as written (if no registers are available, reload will substitute
+ the equivalence). Secondly, it avoids problems with any registers
+ dying in this insn whose death notes would be missed.
+
+ If we don't have a REG_EQUIV note, see if this insn is loading
+ a register used only in one basic block from a MEM. If so, and the
+ MEM remains unchanged for the life of the register, add a REG_EQUIV
+ note. */
+
+ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+
+ if (note == 0 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
+ && MEM_P (SET_SRC (set))
+ && validate_equiv_mem (insn, dest, SET_SRC (set)))
+ note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (SET_SRC (set)));
+
+ if (note)
+ {
+ int regno = REGNO (dest);
+ rtx x = XEXP (note, 0);
+
+ /* If we haven't done so, record for reload that this is an
+ equivalencing insn. */
+ if (!reg_equiv[regno].is_arg_equivalence)
+ reg_equiv_init[regno]
+ = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[regno]);
+
+ /* Record whether or not we created a REG_EQUIV note for a LABEL_REF.
+ We might end up substituting the LABEL_REF for uses of the
+ pseudo here or later. That kind of transformation may turn an
+ indirect jump into a direct jump, in which case we must rerun the
+ jump optimizer to ensure that the JUMP_LABEL fields are valid. */
+ if (GET_CODE (x) == LABEL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && (GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF)))
+ recorded_label_ref = 1;
+
+ reg_equiv[regno].replacement = x;
+ reg_equiv[regno].src_p = &SET_SRC (set);
+ reg_equiv[regno].loop_depth = loop_depth;
+
+ /* Don't mess with things live during setjmp. */
+ if (REG_LIVE_LENGTH (regno) >= 0 && optimize)
+ {
+ /* Note that the statement below does not affect the priority
+ in local-alloc! */
+ REG_LIVE_LENGTH (regno) *= 2;
+
+ /* If the register is referenced exactly twice, meaning it is
+ set once and used once, indicate that the reference may be
+ replaced by the equivalence we computed above. Do this
+ even if the register is only used in one block so that
+ dependencies can be handled where the last register is
+ used in a different block (i.e. HIGH / LO_SUM sequences)
+ and to reduce the number of registers alive across
+ calls. */
+
+ if (REG_N_REFS (regno) == 2
+ && (rtx_equal_p (x, src)
+ || ! equiv_init_varies_p (src))
+ && NONJUMP_INSN_P (insn)
+ && equiv_init_movable_p (PATTERN (insn), regno))
+ reg_equiv[regno].replace = 1;
+ }
+ }
+ }
+ }
+
+ if (!optimize)
+ goto out;
+
+ /* A second pass, to gather additional equivalences with memory. This needs
+ to be done after we know which registers we are going to replace. */
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx set, src, dest;
+ unsigned regno;
+
+ if (! INSN_P (insn))
+ continue;
+
+ set = single_set (insn);
+ if (! set)
+ continue;
+
+ dest = SET_DEST (set);
+ src = SET_SRC (set);
+
+ /* If this sets a MEM to the contents of a REG that is only used
+ in a single basic block, see if the register is always equivalent
+ to that memory location and if moving the store from INSN to the
+ insn that set REG is safe. If so, put a REG_EQUIV note on the
+ initializing insn.
+
+ Don't add a REG_EQUIV note if the insn already has one. The existing
+ REG_EQUIV is likely more useful than the one we are adding.
+
+ If one of the regs in the address has reg_equiv[REGNO].replace set,
+ then we can't add this REG_EQUIV note. The reg_equiv[REGNO].replace
+ optimization may move the set of this register immediately before
+ insn, which puts it after reg_equiv[REGNO].init_insns, and hence
+ the mention in the REG_EQUIV note would be to an uninitialized
+ pseudo. */
+
+ if (MEM_P (dest) && REG_P (src)
+ && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
+ && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
+ && DF_REG_DEF_COUNT (regno) == 1
+ && reg_equiv[regno].init_insns != 0
+ && reg_equiv[regno].init_insns != const0_rtx
+ && ! find_reg_note (XEXP (reg_equiv[regno].init_insns, 0),
+ REG_EQUIV, NULL_RTX)
+ && ! contains_replace_regs (XEXP (dest, 0)))
+ {
+ rtx init_insn = XEXP (reg_equiv[regno].init_insns, 0);
+ if (validate_equiv_mem (init_insn, src, dest)
+ && ! memref_used_between_p (dest, init_insn, insn)
+ /* Attaching a REG_EQUIV note will fail if INIT_INSN has
+ multiple sets. */
+ && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
+ {
+ /* This insn makes the equivalence, not the one initializing
+ the register. */
+ reg_equiv_init[regno]
+ = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
+ df_notes_rescan (init_insn);
+ }
+ }
+ }
+
+ cleared_regs = BITMAP_ALLOC (NULL);
+ /* Now scan all regs killed in an insn to see if any of them are
+ registers that are used only once. If so, see if we can replace the
+ reference with the equivalent form. If we can, delete the
+ initializing reference and this register will go away. If we
+ can't replace the reference, and the initializing reference is
+ within the same loop (or in an inner loop), then move the register
+ initialization just before the use, so that they are in the same
+ basic block. */
+ FOR_EACH_BB_REVERSE (bb)
+ {
+ loop_depth = bb->loop_depth;
+ for (insn = BB_END (bb);
+ insn != PREV_INSN (BB_HEAD (bb));
+ insn = PREV_INSN (insn))
+ {
+ rtx link;
+
+ if (! INSN_P (insn))
+ continue;
+
+ /* Don't substitute into a non-local goto; this confuses the CFG. */
+ if (JUMP_P (insn)
+ && find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
+ continue;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ /* Make sure this insn still refers to the register. */
+ && reg_mentioned_p (XEXP (link, 0), PATTERN (insn)))
+ {
+ int regno = REGNO (XEXP (link, 0));
+ rtx equiv_insn;
+
+ if (! reg_equiv[regno].replace
+ || reg_equiv[regno].loop_depth < loop_depth)
+ continue;
+
+ /* reg_equiv[REGNO].replace gets set only when
+ REG_N_REFS[REGNO] is 2, i.e. the register is set
+ once and used once. (If it were only set, but not used,
+ flow would have deleted the setting insns.) Hence
+ there can only be one insn in reg_equiv[REGNO].init_insns. */
+ gcc_assert (reg_equiv[regno].init_insns
+ && !XEXP (reg_equiv[regno].init_insns, 1));
+ equiv_insn = XEXP (reg_equiv[regno].init_insns, 0);
+
+ /* We may not move instructions that can throw, since
+ that changes basic block boundaries and we are not
+ prepared to adjust the CFG to match. */
+ if (can_throw_internal (equiv_insn))
+ continue;
+
+ if (asm_noperands (PATTERN (equiv_insn)) < 0
+ && validate_replace_rtx (regno_reg_rtx[regno],
+ *(reg_equiv[regno].src_p), insn))
+ {
+ rtx equiv_link;
+ rtx last_link;
+ rtx note;
+
+ /* Find the last note. */
+ for (last_link = link; XEXP (last_link, 1);
+ last_link = XEXP (last_link, 1))
+ ;
+
+ /* Append the REG_DEAD notes from equiv_insn. */
+ equiv_link = REG_NOTES (equiv_insn);
+ while (equiv_link)
+ {
+ note = equiv_link;
+ equiv_link = XEXP (equiv_link, 1);
+ if (REG_NOTE_KIND (note) == REG_DEAD)
+ {
+ remove_note (equiv_insn, note);
+ XEXP (last_link, 1) = note;
+ XEXP (note, 1) = NULL_RTX;
+ last_link = note;
+ }
+ }
+
+ remove_death (regno, insn);
+ SET_REG_N_REFS (regno, 0);
+ REG_FREQ (regno) = 0;
+ delete_insn (equiv_insn);
+
+ reg_equiv[regno].init_insns
+ = XEXP (reg_equiv[regno].init_insns, 1);
+
+ reg_equiv_init[regno] = NULL_RTX;
+ bitmap_set_bit (cleared_regs, regno);
+ }
+ /* Move the initialization of the register to just before
+ INSN. Update the flow information. */
+ else if (PREV_INSN (insn) != equiv_insn)
+ {
+ rtx new_insn;
+
+ new_insn = emit_insn_before (PATTERN (equiv_insn), insn);
+ REG_NOTES (new_insn) = REG_NOTES (equiv_insn);
+ REG_NOTES (equiv_insn) = 0;
+ /* Rescan it to process the notes. */
+ df_insn_rescan (new_insn);
+
+ /* Make sure this insn is recognized before
+ reload begins, otherwise
+ eliminate_regs_in_insn will die. */
+ INSN_CODE (new_insn) = INSN_CODE (equiv_insn);
+
+ delete_insn (equiv_insn);
+
+ XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
+
+ REG_BASIC_BLOCK (regno) = bb->index;
+ REG_N_CALLS_CROSSED (regno) = 0;
+ REG_FREQ_CALLS_CROSSED (regno) = 0;
+ REG_N_THROWING_CALLS_CROSSED (regno) = 0;
+ REG_LIVE_LENGTH (regno) = 2;
+
+ if (insn == BB_HEAD (bb))
+ BB_HEAD (bb) = PREV_INSN (insn);
+
+ reg_equiv_init[regno]
+ = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
+ bitmap_set_bit (cleared_regs, regno);
+ }
+ }
+ }
+ }
+ }
+
+ if (!bitmap_empty_p (cleared_regs))
+ FOR_EACH_BB (bb)
+ {
+ bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
+ bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
+ bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
+ bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
+ }
+
+ BITMAP_FREE (cleared_regs);
+
+ out:
+ /* Clean up. */
+
+ end_alias_analysis ();
+ free (reg_equiv);
+ return recorded_label_ref;
+}
+
+
+
+/* Print chain C to FILE. */
+static void
+print_insn_chain (FILE *file, struct insn_chain *c)
+{
+ fprintf (file, "insn=%d, ", INSN_UID(c->insn));
+ bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
+ bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
+}
+
+
+/* Print all reload_insn_chains to FILE. */
+static void
+print_insn_chains (FILE *file)
+{
+ struct insn_chain *c;
+ for (c = reload_insn_chain; c ; c = c->next)
+ print_insn_chain (file, c);
+}
+
+/* Return true if pseudo REGNO should be added to the live_throughout
+ or dead_or_set sets of the insn chains for reload consideration. */
+static bool
+pseudo_for_reload_consideration_p (int regno)
+{
+ /* Consider spilled pseudos too for IRA because they still have a
+ chance to get hard registers during reload when IRA is used. */
+ return (reg_renumber[regno] >= 0
+ || (ira_conflicts_p && flag_ira_share_spill_slots));
+}
+
+/* Initialize LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM]
+ for REG: the bitmap gets one bit per byte of the register underlying
+ REG, and INIT_VALUE selects the initial bit values. ALLOCNUM need
+ not be the regno of REG. */
+static void
+init_live_subregs (bool init_value, sbitmap *live_subregs,
+ int *live_subregs_used, int allocnum, rtx reg)
+{
+ unsigned int regno = REGNO (SUBREG_REG (reg));
+ int size = GET_MODE_SIZE (GET_MODE (regno_reg_rtx[regno]));
+
+ gcc_assert (size > 0);
+
+ /* Been there, done that. */
+ if (live_subregs_used[allocnum])
+ return;
+
+ /* Create a new one with zeros. */
+ if (live_subregs[allocnum] == NULL)
+ live_subregs[allocnum] = sbitmap_alloc (size);
+
+ /* If the entire reg was live before blasting into subregs, we need
+ to init all of the subreg bits to ones; otherwise init them to 0. */
+ if (init_value)
+ sbitmap_ones (live_subregs[allocnum]);
+ else
+ sbitmap_zero (live_subregs[allocnum]);
+
+ /* Set the number of bits that we really want. */
+ live_subregs_used[allocnum] = size;
+}
+
+/* Walk the insns of the current function and build reload_insn_chain,
+ and record register life information. */
+static void
+build_insn_chain (void)
+{
+ unsigned int i;
+ struct insn_chain **p = &reload_insn_chain;
+ basic_block bb;
+ struct insn_chain *c = NULL;
+ struct insn_chain *next = NULL;
+ bitmap live_relevant_regs = BITMAP_ALLOC (NULL);
+ bitmap elim_regset = BITMAP_ALLOC (NULL);
+ /* live_subregs is a vector used to keep accurate information about
+ which hardregs are live in multiword pseudos. live_subregs and
+ live_subregs_used are indexed by pseudo number. The live_subregs
+ entry for a particular pseudo is only used if the corresponding
+ element is nonzero in live_subregs_used. The value in
+ live_subregs_used is the number of bytes that the pseudo can
+ occupy. */
+ sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
+ int *live_subregs_used = XNEWVEC (int, max_regno);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (eliminable_regset, i))
+ bitmap_set_bit (elim_regset, i);
+ FOR_EACH_BB_REVERSE (bb)
+ {
+ bitmap_iterator bi;
+ rtx insn;
+
+ CLEAR_REG_SET (live_relevant_regs);
+ memset (live_subregs_used, 0, max_regno * sizeof (int));
+
+ EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), 0, i, bi)
+ {
+ if (i >= FIRST_PSEUDO_REGISTER)
+ break;
+ bitmap_set_bit (live_relevant_regs, i);
+ }
+
+ EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb),
+ FIRST_PSEUDO_REGISTER, i, bi)
+ {
+ if (pseudo_for_reload_consideration_p (i))
+ bitmap_set_bit (live_relevant_regs, i);
+ }
+
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ if (!NOTE_P (insn) && !BARRIER_P (insn))
+ {
+ unsigned int uid = INSN_UID (insn);
+ df_ref *def_rec;
+ df_ref *use_rec;
+
+ c = new_insn_chain ();
+ c->next = next;
+ next = c;
+ *p = c;
+ p = &c->prev;
+
+ c->insn = insn;
+ c->block = bb->index;
+
+ if (INSN_P (insn))
+ for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
+ {
+ df_ref def = *def_rec;
+ unsigned int regno = DF_REF_REGNO (def);
+
+ /* Ignore may clobbers because these are generated
+ from calls. However, every other kind of def is
+ added to dead_or_set. */
+ if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
+ {
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (!fixed_regs[regno])
+ bitmap_set_bit (&c->dead_or_set, regno);
+ }
+ else if (pseudo_for_reload_consideration_p (regno))
+ bitmap_set_bit (&c->dead_or_set, regno);
+ }
+
+ if ((regno < FIRST_PSEUDO_REGISTER
+ || reg_renumber[regno] >= 0
+ || ira_conflicts_p)
+ && (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
+ {
+ rtx reg = DF_REF_REG (def);
+
+ /* We can model subregs, but not if they are
+ wrapped in ZERO_EXTRACTS. */
+ if (GET_CODE (reg) == SUBREG
+ && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
+ {
+ unsigned int start = SUBREG_BYTE (reg);
+ unsigned int last = start
+ + GET_MODE_SIZE (GET_MODE (reg));
+
+ init_live_subregs
+ (bitmap_bit_p (live_relevant_regs, regno),
+ live_subregs, live_subregs_used, regno, reg);
+
+ if (!DF_REF_FLAGS_IS_SET
+ (def, DF_REF_STRICT_LOW_PART))
+ {
+ /* Expand the range to cover entire words.
+ Bytes added here are "don't care". */
+ start
+ = start / UNITS_PER_WORD * UNITS_PER_WORD;
+ last = ((last + UNITS_PER_WORD - 1)
+ / UNITS_PER_WORD * UNITS_PER_WORD);
+ }
+
+ /* Ignore the paradoxical bits. */
+ if ((int)last > live_subregs_used[regno])
+ last = live_subregs_used[regno];
+
+ while (start < last)
+ {
+ RESET_BIT (live_subregs[regno], start);
+ start++;
+ }
+
+ if (sbitmap_empty_p (live_subregs[regno]))
+ {
+ live_subregs_used[regno] = 0;
+ bitmap_clear_bit (live_relevant_regs, regno);
+ }
+ else
+ /* Set live_relevant_regs here because
+ that bit has to be true to get us to
+ look at the live_subregs fields. */
+ bitmap_set_bit (live_relevant_regs, regno);
+ }
+ else
+ {
+ /* DF_REF_PARTIAL is generated for
+ subregs, STRICT_LOW_PART, and
+ ZERO_EXTRACT. We handle the subreg
+ case above so here we have to keep from
+ modeling the def as a killing def. */
+ if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
+ {
+ bitmap_clear_bit (live_relevant_regs, regno);
+ live_subregs_used[regno] = 0;
+ }
+ }
+ }
+ }
+
+ bitmap_and_compl_into (live_relevant_regs, elim_regset);
+ bitmap_copy (&c->live_throughout, live_relevant_regs);
+
+ if (INSN_P (insn))
+ for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
+ {
+ df_ref use = *use_rec;
+ unsigned int regno = DF_REF_REGNO (use);
+ rtx reg = DF_REF_REG (use);
+
+ /* DF_REF_READ_WRITE on a use means that this use
+ is fabricated from a def that is a partial set
+ to a multiword reg. Here, we only model the
+ subreg case that is not wrapped in ZERO_EXTRACT
+ precisely so we do not need to look at the
+ fabricated use. */
+ if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
+ && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
+ && DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
+ continue;
+
+ /* Add the last use of each var to dead_or_set. */
+ if (!bitmap_bit_p (live_relevant_regs, regno))
+ {
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (!fixed_regs[regno])
+ bitmap_set_bit (&c->dead_or_set, regno);
+ }
+ else if (pseudo_for_reload_consideration_p (regno))
+ bitmap_set_bit (&c->dead_or_set, regno);
+ }
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ || pseudo_for_reload_consideration_p (regno))
+ {
+ if (GET_CODE (reg) == SUBREG
+ && !DF_REF_FLAGS_IS_SET (use,
+ DF_REF_SIGN_EXTRACT
+ | DF_REF_ZERO_EXTRACT))
+ {
+ unsigned int start = SUBREG_BYTE (reg);
+ unsigned int last = start
+ + GET_MODE_SIZE (GET_MODE (reg));
+
+ init_live_subregs
+ (bitmap_bit_p (live_relevant_regs, regno),
+ live_subregs, live_subregs_used, regno, reg);
+
+ /* Ignore the paradoxical bits. */
+ if ((int)last > live_subregs_used[regno])
+ last = live_subregs_used[regno];
+
+ while (start < last)
+ {
+ SET_BIT (live_subregs[regno], start);
+ start++;
+ }
+ }
+ else
+ /* Resetting the live_subregs_used is
+ effectively saying do not use the subregs
+ because we are reading the whole
+ pseudo. */
+ live_subregs_used[regno] = 0;
+ bitmap_set_bit (live_relevant_regs, regno);
+ }
+ }
+ }
+ }
+
+ /* FIXME!! The following code is a disaster. Reload needs to see the
+ labels and jump tables that are just hanging out in between
+ the basic blocks. See pr33676. */
+ insn = BB_HEAD (bb);
+
+ /* Skip over the barriers and cruft. */
+ while (insn && (BARRIER_P (insn) || NOTE_P (insn)
+ || BLOCK_FOR_INSN (insn) == bb))
+ insn = PREV_INSN (insn);
+
+ /* Although we add anything except barriers and notes here, the point
+ is to get the labels and jump tables into the
+ reload_insn_chain. */
+ while (insn)
+ {
+ if (!NOTE_P (insn) && !BARRIER_P (insn))
+ {
+ if (BLOCK_FOR_INSN (insn))
+ break;
+
+ c = new_insn_chain ();
+ c->next = next;
+ next = c;
+ *p = c;
+ p = &c->prev;
+
+ /* The block makes no sense here, but it is what the old
+ code did. */
+ c->block = bb->index;
+ c->insn = insn;
+ bitmap_copy (&c->live_throughout, live_relevant_regs);
+ }
+ insn = PREV_INSN (insn);
+ }
+ }
+
+ for (i = 0; i < (unsigned int) max_regno; i++)
+ if (live_subregs[i])
+ free (live_subregs[i]);
+
+ reload_insn_chain = c;
+ *p = NULL;
+
+ free (live_subregs);
+ free (live_subregs_used);
+ BITMAP_FREE (live_relevant_regs);
+ BITMAP_FREE (elim_regset);
+
+ if (dump_file)
+ print_insn_chains (dump_file);
+}
+
+
+
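
The word-boundary rounding applied above to subreg defs that are not STRICT_LOW_PART is ordinary integer arithmetic: round the first byte down, and the byte just past the end up, to a multiple of UNITS_PER_WORD. A minimal standalone sketch of just that step, assuming a hypothetical 4-byte UNITS_PER_WORD purely for illustration:

#include <stdio.h>

#define UNITS_PER_WORD 4   /* hypothetical word size for the example */

int
main (void)
{
  /* Byte range actually written by the subreg.  */
  unsigned int start = 5, last = 11;

  /* Round START down and LAST up to word boundaries; the extra bytes
     are "don't care", as in the hunk above.  */
  start = start / UNITS_PER_WORD * UNITS_PER_WORD;
  last = (last + UNITS_PER_WORD - 1) / UNITS_PER_WORD * UNITS_PER_WORD;

  printf ("bytes treated as set: [%u, %u)\n", start, last);  /* [4, 12) */
  return 0;
}
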
/* All natural loops. */
struct loops ira_loops;
@@ -2085,7 +3299,7 @@ ira (FILE *f)
static bool
gate_ira (void)
{
- return flag_ira != 0;
+ return true;
}
/* Run the integrated register allocator. */
diff --git a/gcc/local-alloc.c b/gcc/local-alloc.c
deleted file mode 100644
index 648f239a65d..00000000000
--- a/gcc/local-alloc.c
+++ /dev/null
@@ -1,2545 +0,0 @@
-/* Allocate registers within a basic block, for GNU compiler.
- Copyright (C) 1987, 1988, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
- Free Software Foundation, Inc.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 3, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-/* Allocation of hard register numbers to pseudo registers is done in
- two passes. In this pass we consider only regs that are born and
- die once within one basic block. We do this one basic block at a
- time. Then the next pass allocates the registers that remain.
- Two passes are used because this pass uses methods that work only
- on linear code, but that do a better job than the general methods
- used in global_alloc, and more quickly too.
-
- The assignments made are recorded in the vector reg_renumber
- whose space is allocated here. The rtl code itself is not altered.
-
- We assign each instruction in the basic block a number
- which is its order from the beginning of the block.
- Then we can represent the lifetime of a pseudo register with
- a pair of numbers, and check for conflicts easily.
- We can record the availability of hard registers with a
- HARD_REG_SET for each instruction. The HARD_REG_SET
- contains 0 or 1 for each hard reg.
-
- To avoid register shuffling, we tie registers together when one
- dies by being copied into another, or dies in an instruction that
- does arithmetic to produce another. The tied registers are
- allocated as one. Registers with different reg class preferences
- can never be tied unless the class preferred by one is a subclass
- of the one preferred by the other.
-
- Tying is represented with "quantity numbers".
- A non-tied register is given a new quantity number.
- Tied registers have the same quantity number.
-
- There is provision to exempt registers that, even though they are
- contained within the block, can be tied to others not contained in it.
- This is so that global_alloc could process them both and tie them then.
- But this is currently disabled since tying in global_alloc is not
- yet implemented. */
-
-/* Pseudos allocated here can be reallocated by global.c if the hard register
- is used as a spill register. Currently we don't allocate such pseudos
- here if their preferred class is likely to be used by spills. */
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "hard-reg-set.h"
-#include "rtl.h"
-#include "tm_p.h"
-#include "flags.h"
-#include "regs.h"
-#include "function.h"
-#include "insn-config.h"
-#include "insn-attr.h"
-#include "recog.h"
-#include "output.h"
-#include "toplev.h"
-#include "except.h"
-#include "integrate.h"
-#include "reload.h"
-#include "ggc.h"
-#include "timevar.h"
-#include "tree-pass.h"
-#include "df.h"
-#include "dbgcnt.h"
-
-
-/* Next quantity number available for allocation. */
-
-static int next_qty;
-
-/* Information we maintain about each quantity. */
-struct qty
-{
- /* The number of refs to quantity Q. */
-
- int n_refs;
-
- /* The frequency of uses of quantity Q. */
-
- int freq;
-
- /* Insn number (counting from head of basic block)
- where quantity Q was born. -1 if birth has not been recorded. */
-
- int birth;
-
- /* Insn number (counting from head of basic block)
- where given quantity died. Due to the way tying is done,
- and the fact that we consider in this pass only regs that die but once,
- a quantity can die only once. Each quantity's life span
- is a set of consecutive insns. -1 if death has not been recorded. */
-
- int death;
-
- /* Number of words needed to hold the data in given quantity.
- This depends on its machine mode. It is used for these purposes:
- 1. It is used in computing the relative importance of qtys,
- which determines the order in which we look for regs for them.
- 2. It is used in rules that prevent tying several registers of
- different sizes in a way that is geometrically impossible
- (see combine_regs). */
-
- int size;
-
- /* Number of times a reg tied to given qty lives across a CALL_INSN. */
-
- int n_calls_crossed;
-
- /* Frequency-weighted count of the times a reg tied to given qty
- lives across a CALL_INSN. */
-
- int freq_calls_crossed;
-
- /* Number of times a reg tied to given qty lives across a CALL_INSN
- that might throw. */
-
- int n_throwing_calls_crossed;
-
- /* The register number of one pseudo register whose reg_qty value is Q.
- This register should be the head of the chain
- maintained in reg_next_in_qty. */
-
- int first_reg;
-
- /* Reg class contained in (smaller than) the preferred classes of all
- the pseudo regs that are tied in given quantity.
- This is the preferred class for allocating that quantity. */
-
- enum reg_class min_class;
-
- /* Register class within which we allocate given qty if we can't get
- its preferred class. */
-
- enum reg_class alternate_class;
-
- /* This holds the mode of the registers that are tied to given qty,
- or VOIDmode if registers with differing modes are tied together. */
-
- enum machine_mode mode;
-
- /* the hard reg number chosen for given quantity,
- or -1 if none was found. */
-
- short phys_reg;
-};
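
The tying described above boils down to two pieces of state: a per-quantity head register and a per-register successor link. The following self-contained sketch (register numbers and bounds invented for the example, not taken from local-alloc.c) shows how a quantity's member pseudos are chained and walked:

#include <stdio.h>

#define MAX_REGS 64   /* made-up bound for the example */

int
main (void)
{
  int reg_next_in_qty[MAX_REGS];
  int first_reg = 42;   /* plays the role of qty[q].first_reg */
  int i;

  for (i = 0; i < MAX_REGS; i++)
    reg_next_in_qty[i] = -1;   /* -1 terminates a chain */

  /* Tie pseudos 42, 45 and 51 into the same quantity.  */
  reg_next_in_qty[42] = 45;
  reg_next_in_qty[45] = 51;

  /* Walk the chain the same way the final loop of block_alloc propagates
     qty[q].phys_reg to every member of the quantity.  */
  for (i = first_reg; i >= 0; i = reg_next_in_qty[i])
    printf ("pseudo %d is a member of this quantity\n", i);
  return 0;
}
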
-
-static struct qty *qty;
-
-/* These fields are kept separately to speed up their clearing. */
-
-/* We maintain two hard register sets that indicate suggested hard registers
- for each quantity. The first, phys_copy_sugg, contains hard registers
- that are tied to the quantity by a simple copy. The second contains all
- hard registers that are tied to the quantity via an arithmetic operation.
-
- The former register set is given priority for allocation. This tends to
- eliminate copy insns. */
-
-/* Element Q is a set of hard registers that are suggested for quantity Q by
- copy insns. */
-
-static HARD_REG_SET *qty_phys_copy_sugg;
-
-/* Element Q is a set of hard registers that are suggested for quantity Q by
- arithmetic insns. */
-
-static HARD_REG_SET *qty_phys_sugg;
-
-/* Element Q is the number of suggested registers in qty_phys_copy_sugg. */
-
-static short *qty_phys_num_copy_sugg;
-
-/* Element Q is the number of suggested registers in qty_phys_sugg. */
-
-static short *qty_phys_num_sugg;
-
-/* If (REG N) has been assigned a quantity number, is a register number
- of another register assigned the same quantity number, or -1 for the
- end of the chain. qty->first_reg point to the head of this chain. */
-
-static int *reg_next_in_qty;
-
-/* reg_qty[N] (where N is a pseudo reg number) is the qty number of that reg
- if it is >= 0,
- or -1 if this register cannot be allocated by local-alloc,
- or -2 if not known yet.
-
- Note that if we see a use or death of pseudo register N with
- reg_qty[N] == -2, register N must be local to the current block. If
- it were used in more than one block, we would have reg_qty[N] == -1.
- This relies on the fact that if reg_basic_block[N] is >= 0, register N
- will not appear in any other block. We save a considerable number of
- tests by exploiting this.
-
- If N is < FIRST_PSEUDO_REGISTER, reg_qty[N] is undefined and should not
- be referenced. */
-
-static int *reg_qty;
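
A tiny standalone sketch of the three-way encoding just described; the helper name is invented for illustration:

#include <stdio.h>

/* Decode one reg_qty entry: >= 0 is a quantity number, -1 means the
   register cannot be handled by local-alloc, and -2 means it is known to
   be block-local but has not been given a quantity yet.  */
static const char *
reg_qty_meaning (int value)
{
  if (value >= 0)
    return "tied to a quantity";
  if (value == -2)
    return "block-local, quantity not yet allocated";
  return "not allocatable by local-alloc";
}

int
main (void)
{
  int samples[] = { 3, -1, -2 };
  unsigned int i;

  for (i = 0; i < sizeof samples / sizeof samples[0]; i++)
    printf ("reg_qty = %d: %s\n", samples[i], reg_qty_meaning (samples[i]));
  return 0;
}
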
-
-/* The offset (in words) of register N within its quantity.
- This can be nonzero if register N is SImode, and has been tied
- to a subreg of a DImode register. */
-
-static char *reg_offset;
-
-/* Vector of substitutions of register numbers,
- used to map pseudo regs into hardware regs.
- This is set up as a result of register allocation.
- Element N is the hard reg assigned to pseudo reg N,
- or is -1 if no hard reg was assigned.
- If N is a hard reg number, element N is N. */
-
-short *reg_renumber;
-
-/* Set of hard registers live at the current point in the scan
- of the instructions in a basic block. */
-
-static HARD_REG_SET regs_live;
-
-/* Each set of hard registers indicates registers live at a particular
- point in the basic block. For N even, regs_live_at[N] says which
- hard registers are needed *after* insn N/2 (i.e., they may not
- conflict with the outputs of insn N/2 or the inputs of insn N/2 + 1).
-
- If an object is to conflict with the inputs of insn J but not the
- outputs of insn J + 1, we say it is born at index J*2 - 1. Similarly,
- if it is to conflict with the outputs of insn J but not the inputs of
- insn J + 1, it is said to die at index J*2 + 1. */
-
-static HARD_REG_SET *regs_live_at;
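
Each insn N therefore owns the pair of slots 2*N and 2*N + 1, which is also why block_alloc allocates 2 * insn_count + 2 of these sets. A purely illustrative sketch of the index arithmetic (insn numbers invented):

#include <stdio.h>

int
main (void)
{
  /* Each insn N owns the slots 2*N and 2*N + 1; the odd "half" indices
     are what allow a birth or death to distinguish conflicting with an
     insn's inputs from conflicting with its outputs.  */
  int n;

  for (n = 3; n <= 7; n++)
    printf ("insn %d -> regs_live_at slots %d and %d\n", n, 2 * n, 2 * n + 1);

  return 0;
}
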
-
-/* Communicate local vars `insn_number' and `insn'
- from `block_alloc' to `reg_is_set', `wipe_dead_reg', and `alloc_qty'. */
-static int this_insn_number;
-static rtx this_insn;
-
-struct equivalence
-{
- /* Set when an attempt should be made to replace a register
- with the associated src_p entry. */
-
- char replace;
-
- /* Set when a REG_EQUIV note is found or created. Used to
- keep track of what memory accesses might be created later,
- e.g. by reload. */
-
- rtx replacement;
-
- rtx *src_p;
-
- /* Loop depth is used to recognize equivalences which appear
- to be present within the same loop (or in an inner loop). */
-
- int loop_depth;
-
- /* The list of each instruction which initializes this register. */
-
- rtx init_insns;
-
- /* Nonzero if this had a preexisting REG_EQUIV note. */
-
- int is_arg_equivalence;
-};
-
-/* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
- structure for that register. */
-
-static struct equivalence *reg_equiv;
-
-/* Nonzero if we recorded an equivalence for a LABEL_REF. */
-static int recorded_label_ref;
-
-static void alloc_qty (int, enum machine_mode, int, int);
-static void validate_equiv_mem_from_store (rtx, const_rtx, void *);
-static int validate_equiv_mem (rtx, rtx, rtx);
-static int equiv_init_varies_p (rtx);
-static int equiv_init_movable_p (rtx, int);
-static int contains_replace_regs (rtx);
-static int memref_referenced_p (rtx, rtx);
-static int memref_used_between_p (rtx, rtx, rtx);
-static void no_equiv (rtx, const_rtx, void *);
-static void block_alloc (basic_block);
-static int qty_sugg_compare (int, int);
-static int qty_sugg_compare_1 (const void *, const void *);
-static int qty_compare (int, int);
-static int qty_compare_1 (const void *, const void *);
-static int combine_regs (rtx, rtx, int, int, rtx);
-static int reg_meets_class_p (int, enum reg_class);
-static void update_qty_class (int, int);
-static void reg_is_set (rtx, const_rtx, void *);
-static void reg_is_born (rtx, int);
-static void wipe_dead_reg (rtx, int);
-static int find_free_reg (enum reg_class, enum machine_mode, int, int, int,
- int, int, basic_block);
-static void mark_life (int, enum machine_mode, int);
-static void post_mark_life (int, enum machine_mode, int, int, int);
-static int requires_inout (const char *);
-
-/* Allocate a new quantity (new within current basic block)
- for register number REGNO which is born at index BIRTH
- within the block. MODE and SIZE are info on reg REGNO. */
-
-static void
-alloc_qty (int regno, enum machine_mode mode, int size, int birth)
-{
- int qtyno = next_qty++;
-
- reg_qty[regno] = qtyno;
- reg_offset[regno] = 0;
- reg_next_in_qty[regno] = -1;
-
- qty[qtyno].first_reg = regno;
- qty[qtyno].size = size;
- qty[qtyno].mode = mode;
- qty[qtyno].birth = birth;
- qty[qtyno].n_calls_crossed = REG_N_CALLS_CROSSED (regno);
- qty[qtyno].freq_calls_crossed = REG_FREQ_CALLS_CROSSED (regno);
- qty[qtyno].n_throwing_calls_crossed = REG_N_THROWING_CALLS_CROSSED (regno);
- qty[qtyno].min_class = reg_preferred_class (regno);
- qty[qtyno].alternate_class = reg_alternate_class (regno);
- qty[qtyno].n_refs = REG_N_REFS (regno);
- qty[qtyno].freq = REG_FREQ (regno);
-}
-
-/* Main entry point of this file. */
-
-static int
-local_alloc (void)
-{
- int i;
- int max_qty;
- basic_block b;
-
- /* We need to keep track of whether or not we recorded a LABEL_REF so
- that we know if the jump optimizer needs to be rerun. */
- recorded_label_ref = 0;
-
- /* Leaf functions and non-leaf functions have different needs.
- If defined, let the machine say what kind of ordering we
- should use. */
-#ifdef ORDER_REGS_FOR_LOCAL_ALLOC
- ORDER_REGS_FOR_LOCAL_ALLOC;
-#endif
-
- /* Promote REG_EQUAL notes to REG_EQUIV notes and adjust status of affected
- registers. */
- update_equiv_regs ();
-
- /* This sets the maximum number of quantities we can have. Quantity
- numbers start at zero and we can have one for each pseudo. */
- max_qty = (max_regno - FIRST_PSEUDO_REGISTER);
-
- /* Allocate vectors of temporary data.
- See the declarations of these variables, above,
- for what they mean. */
-
- qty = XNEWVEC (struct qty, max_qty);
- qty_phys_copy_sugg = XNEWVEC (HARD_REG_SET, max_qty);
- qty_phys_num_copy_sugg = XNEWVEC (short, max_qty);
- qty_phys_sugg = XNEWVEC (HARD_REG_SET, max_qty);
- qty_phys_num_sugg = XNEWVEC (short, max_qty);
-
- reg_qty = XNEWVEC (int, max_regno);
- reg_offset = XNEWVEC (char, max_regno);
- reg_next_in_qty = XNEWVEC (int, max_regno);
-
- /* Determine which pseudo-registers can be allocated by local-alloc.
- In general, these are the registers used only in a single block and
- which only die once.
-
- We need not be concerned with which block actually uses the register
- since we will never see it outside that block. */
-
- for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
- {
- if (REG_BASIC_BLOCK (i) >= NUM_FIXED_BLOCKS && REG_N_DEATHS (i) == 1)
- reg_qty[i] = -2;
- else
- reg_qty[i] = -1;
- }
-
- /* Force loop below to initialize entire quantity array. */
- next_qty = max_qty;
-
- /* Allocate each block's local registers, block by block. */
-
- FOR_EACH_BB (b)
- {
- /* NEXT_QTY indicates which elements of the `qty_...'
- vectors might need to be initialized because they were used
- for the previous block; it is set to the entire array before
- block 0. Initialize those, with an explicit loop if there are few,
- else with memset. Do not initialize vectors that are
- explicitly set by `alloc_qty'. */
-
- if (next_qty < 6)
- {
- for (i = 0; i < next_qty; i++)
- {
- CLEAR_HARD_REG_SET (qty_phys_copy_sugg[i]);
- qty_phys_num_copy_sugg[i] = 0;
- CLEAR_HARD_REG_SET (qty_phys_sugg[i]);
- qty_phys_num_sugg[i] = 0;
- }
- }
- else
- {
-#define CLEAR(vector) \
- memset ((vector), 0, (sizeof (*(vector))) * next_qty);
-
- CLEAR (qty_phys_copy_sugg);
- CLEAR (qty_phys_num_copy_sugg);
- CLEAR (qty_phys_sugg);
- CLEAR (qty_phys_num_sugg);
- }
-
- next_qty = 0;
-
- block_alloc (b);
- }
-
- free (qty);
- free (qty_phys_copy_sugg);
- free (qty_phys_num_copy_sugg);
- free (qty_phys_sugg);
- free (qty_phys_num_sugg);
-
- free (reg_qty);
- free (reg_offset);
- free (reg_next_in_qty);
-
- return recorded_label_ref;
-}
-
-/* Used for communication between the following two functions: contains
- a MEM that we wish to ensure remains unchanged. */
-static rtx equiv_mem;
-
-/* Set nonzero if EQUIV_MEM is modified. */
-static int equiv_mem_modified;
-
-/* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
- Called via note_stores. */
-
-static void
-validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
- void *data ATTRIBUTE_UNUSED)
-{
- if ((REG_P (dest)
- && reg_overlap_mentioned_p (dest, equiv_mem))
- || (MEM_P (dest)
- && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p)))
- equiv_mem_modified = 1;
-}
-
-/* Verify that no store between START and the death of REG invalidates
- MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
- by storing into an overlapping memory location, or with a non-const
- CALL_INSN.
-
- Return 1 if MEMREF remains valid. */
-
-static int
-validate_equiv_mem (rtx start, rtx reg, rtx memref)
-{
- rtx insn;
- rtx note;
-
- equiv_mem = memref;
- equiv_mem_modified = 0;
-
- /* If the memory reference has side effects or is volatile, it isn't a
- valid equivalence. */
- if (side_effects_p (memref))
- return 0;
-
- for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn))
- {
- if (! INSN_P (insn))
- continue;
-
- if (find_reg_note (insn, REG_DEAD, reg))
- return 1;
-
- if (CALL_P (insn) && ! MEM_READONLY_P (memref)
- && ! RTL_CONST_OR_PURE_CALL_P (insn))
- return 0;
-
- note_stores (PATTERN (insn), validate_equiv_mem_from_store, NULL);
-
- /* If a register mentioned in MEMREF is modified via an
- auto-increment, we lose the equivalence. Do the same if one
- dies; although we could extend the life, it doesn't seem worth
- the trouble. */
-
- for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
- if ((REG_NOTE_KIND (note) == REG_INC
- || REG_NOTE_KIND (note) == REG_DEAD)
- && REG_P (XEXP (note, 0))
- && reg_overlap_mentioned_p (XEXP (note, 0), memref))
- return 0;
- }
-
- return 0;
-}
-
-/* Return nonzero if X may vary; return zero if X is known to be invariant. */
-
-static int
-equiv_init_varies_p (rtx x)
-{
- RTX_CODE code = GET_CODE (x);
- int i;
- const char *fmt;
-
- switch (code)
- {
- case MEM:
- return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
-
- case CONST:
- case CONST_INT:
- case CONST_DOUBLE:
- case CONST_FIXED:
- case CONST_VECTOR:
- case SYMBOL_REF:
- case LABEL_REF:
- return 0;
-
- case REG:
- return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
-
- case ASM_OPERANDS:
- if (MEM_VOLATILE_P (x))
- return 1;
-
- /* Fall through. */
-
- default:
- break;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- if (fmt[i] == 'e')
- {
- if (equiv_init_varies_p (XEXP (x, i)))
- return 1;
- }
- else if (fmt[i] == 'E')
- {
- int j;
- for (j = 0; j < XVECLEN (x, i); j++)
- if (equiv_init_varies_p (XVECEXP (x, i, j)))
- return 1;
- }
-
- return 0;
-}
-
-/* Returns nonzero if X (used to initialize register REGNO) is movable.
- X is movable only if the registers it uses have equivalent initializations
- that appear to be within the same loop (or in an inner loop) and are movable,
- or if they are not candidates for local_alloc and do not vary. */
-
-static int
-equiv_init_movable_p (rtx x, int regno)
-{
- int i, j;
- const char *fmt;
- enum rtx_code code = GET_CODE (x);
-
- switch (code)
- {
- case SET:
- return equiv_init_movable_p (SET_SRC (x), regno);
-
- case CC0:
- case CLOBBER:
- return 0;
-
- case PRE_INC:
- case PRE_DEC:
- case POST_INC:
- case POST_DEC:
- case PRE_MODIFY:
- case POST_MODIFY:
- return 0;
-
- case REG:
- return (reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
- && reg_equiv[REGNO (x)].replace)
- || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS && ! rtx_varies_p (x, 0));
-
- case UNSPEC_VOLATILE:
- return 0;
-
- case ASM_OPERANDS:
- if (MEM_VOLATILE_P (x))
- return 0;
-
- /* Fall through. */
-
- default:
- break;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- switch (fmt[i])
- {
- case 'e':
- if (! equiv_init_movable_p (XEXP (x, i), regno))
- return 0;
- break;
- case 'E':
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
- return 0;
- break;
- }
-
- return 1;
-}
-
-/* TRUE if X uses any registers for which reg_equiv[REGNO].replace is true. */
-
-static int
-contains_replace_regs (rtx x)
-{
- int i, j;
- const char *fmt;
- enum rtx_code code = GET_CODE (x);
-
- switch (code)
- {
- case CONST_INT:
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
- case CONST_DOUBLE:
- case CONST_FIXED:
- case CONST_VECTOR:
- case PC:
- case CC0:
- case HIGH:
- return 0;
-
- case REG:
- return reg_equiv[REGNO (x)].replace;
-
- default:
- break;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- switch (fmt[i])
- {
- case 'e':
- if (contains_replace_regs (XEXP (x, i)))
- return 1;
- break;
- case 'E':
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- if (contains_replace_regs (XVECEXP (x, i, j)))
- return 1;
- break;
- }
-
- return 0;
-}
-
-/* TRUE if X references a memory location that would be affected by a store
- to MEMREF. */
-
-static int
-memref_referenced_p (rtx memref, rtx x)
-{
- int i, j;
- const char *fmt;
- enum rtx_code code = GET_CODE (x);
-
- switch (code)
- {
- case CONST_INT:
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
- case CONST_DOUBLE:
- case CONST_FIXED:
- case CONST_VECTOR:
- case PC:
- case CC0:
- case HIGH:
- case LO_SUM:
- return 0;
-
- case REG:
- return (reg_equiv[REGNO (x)].replacement
- && memref_referenced_p (memref,
- reg_equiv[REGNO (x)].replacement));
-
- case MEM:
- if (true_dependence (memref, VOIDmode, x, rtx_varies_p))
- return 1;
- break;
-
- case SET:
- /* If we are setting a MEM, it doesn't count (its address does), but any
- other SET_DEST that has a MEM in it is referencing the MEM. */
- if (MEM_P (SET_DEST (x)))
- {
- if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
- return 1;
- }
- else if (memref_referenced_p (memref, SET_DEST (x)))
- return 1;
-
- return memref_referenced_p (memref, SET_SRC (x));
-
- default:
- break;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- switch (fmt[i])
- {
- case 'e':
- if (memref_referenced_p (memref, XEXP (x, i)))
- return 1;
- break;
- case 'E':
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- if (memref_referenced_p (memref, XVECEXP (x, i, j)))
- return 1;
- break;
- }
-
- return 0;
-}
-
-/* TRUE if some insn in the range (START, END] references a memory location
- that would be affected by a store to MEMREF. */
-
-static int
-memref_used_between_p (rtx memref, rtx start, rtx end)
-{
- rtx insn;
-
- for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
- insn = NEXT_INSN (insn))
- {
- if (!INSN_P (insn))
- continue;
-
- if (memref_referenced_p (memref, PATTERN (insn)))
- return 1;
-
- /* Nonconst functions may access memory. */
- if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
- return 1;
- }
-
- return 0;
-}
-
-/* Find registers that are equivalent to a single value throughout the
- compilation (either because they can be referenced in memory or are set once
- from a single constant). Lower their priority for a register.
-
- If such a register is only referenced once, try substituting its value
- into the using insn. If it succeeds, we can eliminate the register
- completely.
-
- Initialize the REG_EQUIV_INIT array of initializing insns.
-
- Return non-zero if jump label rebuilding should be done. */
-
-int
-update_equiv_regs (void)
-{
- rtx insn;
- basic_block bb;
- int loop_depth;
- bitmap cleared_regs;
-
- reg_equiv = XCNEWVEC (struct equivalence, max_regno);
- reg_equiv_init = GGC_CNEWVEC (rtx, max_regno);
- reg_equiv_init_size = max_regno;
-
- init_alias_analysis ();
-
- /* Scan the insns and find which registers have equivalences. Do this
- in a separate scan of the insns because (due to -fcse-follow-jumps)
- a register can be set below its use. */
- FOR_EACH_BB (bb)
- {
- loop_depth = bb->loop_depth;
-
- for (insn = BB_HEAD (bb);
- insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
- {
- rtx note;
- rtx set;
- rtx dest, src;
- int regno;
-
- if (! INSN_P (insn))
- continue;
-
- for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_INC)
- no_equiv (XEXP (note, 0), note, NULL);
-
- set = single_set (insn);
-
- /* If this insn contains more (or less) than a single SET,
- only mark all destinations as having no known equivalence. */
- if (set == 0)
- {
- note_stores (PATTERN (insn), no_equiv, NULL);
- continue;
- }
- else if (GET_CODE (PATTERN (insn)) == PARALLEL)
- {
- int i;
-
- for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
- {
- rtx part = XVECEXP (PATTERN (insn), 0, i);
- if (part != set)
- note_stores (part, no_equiv, NULL);
- }
- }
-
- dest = SET_DEST (set);
- src = SET_SRC (set);
-
- /* See if this is setting up the equivalence between an argument
- register and its stack slot. */
- note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
- if (note)
- {
- gcc_assert (REG_P (dest));
- regno = REGNO (dest);
-
- /* Note that we don't want to clear reg_equiv_init even if there
- are multiple sets of this register. */
- reg_equiv[regno].is_arg_equivalence = 1;
-
- /* Record for reload that this is an equivalencing insn. */
- if (rtx_equal_p (src, XEXP (note, 0)))
- reg_equiv_init[regno]
- = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[regno]);
-
- /* Continue normally in case this is a candidate for
- replacements. */
- }
-
- if (!optimize)
- continue;
-
- /* We only handle the case of a pseudo register being set
- once, or always to the same value. */
- /* ??? The mn10200 port breaks if we add equivalences for
- values that need an ADDRESS_REGS register and set them equivalent
- to a MEM of a pseudo. The actual problem is in the over-conservative
- handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
- calculate_needs, but we traditionally work around this problem
- here by rejecting equivalences when the destination is in a register
- that's likely spilled. This is fragile, of course, since the
- preferred class of a pseudo depends on all instructions that set
- or use it. */
-
- if (!REG_P (dest)
- || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
- || reg_equiv[regno].init_insns == const0_rtx
- || (CLASS_LIKELY_SPILLED_P (reg_preferred_class (regno))
- && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
- {
- /* This might be setting a SUBREG of a pseudo, a pseudo that is
- also set somewhere else to a constant. */
- note_stores (set, no_equiv, NULL);
- continue;
- }
-
- note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
-
- /* cse sometimes generates function invariants, but doesn't put a
- REG_EQUAL note on the insn. Since this note would be redundant,
- there's no point creating it earlier than here. */
- if (! note && ! rtx_varies_p (src, 0))
- note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
-
- /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
- since it represents a function call. */
- if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
- note = NULL_RTX;
-
- if (DF_REG_DEF_COUNT (regno) != 1
- && (! note
- || rtx_varies_p (XEXP (note, 0), 0)
- || (reg_equiv[regno].replacement
- && ! rtx_equal_p (XEXP (note, 0),
- reg_equiv[regno].replacement))))
- {
- no_equiv (dest, set, NULL);
- continue;
- }
- /* Record this insn as initializing this register. */
- reg_equiv[regno].init_insns
- = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
-
- /* If this register is known to be equal to a constant, record that
- it is always equivalent to the constant. */
- if (DF_REG_DEF_COUNT (regno) == 1
- && note && ! rtx_varies_p (XEXP (note, 0), 0))
- {
- rtx note_value = XEXP (note, 0);
- remove_note (insn, note);
- set_unique_reg_note (insn, REG_EQUIV, note_value);
- }
-
- /* If this insn introduces a "constant" register, decrease the priority
- of that register. Record this insn if the register is only used once
- more and the equivalence value is the same as our source.
-
- The latter condition is checked for two reasons: First, it is an
- indication that it may be more efficient to actually emit the insn
- as written (if no registers are available, reload will substitute
- the equivalence). Secondly, it avoids problems with any registers
- dying in this insn whose death notes would be missed.
-
- If we don't have a REG_EQUIV note, see if this insn is loading
- a register used only in one basic block from a MEM. If so, and the
- MEM remains unchanged for the life of the register, add a REG_EQUIV
- note. */
-
- note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
-
- if (note == 0 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
- && MEM_P (SET_SRC (set))
- && validate_equiv_mem (insn, dest, SET_SRC (set)))
- note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (SET_SRC (set)));
-
- if (note)
- {
- int regno = REGNO (dest);
- rtx x = XEXP (note, 0);
-
- /* If we haven't done so, record for reload that this is an
- equivalencing insn. */
- if (!reg_equiv[regno].is_arg_equivalence)
- reg_equiv_init[regno]
- = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[regno]);
-
- /* Record whether or not we created a REG_EQUIV note for a LABEL_REF.
- We might end up substituting the LABEL_REF for uses of the
- pseudo here or later. That kind of transformation may turn an
- indirect jump into a direct jump, in which case we must rerun the
- jump optimizer to ensure that the JUMP_LABEL fields are valid. */
- if (GET_CODE (x) == LABEL_REF
- || (GET_CODE (x) == CONST
- && GET_CODE (XEXP (x, 0)) == PLUS
- && (GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF)))
- recorded_label_ref = 1;
-
- reg_equiv[regno].replacement = x;
- reg_equiv[regno].src_p = &SET_SRC (set);
- reg_equiv[regno].loop_depth = loop_depth;
-
- /* Don't mess with things live during setjmp. */
- if (REG_LIVE_LENGTH (regno) >= 0 && optimize)
- {
- /* Note that the statement below does not affect the priority
- in local-alloc! */
- REG_LIVE_LENGTH (regno) *= 2;
-
- /* If the register is referenced exactly twice, meaning it is
- set once and used once, indicate that the reference may be
- replaced by the equivalence we computed above. Do this
- even if the register is only used in one block so that
- dependencies can be handled where the last register is
- used in a different block (i.e. HIGH / LO_SUM sequences)
- and to reduce the number of registers alive across
- calls. */
-
- if (REG_N_REFS (regno) == 2
- && (rtx_equal_p (x, src)
- || ! equiv_init_varies_p (src))
- && NONJUMP_INSN_P (insn)
- && equiv_init_movable_p (PATTERN (insn), regno))
- reg_equiv[regno].replace = 1;
- }
- }
- }
- }
-
- if (!optimize)
- goto out;
-
- /* A second pass, to gather additional equivalences with memory. This needs
- to be done after we know which registers we are going to replace. */
-
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- {
- rtx set, src, dest;
- unsigned regno;
-
- if (! INSN_P (insn))
- continue;
-
- set = single_set (insn);
- if (! set)
- continue;
-
- dest = SET_DEST (set);
- src = SET_SRC (set);
-
- /* If this sets a MEM to the contents of a REG that is only used
- in a single basic block, see if the register is always equivalent
- to that memory location and if moving the store from INSN to the
- insn that set REG is safe. If so, put a REG_EQUIV note on the
- initializing insn.
-
- Don't add a REG_EQUIV note if the insn already has one. The existing
- REG_EQUIV is likely more useful than the one we are adding.
-
- If one of the regs in the address has reg_equiv[REGNO].replace set,
- then we can't add this REG_EQUIV note. The reg_equiv[REGNO].replace
- optimization may move the set of this register immediately before
- insn, which puts it after reg_equiv[REGNO].init_insns, and hence
- the mention in the REG_EQUIV note would be to an uninitialized
- pseudo. */
-
- if (MEM_P (dest) && REG_P (src)
- && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
- && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
- && DF_REG_DEF_COUNT (regno) == 1
- && reg_equiv[regno].init_insns != 0
- && reg_equiv[regno].init_insns != const0_rtx
- && ! find_reg_note (XEXP (reg_equiv[regno].init_insns, 0),
- REG_EQUIV, NULL_RTX)
- && ! contains_replace_regs (XEXP (dest, 0)))
- {
- rtx init_insn = XEXP (reg_equiv[regno].init_insns, 0);
- if (validate_equiv_mem (init_insn, src, dest)
- && ! memref_used_between_p (dest, init_insn, insn)
- /* Attaching a REG_EQUIV note will fail if INIT_INSN has
- multiple sets. */
- && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
- {
- /* This insn makes the equivalence, not the one initializing
- the register. */
- reg_equiv_init[regno]
- = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
- df_notes_rescan (init_insn);
- }
- }
- }
-
- cleared_regs = BITMAP_ALLOC (NULL);
- /* Now scan all regs killed in an insn to see if any of them are
- registers only used that once. If so, see if we can replace the
- reference with the equivalent form. If we can, delete the
- initializing reference and this register will go away. If we
- can't replace the reference, and the initializing reference is
- within the same loop (or in an inner loop), then move the register
- initialization just before the use, so that they are in the same
- basic block. */
- FOR_EACH_BB_REVERSE (bb)
- {
- loop_depth = bb->loop_depth;
- for (insn = BB_END (bb);
- insn != PREV_INSN (BB_HEAD (bb));
- insn = PREV_INSN (insn))
- {
- rtx link;
-
- if (! INSN_P (insn))
- continue;
-
- /* Don't substitute into a non-local goto, this confuses CFG. */
- if (JUMP_P (insn)
- && find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
- continue;
-
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- {
- if (REG_NOTE_KIND (link) == REG_DEAD
- /* Make sure this insn still refers to the register. */
- && reg_mentioned_p (XEXP (link, 0), PATTERN (insn)))
- {
- int regno = REGNO (XEXP (link, 0));
- rtx equiv_insn;
-
- if (! reg_equiv[regno].replace
- || reg_equiv[regno].loop_depth < loop_depth)
- continue;
-
- /* reg_equiv[REGNO].replace gets set only when
- REG_N_REFS[REGNO] is 2, i.e. the register is set
- once and used once. (If it were only set, but not used,
- flow would have deleted the setting insns.) Hence
- there can only be one insn in reg_equiv[REGNO].init_insns. */
- gcc_assert (reg_equiv[regno].init_insns
- && !XEXP (reg_equiv[regno].init_insns, 1));
- equiv_insn = XEXP (reg_equiv[regno].init_insns, 0);
-
- /* We may not move instructions that can throw, since
- that changes basic block boundaries and we are not
- prepared to adjust the CFG to match. */
- if (can_throw_internal (equiv_insn))
- continue;
-
- if (asm_noperands (PATTERN (equiv_insn)) < 0
- && validate_replace_rtx (regno_reg_rtx[regno],
- *(reg_equiv[regno].src_p), insn))
- {
- rtx equiv_link;
- rtx last_link;
- rtx note;
-
- /* Find the last note. */
- for (last_link = link; XEXP (last_link, 1);
- last_link = XEXP (last_link, 1))
- ;
-
- /* Append the REG_DEAD notes from equiv_insn. */
- equiv_link = REG_NOTES (equiv_insn);
- while (equiv_link)
- {
- note = equiv_link;
- equiv_link = XEXP (equiv_link, 1);
- if (REG_NOTE_KIND (note) == REG_DEAD)
- {
- remove_note (equiv_insn, note);
- XEXP (last_link, 1) = note;
- XEXP (note, 1) = NULL_RTX;
- last_link = note;
- }
- }
-
- remove_death (regno, insn);
- SET_REG_N_REFS (regno, 0);
- REG_FREQ (regno) = 0;
- delete_insn (equiv_insn);
-
- reg_equiv[regno].init_insns
- = XEXP (reg_equiv[regno].init_insns, 1);
-
- reg_equiv_init[regno] = NULL_RTX;
- bitmap_set_bit (cleared_regs, regno);
- }
- /* Move the initialization of the register to just before
- INSN. Update the flow information. */
- else if (PREV_INSN (insn) != equiv_insn)
- {
- rtx new_insn;
-
- new_insn = emit_insn_before (PATTERN (equiv_insn), insn);
- REG_NOTES (new_insn) = REG_NOTES (equiv_insn);
- REG_NOTES (equiv_insn) = 0;
- /* Rescan it to process the notes. */
- df_insn_rescan (new_insn);
-
- /* Make sure this insn is recognized before
- reload begins, otherwise
- eliminate_regs_in_insn will die. */
- INSN_CODE (new_insn) = INSN_CODE (equiv_insn);
-
- delete_insn (equiv_insn);
-
- XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
-
- REG_BASIC_BLOCK (regno) = bb->index;
- REG_N_CALLS_CROSSED (regno) = 0;
- REG_FREQ_CALLS_CROSSED (regno) = 0;
- REG_N_THROWING_CALLS_CROSSED (regno) = 0;
- REG_LIVE_LENGTH (regno) = 2;
-
- if (insn == BB_HEAD (bb))
- BB_HEAD (bb) = PREV_INSN (insn);
-
- reg_equiv_init[regno]
- = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
- bitmap_set_bit (cleared_regs, regno);
- }
- }
- }
- }
- }
-
- if (!bitmap_empty_p (cleared_regs))
- FOR_EACH_BB (bb)
- {
- bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
- bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
- bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
- bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
- }
-
- BITMAP_FREE (cleared_regs);
-
- out:
- /* Clean up. */
-
- end_alias_analysis ();
- free (reg_equiv);
- return recorded_label_ref;
-}
-
-/* Mark REG as having no known equivalence.
- Some instructions might have been processed before and furnished
- with REG_EQUIV notes for this register; these notes will have to be
- removed.
- STORE is the piece of RTL that does the non-constant / conflicting
- assignment - a SET, CLOBBER or REG_INC note. It is currently not used,
- but needs to be there because this function is called from note_stores. */
-static void
-no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED)
-{
- int regno;
- rtx list;
-
- if (!REG_P (reg))
- return;
- regno = REGNO (reg);
- list = reg_equiv[regno].init_insns;
- if (list == const0_rtx)
- return;
- reg_equiv[regno].init_insns = const0_rtx;
- reg_equiv[regno].replacement = NULL_RTX;
- /* This doesn't matter for equivalences made for argument registers;
- we should keep their initialization insns. */
- if (reg_equiv[regno].is_arg_equivalence)
- return;
- reg_equiv_init[regno] = NULL_RTX;
- for (; list; list = XEXP (list, 1))
- {
- rtx insn = XEXP (list, 0);
- remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
- }
-}
-
-/* Allocate hard regs to the pseudo regs used only within block number B.
- Only the pseudos that die but once can be handled. */
-
-static void
-block_alloc (basic_block b)
-{
- int i, q;
- rtx insn;
- rtx hard_reg;
- int insn_number = 0;
- int insn_count = 0;
- int max_uid = get_max_uid ();
- int *qty_order;
- df_ref *def_rec;
-
- /* Count the instructions in the basic block. */
-
- insn = BB_END (b);
- while (1)
- {
- if (!NOTE_P (insn))
- {
- ++insn_count;
- gcc_assert (insn_count <= max_uid);
- }
- if (insn == BB_HEAD (b))
- break;
- insn = PREV_INSN (insn);
- }
-
- /* +2 to leave room for a post_mark_life at the last insn and for
- the birth of a CLOBBER in the first insn. */
- regs_live_at = XCNEWVEC (HARD_REG_SET, 2 * insn_count + 2);
-
- /* Initialize table of hardware registers currently live. */
-
- REG_SET_TO_HARD_REG_SET (regs_live, DF_LR_IN (b));
-
- /* This is conservative, as this would include registers that are
- artificial-def'ed-but-not-used. However, artificial-defs are
- rare, and such uninitialized use is rarer still, and the chance
- of this having any performance impact is even less, while the
- benefit is not having to compute and keep the TOP set around. */
- for (def_rec = df_get_artificial_defs (b->index); *def_rec; def_rec++)
- {
- int regno = DF_REF_REGNO (*def_rec);
- if (regno < FIRST_PSEUDO_REGISTER)
- SET_HARD_REG_BIT (regs_live, regno);
- }
-
- /* This loop scans the instructions of the basic block
- and assigns quantities to registers.
- It computes which registers to tie. */
-
- insn = BB_HEAD (b);
- while (1)
- {
- if (!NOTE_P (insn))
- insn_number++;
-
- if (INSN_P (insn))
- {
- rtx link;
- int win = 0;
- rtx r0, r1 = NULL_RTX;
- int combined_regno = -1;
- int i;
-
- this_insn_number = insn_number;
- this_insn = insn;
-
- extract_insn (insn);
- which_alternative = -1;
-
- /* Is this insn suitable for tying two registers?
- If so, try doing that.
- Suitable insns are those with at least two operands and where
- operand 0 is an output that is a register that is not
- earlyclobber.
-
- We can tie operand 0 with some operand that dies in this insn.
- First look for operands that are required to be in the same
- register as operand 0. If we find such, only try tying that
- operand or one that can be put into that operand if the
- operation is commutative. If we don't find an operand
- that is required to be in the same register as operand 0,
- we can tie with any operand.
-
- Subregs in place of regs are also ok.
-
- If tying is done, WIN is set nonzero. */
-
- if (optimize
- && recog_data.n_operands > 1
- && recog_data.constraints[0][0] == '='
- && recog_data.constraints[0][1] != '&')
- {
- /* If non-negative, is an operand that must match operand 0. */
- int must_match_0 = -1;
- /* Counts number of alternatives that require a match with
- operand 0. */
- int n_matching_alts = 0;
-
- for (i = 1; i < recog_data.n_operands; i++)
- {
- const char *p = recog_data.constraints[i];
- int this_match = requires_inout (p);
-
- n_matching_alts += this_match;
- if (this_match == recog_data.n_alternatives)
- must_match_0 = i;
- }
-
- r0 = recog_data.operand[0];
- for (i = 1; i < recog_data.n_operands; i++)
- {
- /* Skip this operand if we found an operand that
- must match operand 0 and this operand isn't it
- and can't be made to be it by commutativity. */
-
- if (must_match_0 >= 0 && i != must_match_0
- && ! (i == must_match_0 + 1
- && recog_data.constraints[i-1][0] == '%')
- && ! (i == must_match_0 - 1
- && recog_data.constraints[i][0] == '%'))
- continue;
-
- /* Likewise if each alternative has some operand that
- must match operand zero. In that case, skip any
- operand that doesn't list operand 0 since we know that
- the operand always conflicts with operand 0. We
- ignore commutativity in this case to keep things simple. */
- if (n_matching_alts == recog_data.n_alternatives
- && 0 == requires_inout (recog_data.constraints[i]))
- continue;
-
- r1 = recog_data.operand[i];
-
- /* If the operand is an address, find a register in it.
- There may be more than one register, but we only try one
- of them. */
- if (recog_data.constraints[i][0] == 'p'
- || EXTRA_ADDRESS_CONSTRAINT (recog_data.constraints[i][0],
- recog_data.constraints[i]))
- while (GET_CODE (r1) == PLUS || GET_CODE (r1) == MULT)
- r1 = XEXP (r1, 0);
-
- /* Avoid making a call-saved register unnecessarily
- clobbered. */
- hard_reg = get_hard_reg_initial_reg (r1);
- if (hard_reg != NULL_RTX)
- {
- if (REG_P (hard_reg)
- && REGNO (hard_reg) < FIRST_PSEUDO_REGISTER
- && !call_used_regs[REGNO (hard_reg)])
- continue;
- }
-
- if (REG_P (r0) || GET_CODE (r0) == SUBREG)
- {
- /* We have two priorities for hard register preferences.
- If we have a move insn or an insn whose first input
- can only be in the same register as the output, give
- priority to an equivalence found from that insn. */
- int may_save_copy
- = (r1 == recog_data.operand[i] && must_match_0 >= 0);
-
- if (REG_P (r1) || GET_CODE (r1) == SUBREG)
- win = combine_regs (r1, r0, may_save_copy,
- insn_number, insn);
- }
- if (win)
- break;
- }
- }
-
- /* If registers were just tied, set COMBINED_REGNO
- to the number of the register used in this insn
- that was tied to the register set in this insn.
- This register's qty should not be "killed". */
-
- if (win)
- {
- while (GET_CODE (r1) == SUBREG)
- r1 = SUBREG_REG (r1);
- combined_regno = REGNO (r1);
- }
-
- /* Mark the death of everything that dies in this instruction. */
-
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == REG_DEAD
- && REG_P (XEXP (link, 0))
- && combined_regno != (int) REGNO (XEXP (link, 0)))
- wipe_dead_reg (XEXP (link, 0), 0);
-
- /* Allocate qty numbers for all registers local to this block
- that are born (set) in this instruction.
- A pseudo that already has a qty is not changed. */
-
- note_stores (PATTERN (insn), reg_is_set, NULL);
-
- /* If anything is set in this insn and then unused, mark it as dying
- after this insn, so it will conflict with our outputs. This
- can't match with something that combined, and it doesn't matter
- if it did. Do this after the calls to reg_is_set since these
- die after, not during, the current insn. */
-
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == REG_UNUSED
- && REG_P (XEXP (link, 0)))
- wipe_dead_reg (XEXP (link, 0), 1);
- }
-
- /* Set the registers live after INSN_NUMBER. Note that we never
- record the registers live before the block's first insn, since no
- pseudos we care about are live before that insn. */
-
- IOR_HARD_REG_SET (regs_live_at[2 * insn_number], regs_live);
- IOR_HARD_REG_SET (regs_live_at[2 * insn_number + 1], regs_live);
-
- if (insn == BB_END (b))
- break;
-
- insn = NEXT_INSN (insn);
- }
-
- /* Now every register that is local to this basic block
- should have been given a quantity, or else -1 meaning ignore it.
- Every quantity should have a known birth and death.
-
- Order the qtys so we assign them registers in order of the
- number of suggested registers they need, so that those with the
- most restrictive needs are allocated first. */
-
- qty_order = XNEWVEC (int, next_qty);
- for (i = 0; i < next_qty; i++)
- qty_order[i] = i;
-
-#define EXCHANGE(I1, I2) \
- { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; }
-
- switch (next_qty)
- {
- case 3:
- /* Make qty_order[2] be the one to allocate last. */
- if (qty_sugg_compare (0, 1) > 0)
- EXCHANGE (0, 1);
- if (qty_sugg_compare (1, 2) > 0)
- EXCHANGE (2, 1);
-
- /* ... Fall through ... */
- case 2:
- /* Put the best one to allocate in qty_order[0]. */
- if (qty_sugg_compare (0, 1) > 0)
- EXCHANGE (0, 1);
-
- /* ... Fall through ... */
-
- case 1:
- case 0:
- /* Nothing to do here. */
- break;
-
- default:
- qsort (qty_order, next_qty, sizeof (int), qty_sugg_compare_1);
- }
-
- /* Try to put each quantity in a suggested physical register, if it has one.
- This may cause registers to be allocated that otherwise wouldn't be, but
- this seems acceptable in local allocation (unlike global allocation). */
- for (i = 0; i < next_qty; i++)
- {
- q = qty_order[i];
- if (qty_phys_num_sugg[q] != 0 || qty_phys_num_copy_sugg[q] != 0)
- qty[q].phys_reg = find_free_reg (qty[q].min_class, qty[q].mode, q,
- 0, 1, qty[q].birth, qty[q].death, b);
- else
- qty[q].phys_reg = -1;
- }
-
- /* Order the qtys so we assign them registers in order of
- decreasing length of life. Normally call qsort, but if we
- have only a very small number of quantities, sort them ourselves. */
-
- for (i = 0; i < next_qty; i++)
- qty_order[i] = i;
-
-#define EXCHANGE(I1, I2) \
- { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; }
-
- switch (next_qty)
- {
- case 3:
- /* Make qty_order[2] be the one to allocate last. */
- if (qty_compare (0, 1) > 0)
- EXCHANGE (0, 1);
- if (qty_compare (1, 2) > 0)
- EXCHANGE (2, 1);
-
- /* ... Fall through ... */
- case 2:
- /* Put the best one to allocate in qty_order[0]. */
- if (qty_compare (0, 1) > 0)
- EXCHANGE (0, 1);
-
- /* ... Fall through ... */
-
- case 1:
- case 0:
- /* Nothing to do here. */
- break;
-
- default:
- qsort (qty_order, next_qty, sizeof (int), qty_compare_1);
- }
-
- /* Now for each qty that is not a hardware register,
- look for a hardware register to put it in.
- First try the register class that is cheapest for this qty,
- if there is more than one class. */
-
- for (i = 0; i < next_qty; i++)
- {
- q = qty_order[i];
- if (qty[q].phys_reg < 0)
- {
-#ifdef INSN_SCHEDULING
- /* These values represent the adjusted lifetime of a qty so
- that it conflicts with qtys which appear near the start/end
- of this qty's lifetime.
-
- The purpose behind extending the lifetime of this qty is to
- discourage the register allocator from creating false
- dependencies.
-
- The adjustment value is chosen to indicate that this qty
- conflicts with all the qtys in the instructions immediately
- before and after the lifetime of this qty.
-
- Experiments have shown that higher values tend to hurt
- overall code performance.
-
- If allocation using the extended lifetime fails we will try
- again with the qty's unadjusted lifetime. */
- int fake_birth = MAX (0, qty[q].birth - 2 + qty[q].birth % 2);
- int fake_death = MIN (insn_number * 2 + 1,
- qty[q].death + 2 - qty[q].death % 2);
-#endif
-
- if (N_REG_CLASSES > 1)
- {
-#ifdef INSN_SCHEDULING
- /* We try to avoid using hard registers allocated to qtys which
- are born immediately after this qty or die immediately before
- this qty.
-
- This optimization is only appropriate when we will run
- a scheduling pass after reload and we are not optimizing
- for code size. */
- if (flag_schedule_insns_after_reload && dbg_cnt (local_alloc_for_sched)
- && optimize_bb_for_speed_p (b)
- && !SMALL_REGISTER_CLASSES)
- {
- qty[q].phys_reg = find_free_reg (qty[q].min_class,
- qty[q].mode, q, 0, 0,
- fake_birth, fake_death, b);
- if (qty[q].phys_reg >= 0)
- continue;
- }
-#endif
- qty[q].phys_reg = find_free_reg (qty[q].min_class,
- qty[q].mode, q, 0, 0,
- qty[q].birth, qty[q].death, b);
- if (qty[q].phys_reg >= 0)
- continue;
- }
-
-#ifdef INSN_SCHEDULING
- /* Similarly, avoid false dependencies. */
- if (flag_schedule_insns_after_reload && dbg_cnt (local_alloc_for_sched)
- && optimize_bb_for_speed_p (b)
- && !SMALL_REGISTER_CLASSES
- && qty[q].alternate_class != NO_REGS)
- qty[q].phys_reg = find_free_reg (qty[q].alternate_class,
- qty[q].mode, q, 0, 0,
- fake_birth, fake_death, b);
-#endif
- if (qty[q].alternate_class != NO_REGS)
- qty[q].phys_reg = find_free_reg (qty[q].alternate_class,
- qty[q].mode, q, 0, 0,
- qty[q].birth, qty[q].death, b);
- }
- }
-
- /* Now propagate the register assignments
- to the pseudo regs belonging to the qtys. */
-
- for (q = 0; q < next_qty; q++)
- if (qty[q].phys_reg >= 0)
- {
- for (i = qty[q].first_reg; i >= 0; i = reg_next_in_qty[i])
- reg_renumber[i] = qty[q].phys_reg + reg_offset[i];
- }
-
- /* Clean up. */
- free (regs_live_at);
- free (qty_order);
-}
-
-/* Compare two quantities' priority for getting real registers.
- We give shorter-lived quantities higher priority.
- Quantities with more references are also preferred, as are quantities that
- require multiple registers. This is identical to the prioritization
- done by global-alloc.
-
- We used to give preference to registers with *longer* lives, but using
- the same algorithm in both local- and global-alloc can speed up execution
- of some programs by as much as a factor of three! */
-
-/* Note that the quotient will never be bigger than
- the value of floor_log2 times the maximum number of
- times a register can occur in one insn (surely less than 100)
- weighted by frequency (max REG_FREQ_MAX).
- Multiplying this by 10000/REG_FREQ_MAX can't overflow.
- QTY_CMP_PRI is also used by qty_sugg_compare. */
-
-#define QTY_CMP_PRI(q) \
- ((int) (((double) (floor_log2 (qty[q].n_refs) * qty[q].freq * qty[q].size) \
- / (qty[q].death - qty[q].birth)) * (10000 / REG_FREQ_MAX)))
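
Worked through with invented numbers, the priority formula behaves as follows; floor_log2 and REG_FREQ_MAX below are local stand-ins for the GCC definitions, with REG_FREQ_MAX assumed to be 10000 for the example:

#include <stdio.h>

#define REG_FREQ_MAX 10000   /* assumption for the example */

static int
floor_log2 (unsigned int x)  /* position of the highest set bit */
{
  int l = -1;
  while (x)
    {
      l++;
      x >>= 1;
    }
  return l;
}

int
main (void)
{
  /* Invented quantity data: 8 references, frequency 5000, single word,
     live from index 4 to index 24.  */
  int n_refs = 8, freq = 5000, size = 1, birth = 4, death = 24;

  int pri = (int) (((double) (floor_log2 (n_refs) * freq * size)
                    / (death - birth))
                   * (10000 / REG_FREQ_MAX));

  /* floor_log2 (8) = 3, so pri = 3 * 5000 * 1 / 20 = 750.  */
  printf ("priority = %d\n", pri);
  return 0;
}
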
-
-static int
-qty_compare (int q1, int q2)
-{
- return QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
-}
-
-static int
-qty_compare_1 (const void *q1p, const void *q2p)
-{
- int q1 = *(const int *) q1p, q2 = *(const int *) q2p;
- int tem = QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
-
- if (tem != 0)
- return tem;
-
- /* If qtys are equally good, sort by qty number,
- so that the results of qsort leave nothing to chance. */
- return q1 - q2;
-}
-
-/* Compare two quantities' priority for getting real registers. This version
- is called for quantities that have suggested hard registers. First priority
- goes to quantities that have copy preferences, then to those that have
- normal preferences. Within those groups, quantities with the lower
- number of preferences have the highest priority. Of those, we use the same
- algorithm as above. */
-
-#define QTY_CMP_SUGG(q) \
- (qty_phys_num_copy_sugg[q] \
- ? qty_phys_num_copy_sugg[q] \
- : qty_phys_num_sugg[q] * FIRST_PSEUDO_REGISTER)
-
-static int
-qty_sugg_compare (int q1, int q2)
-{
- int tem = QTY_CMP_SUGG (q1) - QTY_CMP_SUGG (q2);
-
- if (tem != 0)
- return tem;
-
- return QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
-}
-
-static int
-qty_sugg_compare_1 (const void *q1p, const void *q2p)
-{
- int q1 = *(const int *) q1p, q2 = *(const int *) q2p;
- int tem = QTY_CMP_SUGG (q1) - QTY_CMP_SUGG (q2);
-
- if (tem != 0)
- return tem;
-
- tem = QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1);
- if (tem != 0)
- return tem;
-
- /* If qtys are equally good, sort by qty number,
- so that the results of qsort leave nothing to chance. */
- return q1 - q2;
-}
-
-#undef QTY_CMP_SUGG
-#undef QTY_CMP_PRI
-
-/* Attempt to combine the two registers (rtx's) USEDREG and SETREG.
- Returns 1 if have done so, or 0 if cannot.
-
- Combining registers means marking them as having the same quantity
- and adjusting the offsets within the quantity if either of
- them is a SUBREG.
-
- We don't actually combine a hard reg with a pseudo; instead
- we just record the hard reg as the suggestion for the pseudo's quantity.
- If we really combined them, we could lose if the pseudo lives
- across an insn that clobbers the hard reg (e.g., movmem).
-
- MAY_SAVE_COPY is nonzero if this insn is simply copying USEDREG to
- SETREG or if the input and output must share a register.
- In that case, we record a hard reg suggestion in QTY_PHYS_COPY_SUGG.
-
- There are elaborate checks for the validity of combining. */
-
-static int
-combine_regs (rtx usedreg, rtx setreg, int may_save_copy, int insn_number,
- rtx insn)
-{
- int ureg, sreg;
- int offset = 0;
- int usize, ssize;
- int sqty;
-
- /* Determine the numbers and sizes of registers being used. If a subreg
- is present that does not change the entire register, don't consider
- this a copy insn. */
-
- while (GET_CODE (usedreg) == SUBREG)
- {
- rtx subreg = SUBREG_REG (usedreg);
-
- if (REG_P (subreg))
- {
- if (GET_MODE_SIZE (GET_MODE (subreg)) > UNITS_PER_WORD)
- may_save_copy = 0;
-
- if (REGNO (subreg) < FIRST_PSEUDO_REGISTER)
- offset += subreg_regno_offset (REGNO (subreg),
- GET_MODE (subreg),
- SUBREG_BYTE (usedreg),
- GET_MODE (usedreg));
- else
- offset += (SUBREG_BYTE (usedreg)
- / REGMODE_NATURAL_SIZE (GET_MODE (usedreg)));
- }
-
- usedreg = subreg;
- }
-
- if (!REG_P (usedreg))
- return 0;
-
- ureg = REGNO (usedreg);
- if (ureg < FIRST_PSEUDO_REGISTER)
- usize = hard_regno_nregs[ureg][GET_MODE (usedreg)];
- else
- usize = ((GET_MODE_SIZE (GET_MODE (usedreg))
- + (REGMODE_NATURAL_SIZE (GET_MODE (usedreg)) - 1))
- / REGMODE_NATURAL_SIZE (GET_MODE (usedreg)));
-
- while (GET_CODE (setreg) == SUBREG)
- {
- rtx subreg = SUBREG_REG (setreg);
-
- if (REG_P (subreg))
- {
- if (GET_MODE_SIZE (GET_MODE (subreg)) > UNITS_PER_WORD)
- may_save_copy = 0;
-
- if (REGNO (subreg) < FIRST_PSEUDO_REGISTER)
- offset -= subreg_regno_offset (REGNO (subreg),
- GET_MODE (subreg),
- SUBREG_BYTE (setreg),
- GET_MODE (setreg));
- else
- offset -= (SUBREG_BYTE (setreg)
- / REGMODE_NATURAL_SIZE (GET_MODE (setreg)));
- }
-
- setreg = subreg;
- }
-
- if (!REG_P (setreg))
- return 0;
-
- sreg = REGNO (setreg);
- if (sreg < FIRST_PSEUDO_REGISTER)
- ssize = hard_regno_nregs[sreg][GET_MODE (setreg)];
- else
- ssize = ((GET_MODE_SIZE (GET_MODE (setreg))
- + (REGMODE_NATURAL_SIZE (GET_MODE (setreg)) - 1))
- / REGMODE_NATURAL_SIZE (GET_MODE (setreg)));
-
- /* If UREG is a pseudo-register that hasn't already been assigned a
- quantity number, it means that it is not local to this block or dies
- more than once. In either event, we can't do anything with it. */
- if ((ureg >= FIRST_PSEUDO_REGISTER && reg_qty[ureg] < 0)
- /* Do not combine registers unless one fits within the other. */
- || (offset > 0 && usize + offset > ssize)
- || (offset < 0 && usize + offset < ssize)
- /* Do not combine with a smaller already-assigned object
- if that smaller object is already combined with something bigger. */
- || (ssize > usize && ureg >= FIRST_PSEUDO_REGISTER
- && usize < qty[reg_qty[ureg]].size)
- /* Can't combine if SREG is not a register we can allocate. */
- || (sreg >= FIRST_PSEUDO_REGISTER && reg_qty[sreg] == -1)
- /* Don't tie something to itself. In most cases it would make no
- difference, but it would screw up if the reg being tied to itself
- also dies in this insn. */
- || ureg == sreg
- /* Don't try to connect two different hardware registers. */
- || (ureg < FIRST_PSEUDO_REGISTER && sreg < FIRST_PSEUDO_REGISTER)
- /* Don't connect two different machine modes if they have different
- implications as to which registers may be used. */
- || !MODES_TIEABLE_P (GET_MODE (usedreg), GET_MODE (setreg)))
- return 0;
-
- /* Now, if UREG is a hard reg and SREG is a pseudo, record the hard reg in
- qty_phys_sugg for the pseudo instead of tying them.
-
- Return "failure" so that the lifespan of UREG is terminated here;
- that way the two lifespans will be disjoint and nothing will prevent
- the pseudo reg from being given this hard reg. */
-
- if (ureg < FIRST_PSEUDO_REGISTER)
- {
- /* Allocate a quantity number so we have a place to put our
- suggestions. */
- if (reg_qty[sreg] == -2)
- reg_is_born (setreg, 2 * insn_number);
-
- if (reg_qty[sreg] >= 0)
- {
- if (may_save_copy
- && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg))
- {
- SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg);
- qty_phys_num_copy_sugg[reg_qty[sreg]]++;
- }
- else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg))
- {
- SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg);
- qty_phys_num_sugg[reg_qty[sreg]]++;
- }
- }
- return 0;
- }
-
- /* Similarly for SREG a hard register and UREG a pseudo register. */
-
- if (sreg < FIRST_PSEUDO_REGISTER)
- {
- if (may_save_copy
- && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg))
- {
- SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg);
- qty_phys_num_copy_sugg[reg_qty[ureg]]++;
- }
- else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg))
- {
- SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg);
- qty_phys_num_sugg[reg_qty[ureg]]++;
- }
- return 0;
- }
-
- /* At this point we know that SREG and UREG are both pseudos.
- Do nothing if SREG already has a quantity or is a register that we
- don't allocate. */
- if (reg_qty[sreg] >= -1
- /* If we are not going to let any regs live across calls,
- don't tie a call-crossing reg to a non-call-crossing reg. */
- || (cfun->has_nonlocal_label
- && ((REG_N_CALLS_CROSSED (ureg) > 0)
- != (REG_N_CALLS_CROSSED (sreg) > 0))))
- return 0;
-
- /* We don't already know about SREG, so tie it to UREG
- if this is the last use of UREG, provided the classes they want
- are compatible. */
-
- if (find_regno_note (insn, REG_DEAD, ureg)
- && reg_meets_class_p (sreg, qty[reg_qty[ureg]].min_class))
- {
- /* Add SREG to UREG's quantity. */
- sqty = reg_qty[ureg];
- reg_qty[sreg] = sqty;
- reg_offset[sreg] = reg_offset[ureg] + offset;
- reg_next_in_qty[sreg] = qty[sqty].first_reg;
- qty[sqty].first_reg = sreg;
-
- /* If SREG's reg class is smaller, set qty[SQTY].min_class. */
- update_qty_class (sqty, sreg);
-
- /* Update info about quantity SQTY. */
- qty[sqty].n_calls_crossed += REG_N_CALLS_CROSSED (sreg);
- qty[sqty].freq_calls_crossed += REG_FREQ_CALLS_CROSSED (sreg);
- qty[sqty].n_throwing_calls_crossed
- += REG_N_THROWING_CALLS_CROSSED (sreg);
- qty[sqty].n_refs += REG_N_REFS (sreg);
- qty[sqty].freq += REG_FREQ (sreg);
- if (usize < ssize)
- {
- int i;
-
- for (i = qty[sqty].first_reg; i >= 0; i = reg_next_in_qty[i])
- reg_offset[i] -= offset;
-
- qty[sqty].size = ssize;
- qty[sqty].mode = GET_MODE (setreg);
- }
- }
- else
- return 0;
-
- return 1;
-}
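
  To make the quantity bookkeeping above concrete, here is a minimal standalone
  sketch (not GCC code; the array sizes, register numbers and the simplified
  qty_first_reg array are invented for illustration) of what the "Add SREG to
  UREG's quantity" step amounts to: both pseudos end up with the same quantity
  number, SREG records its byte offset within the quantity, and SREG is linked
  onto the quantity's register list.

  #include <stdio.h>

  #define NREGS 8

  static int reg_qty[NREGS];         /* quantity number of each pseudo */
  static int reg_offset[NREGS];      /* offset of the pseudo within its quantity */
  static int reg_next_in_qty[NREGS]; /* next pseudo in the same quantity, -1 = end */
  static int qty_first_reg[NREGS];   /* first pseudo of each quantity */

  int
  main (void)
  {
    int ureg = 3, sreg = 5, offset = 0;

    /* UREG already owns quantity 0 on its own.  */
    reg_qty[ureg] = 0;
    reg_offset[ureg] = 0;
    reg_next_in_qty[ureg] = -1;
    qty_first_reg[0] = ureg;

    /* The tying step, mirroring the "Add SREG to UREG's quantity" code.  */
    int sqty = reg_qty[ureg];
    reg_qty[sreg] = sqty;
    reg_offset[sreg] = reg_offset[ureg] + offset;
    reg_next_in_qty[sreg] = qty_first_reg[sqty];
    qty_first_reg[sqty] = sreg;

    printf ("qty of %d and %d: %d %d; first reg of qty %d: %d\n",
            ureg, sreg, reg_qty[ureg], reg_qty[sreg], sqty, qty_first_reg[sqty]);
    return 0;
  }
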
-
-/* Return 1 if the preferred class of REG allows it to be tied
- to a quantity or register whose class is CLASS.
- True if REG's reg class either contains or is contained in CLASS. */
-
-static int
-reg_meets_class_p (int reg, enum reg_class rclass)
-{
- enum reg_class rclass2 = reg_preferred_class (reg);
- return (reg_class_subset_p (rclass2, rclass)
- || reg_class_subset_p (rclass, rclass2));
-}
-
-/* Update the class of QTYNO assuming that REG is being tied to it. */
-
-static void
-update_qty_class (int qtyno, int reg)
-{
- enum reg_class rclass = reg_preferred_class (reg);
- if (reg_class_subset_p (rclass, qty[qtyno].min_class))
- qty[qtyno].min_class = rclass;
-
- rclass = reg_alternate_class (reg);
- if (reg_class_subset_p (rclass, qty[qtyno].alternate_class))
- qty[qtyno].alternate_class = rclass;
-}
-
-/* Handle something which alters the value of an rtx REG.
-
- REG is whatever is set or clobbered. SETTER is the rtx that
- is modifying the register.
-
- If it is not really a register, we do nothing.
- The file-global variables `this_insn' and `this_insn_number'
- carry info from `block_alloc'. */
-
-static void
-reg_is_set (rtx reg, const_rtx setter, void *data ATTRIBUTE_UNUSED)
-{
- /* Note that note_stores will only pass us a SUBREG if it is a SUBREG of
- a hard register. These may actually not exist any more. */
-
- if (GET_CODE (reg) != SUBREG
- && !REG_P (reg))
- return;
-
- /* Mark this register as being born. If it is used in a CLOBBER, mark
- it as being born halfway between the previous insn and this insn so that
- it conflicts with our inputs but not the outputs of the previous insn. */
-
- reg_is_born (reg, 2 * this_insn_number - (GET_CODE (setter) == CLOBBER));
-}
-
-/* Handle beginning of the life of register REG.
- BIRTH is the index at which this is happening. */
-
-static void
-reg_is_born (rtx reg, int birth)
-{
- int regno;
-
- if (GET_CODE (reg) == SUBREG)
- {
- regno = REGNO (SUBREG_REG (reg));
- if (regno < FIRST_PSEUDO_REGISTER)
- regno = subreg_regno (reg);
- }
- else
- regno = REGNO (reg);
-
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- mark_life (regno, GET_MODE (reg), 1);
-
- /* If the register was to have been born earlier than the present
- insn, mark it as live where it is actually born. */
- if (birth < 2 * this_insn_number)
- post_mark_life (regno, GET_MODE (reg), 1, birth, 2 * this_insn_number);
- }
- else
- {
- if (reg_qty[regno] == -2)
- alloc_qty (regno, GET_MODE (reg), PSEUDO_REGNO_SIZE (regno), birth);
-
- /* If this register has a quantity number, show that it isn't dead. */
- if (reg_qty[regno] >= 0)
- qty[reg_qty[regno]].death = -1;
- }
-}
-
-/* Record the death of REG in the current insn. If OUTPUT_P is nonzero,
- REG is an output that is dying (i.e., it is never used), otherwise it
- is an input (the normal case).
- If OUTPUT_P is 1, then we extend the life past the end of this insn. */
-
-static void
-wipe_dead_reg (rtx reg, int output_p)
-{
- int regno = REGNO (reg);
-
- /* If this insn has multiple results,
- and the dead reg is used in one of the results,
- extend its life to after this insn,
- so it won't get allocated together with any other result of this insn.
-
- It is unsafe to use !single_set here since it will ignore an unused
- output. Just because an output is unused does not mean the compiler
- can assume the side effect will not occur. Consider if REG appears
- in the address of an output and we reload the output. If we allocate
- REG to the same hard register as an unused output we could set the hard
- register before the output reload insn. */
- if (GET_CODE (PATTERN (this_insn)) == PARALLEL
- && multiple_sets (this_insn))
- {
- int i;
- for (i = XVECLEN (PATTERN (this_insn), 0) - 1; i >= 0; i--)
- {
- rtx set = XVECEXP (PATTERN (this_insn), 0, i);
- if (GET_CODE (set) == SET
- && !REG_P (SET_DEST (set))
- && !rtx_equal_p (reg, SET_DEST (set))
- && reg_overlap_mentioned_p (reg, SET_DEST (set)))
- output_p = 1;
- }
- }
-
- /* If this register is used in an auto-increment address, then extend its
- life to after this insn, so that it won't get allocated together with
- the result of this insn. */
- if (! output_p && find_regno_note (this_insn, REG_INC, regno))
- output_p = 1;
-
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- mark_life (regno, GET_MODE (reg), 0);
-
- /* If a hard register is dying as an output, mark it as in use at
- the beginning of this insn (the above statement would cause this
- not to happen). */
- if (output_p)
- post_mark_life (regno, GET_MODE (reg), 1,
- 2 * this_insn_number, 2 * this_insn_number + 1);
- }
-
- else if (reg_qty[regno] >= 0)
- qty[reg_qty[regno]].death = 2 * this_insn_number + output_p;
-}
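
  The birth and death indices used by reg_is_set, reg_is_born and wipe_dead_reg
  above all come from one scheme: every insn N owns two life slots, 2*N and
  2*N+1.  A tiny standalone sketch (illustration only; the insn number is
  arbitrary) of the values those functions compute:

  #include <stdio.h>

  int
  main (void)
  {
    int this_insn_number = 7;

    int birth_normal  = 2 * this_insn_number;      /* born in this insn */
    int birth_clobber = 2 * this_insn_number - 1;  /* CLOBBER: halfway before it */
    int death_input   = 2 * this_insn_number;      /* dying input (output_p == 0) */
    int death_output  = 2 * this_insn_number + 1;  /* dying output (output_p == 1) */

    printf ("births: %d (set), %d (clobber); deaths: %d (input), %d (output)\n",
            birth_normal, birth_clobber, death_input, death_output);
    return 0;
  }
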
-
-/* Find a block of SIZE words of hard regs in reg_class CLASS
- that can hold something of machine-mode MODE
- (but actually we test only the first of the block for holding MODE)
- and still free between insn BORN_INDEX and insn DEAD_INDEX,
- and return the number of the first of them.
- Return -1 if such a block cannot be found.
- If QTYNO crosses calls, insist on a register preserved by calls,
- unless ACCEPT_CALL_CLOBBERED is nonzero.
-
- If JUST_TRY_SUGGESTED is nonzero, only try to see if the suggested
- register is available. If not, return -1. */
-
-static int
-find_free_reg (enum reg_class rclass, enum machine_mode mode, int qtyno,
- int accept_call_clobbered, int just_try_suggested,
- int born_index, int dead_index, basic_block bb)
-{
- int i, ins;
- HARD_REG_SET first_used, used;
-#ifdef ELIMINABLE_REGS
- static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
-#endif
-
- /* Validate our parameters. */
- gcc_assert (born_index >= 0 && born_index <= dead_index);
-
- /* Don't let a pseudo live in a reg across a function call
- if we might get a nonlocal goto. */
- if (cfun->has_nonlocal_label
- && qty[qtyno].n_calls_crossed > 0)
- return -1;
-
- if (accept_call_clobbered)
- COPY_HARD_REG_SET (used, call_fixed_reg_set);
- else if (qty[qtyno].n_calls_crossed == 0)
- COPY_HARD_REG_SET (used, fixed_reg_set);
- else
- COPY_HARD_REG_SET (used, call_used_reg_set);
-
- if (accept_call_clobbered)
- IOR_HARD_REG_SET (used, losing_caller_save_reg_set);
-
- for (ins = born_index; ins < dead_index; ins++)
- IOR_HARD_REG_SET (used, regs_live_at[ins]);
-
- IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) rclass]);
-
- /* Don't use the frame pointer reg in local-alloc even if
- we may omit the frame pointer, because if we do that and then we
- need a frame pointer, reload won't know how to move the pseudo
- to another hard reg. It can move only regs made by global-alloc.
-
- This is true of any register that can be eliminated. */
-#ifdef ELIMINABLE_REGS
- for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
- SET_HARD_REG_BIT (used, eliminables[i].from);
-#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
- /* If FRAME_POINTER_REGNUM is not a real register, then protect the one
- that it might be eliminated into. */
- SET_HARD_REG_BIT (used, HARD_FRAME_POINTER_REGNUM);
-#endif
-#else
- SET_HARD_REG_BIT (used, FRAME_POINTER_REGNUM);
-#endif
-
-#ifdef CANNOT_CHANGE_MODE_CLASS
- cannot_change_mode_set_regs (&used, mode, qty[qtyno].first_reg);
-#endif
-
- /* Normally, the registers that can be used for the first register in
- a multi-register quantity are the same as those that can be used for
- subsequent registers. However, if just trying suggested registers,
- restrict our consideration to them. If there are copy-suggested
- registers, try them. Otherwise, try the arithmetic-suggested
- registers. */
- COPY_HARD_REG_SET (first_used, used);
-
- if (just_try_suggested)
- {
- if (qty_phys_num_copy_sugg[qtyno] != 0)
- IOR_COMPL_HARD_REG_SET (first_used, qty_phys_copy_sugg[qtyno]);
- else
- IOR_COMPL_HARD_REG_SET (first_used, qty_phys_sugg[qtyno]);
- }
-
- /* If at least one would be suitable, test each hard reg. */
- if (!hard_reg_set_subset_p (reg_class_contents[(int) ALL_REGS], first_used))
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
-#ifdef REG_ALLOC_ORDER
- int regno = reg_alloc_order[i];
-#else
- int regno = i;
-#endif
- if (!TEST_HARD_REG_BIT (first_used, regno)
- && HARD_REGNO_MODE_OK (regno, mode)
- && (qty[qtyno].n_calls_crossed == 0
- || accept_call_clobbered
- || !HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
- {
- int j;
- int size1 = hard_regno_nregs[regno][mode];
- j = 1;
- while (j < size1 && !TEST_HARD_REG_BIT (used, regno + j))
- j++;
- if (j == size1)
- {
- /* Mark that this register is in use between its birth
- and death insns. */
- post_mark_life (regno, mode, 1, born_index, dead_index);
- return regno;
- }
-#ifndef REG_ALLOC_ORDER
- /* Skip starting points we know will lose. */
- i += j;
-#endif
- }
- }
-
- /* If we are just trying suggested registers, we have just tried copy-
- suggested registers, and there are arithmetic-suggested registers,
- try them. */
-
- /* If it would be profitable to allocate a call-clobbered register
- and save and restore it around calls, do that. */
- if (just_try_suggested && qty_phys_num_copy_sugg[qtyno] != 0
- && qty_phys_num_sugg[qtyno] != 0)
- {
- /* Don't try the copy-suggested regs again. */
- qty_phys_num_copy_sugg[qtyno] = 0;
- return find_free_reg (rclass, mode, qtyno, accept_call_clobbered, 1,
- born_index, dead_index, bb);
- }
-
- /* We need not check to see if the current function has nonlocal
- labels because we don't put any pseudos that are live over calls in
- registers in that case. Avoid putting pseudos crossing calls that
- might throw into call used registers. */
-
- if (! accept_call_clobbered
- && flag_caller_saves
- && ! just_try_suggested
- && qty[qtyno].n_calls_crossed != 0
- && qty[qtyno].n_throwing_calls_crossed == 0
- && CALLER_SAVE_PROFITABLE (optimize_bb_for_size_p (bb) ? qty[qtyno].n_refs
- : qty[qtyno].freq,
- optimize_bb_for_size_p (bb) ? qty[qtyno].n_calls_crossed
- : qty[qtyno].freq_calls_crossed))
- {
- i = find_free_reg (rclass, mode, qtyno, 1, 0, born_index, dead_index, bb);
- if (i >= 0)
- caller_save_needed = 1;
- return i;
- }
- return -1;
-}
-
-/* Mark that REGNO with machine-mode MODE is live starting from the current
- insn (if LIFE is nonzero) or dead starting at the current insn (if LIFE
- is zero). */
-
-static void
-mark_life (int regno, enum machine_mode mode, int life)
-{
- if (life)
- add_to_hard_reg_set (&regs_live, mode, regno);
- else
- remove_from_hard_reg_set (&regs_live, mode, regno);
-}
-
-/* Mark register number REGNO (with machine-mode MODE) as live (if LIFE
- is nonzero) or dead (if LIFE is zero) from insn number BIRTH (inclusive)
- to insn number DEATH (exclusive). */
-
-static void
-post_mark_life (int regno, enum machine_mode mode, int life, int birth,
- int death)
-{
- HARD_REG_SET this_reg;
-
- CLEAR_HARD_REG_SET (this_reg);
- add_to_hard_reg_set (&this_reg, mode, regno);
-
- if (life)
- while (birth < death)
- {
- IOR_HARD_REG_SET (regs_live_at[birth], this_reg);
- birth++;
- }
- else
- while (birth < death)
- {
- AND_COMPL_HARD_REG_SET (regs_live_at[birth], this_reg);
- birth++;
- }
-}
-
-/* Return the number of alternatives for which the constraint string P
- indicates that the operand must be equal to operand 0 and that no register
- is acceptable. */
-
-static int
-requires_inout (const char *p)
-{
- char c;
- int found_zero = 0;
- int reg_allowed = 0;
- int num_matching_alts = 0;
- int len;
-
- for ( ; (c = *p); p += len)
- {
- len = CONSTRAINT_LEN (c, p);
- switch (c)
- {
- case '=': case '+': case '?':
- case '#': case '&': case '!':
- case '*': case '%':
- case 'm': case '<': case '>': case 'V': case 'o':
- case 'E': case 'F': case 'G': case 'H':
- case 's': case 'i': case 'n':
- case 'I': case 'J': case 'K': case 'L':
- case 'M': case 'N': case 'O': case 'P':
- case 'X':
- /* These don't say anything we care about. */
- break;
-
- case ',':
- if (found_zero && ! reg_allowed)
- num_matching_alts++;
-
- found_zero = reg_allowed = 0;
- break;
-
- case '0':
- found_zero = 1;
- break;
-
- case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9':
- /* Skip the balance of the matching constraint. */
- do
- p++;
- while (ISDIGIT (*p));
- len = 0;
- break;
-
- default:
- if (REG_CLASS_FROM_CONSTRAINT (c, p) == NO_REGS
- && !EXTRA_ADDRESS_CONSTRAINT (c, p))
- break;
- /* Fall through. */
- case 'p':
- case 'g': case 'r':
- reg_allowed = 1;
- break;
- }
- }
-
- if (found_zero && ! reg_allowed)
- num_matching_alts++;
-
- return num_matching_alts;
-}
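
  As a rough illustration of the counting that requires_inout performs, here is
  a standalone toy (my own simplification, not the GCC routine: it handles only
  a handful of single-letter constraints and none of the multi-letter
  machinery) that counts alternatives containing a '0' and no register-allowing
  letter:

  #include <stdio.h>

  static int
  toy_requires_inout (const char *p)
  {
    int found_zero = 0, reg_allowed = 0, count = 0;

    for (;; p++)
      {
        char c = *p;
        if (c == '0')
          found_zero = 1;
        else if (c == 'r' || c == 'g' || c == 'p')
          reg_allowed = 1;
        else if (c == ',' || c == '\0')
          {
            if (found_zero && !reg_allowed)
              count++;
            found_zero = reg_allowed = 0;
            if (c == '\0')
              break;
          }
        /* Other letters ('m', 'i', ...) neither allow a register nor
           require matching operand 0, so the toy ignores them.  */
      }
    return count;
  }

  int
  main (void)
  {
    printf ("%d\n", toy_requires_inout ("0"));    /* 1: must equal op 0, no reg */
    printf ("%d\n", toy_requires_inout ("0,r"));  /* 1: second alternative allows a reg */
    printf ("%d\n", toy_requires_inout ("g"));    /* 0 */
    return 0;
  }
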
-
-void
-dump_local_alloc (FILE *file)
-{
- int i;
- for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
- if (reg_renumber[i] != -1)
- fprintf (file, ";; Register %d in %d.\n", i, reg_renumber[i]);
-}
-
-#ifdef STACK_REGS
-static void
-find_stack_regs (void)
-{
- bitmap stack_regs = BITMAP_ALLOC (NULL);
- int i;
- HARD_REG_SET stack_hard_regs, used;
- basic_block bb;
-
- /* Any register that MAY be allocated to a register stack (like the
- 387) is treated poorly. Each such register is marked as being
- live everywhere. This keeps the register allocator and the
- subsequent passes from doing anything useful with these values.
-
- FIXME: This seems like an incredibly poor idea. */
-
- CLEAR_HARD_REG_SET (stack_hard_regs);
- for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
- SET_HARD_REG_BIT (stack_hard_regs, i);
-
- for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
- {
- COPY_HARD_REG_SET (used, reg_class_contents[reg_preferred_class (i)]);
- IOR_HARD_REG_SET (used, reg_class_contents[reg_alternate_class (i)]);
- AND_HARD_REG_SET (used, stack_hard_regs);
- if (!hard_reg_set_empty_p (used))
- bitmap_set_bit (stack_regs, i);
- }
-
- if (dump_file)
- bitmap_print (dump_file, stack_regs, "stack regs:", "\n");
-
- FOR_EACH_BB (bb)
- {
- bitmap_ior_into (DF_LIVE_IN (bb), stack_regs);
- bitmap_and_into (DF_LIVE_IN (bb), DF_LR_IN (bb));
- bitmap_ior_into (DF_LIVE_OUT (bb), stack_regs);
- bitmap_and_into (DF_LIVE_OUT (bb), DF_LR_OUT (bb));
- }
- BITMAP_FREE (stack_regs);
-}
-#endif
-
-static bool
-gate_handle_local_alloc (void)
-{
- return ! flag_ira;
-}
-
-/* Run old register allocator. Return TRUE if we must exit
- rest_of_compilation upon return. */
-static unsigned int
-rest_of_handle_local_alloc (void)
-{
- int rebuild_notes;
- int max_regno = max_reg_num ();
-
- df_note_add_problem ();
-
- if (optimize == 1)
- {
- df_live_add_problem ();
- df_live_set_all_dirty ();
- }
-#ifdef ENABLE_CHECKING
- df->changeable_flags |= DF_VERIFY_SCHEDULED;
-#endif
- df_analyze ();
-#ifdef STACK_REGS
- if (optimize)
- find_stack_regs ();
-#endif
- regstat_init_n_sets_and_refs ();
- regstat_compute_ri ();
-
- /* If we are not optimizing, then this is the only place before
- register allocation where dataflow is done. And that is needed
- to generate these warnings. */
- if (warn_clobbered)
- generate_setjmp_warnings ();
-
- /* Determine if the current function is a leaf before running reload
- since this can impact optimizations done by the prologue and
- epilogue thus changing register elimination offsets. */
- current_function_is_leaf = leaf_function_p ();
-
- /* And the reg_equiv_memory_loc array. */
- VEC_safe_grow (rtx, gc, reg_equiv_memory_loc_vec, max_regno);
- memset (VEC_address (rtx, reg_equiv_memory_loc_vec), 0,
- sizeof (rtx) * max_regno);
- reg_equiv_memory_loc = VEC_address (rtx, reg_equiv_memory_loc_vec);
-
- allocate_initial_values (reg_equiv_memory_loc);
-
- regclass (get_insns (), max_regno);
- rebuild_notes = local_alloc ();
-
- /* Local allocation may have turned an indirect jump into a direct
- jump. If so, we must rebuild the JUMP_LABEL fields of jumping
- instructions. */
- if (rebuild_notes)
- {
- timevar_push (TV_JUMP);
-
- rebuild_jump_labels (get_insns ());
- purge_all_dead_edges ();
- timevar_pop (TV_JUMP);
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- timevar_push (TV_DUMP);
- dump_flow_info (dump_file, dump_flags);
- dump_local_alloc (dump_file);
- timevar_pop (TV_DUMP);
- }
- return 0;
-}
-
-struct rtl_opt_pass pass_local_alloc =
-{
- {
- RTL_PASS,
- "lreg", /* name */
- gate_handle_local_alloc, /* gate */
- rest_of_handle_local_alloc, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_LOCAL_ALLOC, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func |
- TODO_ggc_collect /* todo_flags_finish */
- }
-};
-
diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c
index fd3b7fe971f..69b038b2146 100644
--- a/gcc/lower-subreg.c
+++ b/gcc/lower-subreg.c
@@ -1325,7 +1325,7 @@ struct rtl_opt_pass pass_lower_subreg =
{
{
RTL_PASS,
- "subreg", /* name */
+ "subreg1", /* name */
gate_handle_lower_subreg, /* gate */
rest_of_handle_lower_subreg, /* execute */
NULL, /* sub */
diff --git a/gcc/mode-switching.c b/gcc/mode-switching.c
index e6fd61d4fda..87a2d160141 100644
--- a/gcc/mode-switching.c
+++ b/gcc/mode-switching.c
@@ -760,7 +760,7 @@ struct rtl_opt_pass pass_mode_switching =
{
{
RTL_PASS,
- "mode-sw", /* name */
+ "mode_sw", /* name */
gate_mode_switching, /* gate */
rest_of_handle_mode_switching, /* execute */
NULL, /* sub */
diff --git a/gcc/opts.c b/gcc/opts.c
index 42e3beeb926..cccb80b403f 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -870,7 +870,6 @@ decode_options (unsigned int argc, const char **argv)
}
}
- flag_ira = 1;
/* Use priority coloring if cover classes is not defined for the
target. */
if (targetm.ira_cover_classes == NULL)
@@ -1098,7 +1097,7 @@ decode_options (unsigned int argc, const char **argv)
if (!flag_sel_sched_pipelining)
flag_sel_sched_pipelining_outer_loops = 0;
- if (flag_ira && !targetm.ira_cover_classes
+ if (!targetm.ira_cover_classes
&& flag_ira_algorithm == IRA_ALGORITHM_CB)
{
inform (input_location,
diff --git a/gcc/passes.c b/gcc/passes.c
index ed3afae2b3b..7db72d9eb5a 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -776,7 +776,7 @@ init_optimization_passes (void)
NEXT_PASS (pass_cse2);
NEXT_PASS (pass_rtl_dse1);
NEXT_PASS (pass_rtl_fwprop_addr);
- NEXT_PASS (pass_regclass_init);
+ NEXT_PASS (pass_reginfo_init);
NEXT_PASS (pass_inc_dec);
NEXT_PASS (pass_initialize_regs);
NEXT_PASS (pass_outof_cfg_layout_mode);
@@ -795,8 +795,6 @@ init_optimization_passes (void)
NEXT_PASS (pass_sms);
NEXT_PASS (pass_sched);
NEXT_PASS (pass_subregs_of_mode_init);
- NEXT_PASS (pass_local_alloc);
- NEXT_PASS (pass_global_alloc);
NEXT_PASS (pass_ira);
NEXT_PASS (pass_subregs_of_mode_finish);
NEXT_PASS (pass_postreload);
diff --git a/gcc/ra-conflict.c b/gcc/ra-conflict.c
deleted file mode 100644
index eb732861442..00000000000
--- a/gcc/ra-conflict.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/* Allocate registers for pseudo-registers that span basic blocks.
- Copyright (C) 2007, 2008 Free Software Foundation, Inc.
- Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 3, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "machmode.h"
-#include "hard-reg-set.h"
-#include "rtl.h"
-#include "tm_p.h"
-#include "flags.h"
-#include "regs.h"
-#include "function.h"
-#include "insn-config.h"
-#include "recog.h"
-#include "reload.h"
-#include "output.h"
-#include "toplev.h"
-#include "tree-pass.h"
-#include "timevar.h"
-#include "df.h"
-#include "vecprim.h"
-#include "ra.h"
-#include "sbitmap.h"
-#include "sparseset.h"
-
-/* Externs defined in regs.h. */
-
-int max_allocno;
-struct allocno *allocno;
-HOST_WIDEST_FAST_INT *conflicts;
-int *reg_allocno;
-HOST_WIDE_INT *partial_bitnum;
-HOST_WIDE_INT max_bitnum;
-alloc_pool adjacency_pool;
-adjacency_t **adjacency;
-
-typedef df_ref df_ref_t;
-DEF_VEC_P(df_ref_t);
-DEF_VEC_ALLOC_P(df_ref_t,heap);
-
-/* Macros to determine the bit number within the triangular bit matrix for
- the two allocnos I and J. The smaller of the two indexes partial_bitnum
- and the larger is added to it, so the arguments may be given in either
- order. */
-
-#define CONFLICT_BITNUM(I, J) \
- (((I) < (J)) ? (partial_bitnum[I] + (J)) : (partial_bitnum[J] + (I)))
-
-#define CONFLICT_BITNUM_FAST(I, I_PARTIAL_BITNUM, J) \
- (((I) < (J)) ? ((I_PARTIAL_BITNUM) + (J)) : (partial_bitnum[J] + (I)))
-
-bool
-conflict_p (int allocno1, int allocno2)
-{
- HOST_WIDE_INT bitnum;
- HOST_WIDEST_FAST_INT word, mask;
-
-#ifdef ENABLE_CHECKING
- int blk1, blk2;
-
- gcc_assert (allocno1 >= 0 && allocno1 < max_allocno);
- gcc_assert (allocno2 >= 0 && allocno2 < max_allocno);
-
- blk1 = regno_basic_block (allocno[allocno1].reg);
- blk2 = regno_basic_block (allocno[allocno2].reg);
- gcc_assert (blk1 == 0 || blk2 == 0 || blk1 == blk2);
-#endif
-
- if (allocno1 == allocno2)
- /* By definition, an allocno does not conflict with itself. */
- return 0;
-
- bitnum = CONFLICT_BITNUM (allocno1, allocno2);
-
-#ifdef ENABLE_CHECKING
- gcc_assert (bitnum >= 0 && bitnum < max_bitnum);
-#endif
-
- word = conflicts[bitnum / HOST_BITS_PER_WIDEST_FAST_INT];
- mask = (HOST_WIDEST_FAST_INT) 1 << (bitnum % HOST_BITS_PER_WIDEST_FAST_INT);
- return (word & mask) != 0;
-}
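
  A minimal sketch (not GCC code; unsigned long stands in for
  HOST_WIDEST_FAST_INT and the bit number is arbitrary) of how a bit number
  produced by CONFLICT_BITNUM is split into a word index and a mask, exactly as
  conflict_p above and set_conflict below do:

  #include <stdio.h>
  #include <limits.h>

  #define BITS_PER_WORD_ (sizeof (unsigned long) * CHAR_BIT)

  int
  main (void)
  {
    unsigned long conflicts[4] = { 0, 0, 0, 0 };
    long bitnum = 70;                    /* e.g. the result of CONFLICT_BITNUM (i, j) */
    long index = bitnum / (long) BITS_PER_WORD_;
    unsigned long mask = 1UL << (bitnum % (long) BITS_PER_WORD_);

    conflicts[index] |= mask;            /* what set_conflict does */
    printf ("conflict? %d\n", (conflicts[index] & mask) != 0);  /* what conflict_p tests */
    return 0;
  }
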
-
-/* Add conflict edges between ALLOCNO1 and ALLOCNO2. */
-
-static void
-set_conflict (int allocno1, int allocno2)
-{
- HOST_WIDE_INT bitnum, index;
- HOST_WIDEST_FAST_INT word, mask;
-
-#ifdef ENABLE_CHECKING
- int blk1, blk2;
-
- gcc_assert (allocno1 >= 0 && allocno1 < max_allocno);
- gcc_assert (allocno2 >= 0 && allocno2 < max_allocno);
-
- blk1 = regno_basic_block (allocno[allocno1].reg);
- blk2 = regno_basic_block (allocno[allocno2].reg);
- gcc_assert (blk1 == 0 || blk2 == 0 || blk1 == blk2);
-#endif
-
- /* By definition, an allocno does not conflict with itself. */
- if (allocno1 == allocno2)
- return;
-
- bitnum = CONFLICT_BITNUM (allocno1, allocno2);
-
-#ifdef ENABLE_CHECKING
- gcc_assert (bitnum >= 0 && bitnum < max_bitnum);
-#endif
-
- index = bitnum / HOST_BITS_PER_WIDEST_FAST_INT;
- word = conflicts[index];
- mask = (HOST_WIDEST_FAST_INT) 1 << (bitnum % HOST_BITS_PER_WIDEST_FAST_INT);
-
- if ((word & mask) == 0)
- {
- conflicts[index] = word | mask;
- add_neighbor (allocno1, allocno2);
- add_neighbor (allocno2, allocno1);
- }
-}
-
-/* Add conflict edges between ALLOCNO1 and all allocnos currently live. */
-
-static void
-set_conflicts (int allocno1, sparseset live)
-{
- int i;
- HOST_WIDE_INT bitnum, index;
- HOST_WIDEST_FAST_INT word, mask;
- HOST_WIDE_INT partial_bitnum_allocno1;
-
-#ifdef ENABLE_CHECKING
- gcc_assert (allocno1 >= 0 && allocno1 < max_allocno);
-#endif
-
- partial_bitnum_allocno1 = partial_bitnum[allocno1];
-
- EXECUTE_IF_SET_IN_SPARSESET (live, i)
- {
- /* By definition, an allocno does not conflict with itself. */
- if (allocno1 == i)
- continue;
-
-#ifdef ENABLE_CHECKING
- gcc_assert (i >= 0 && i < max_allocno);
-#endif
-
- bitnum = CONFLICT_BITNUM_FAST (allocno1, partial_bitnum_allocno1, i);
-
-#ifdef ENABLE_CHECKING
- gcc_assert (bitnum >= 0 && bitnum < max_bitnum);
-#endif
-
- index = bitnum / HOST_BITS_PER_WIDEST_FAST_INT;
- word = conflicts[index];
- mask = (HOST_WIDEST_FAST_INT) 1 << (bitnum % HOST_BITS_PER_WIDEST_FAST_INT);
-
- if ((word & mask) == 0)
- {
- conflicts[index] = word | mask;
- add_neighbor (allocno1, i);
- add_neighbor (i, allocno1);
- }
- }
-}
-
-
-/* Add a conflict between R1 and R2. */
-
-static void
-record_one_conflict_between_regnos (enum machine_mode mode1, int r1,
- enum machine_mode mode2, int r2)
-{
- int allocno1 = reg_allocno[r1];
- int allocno2 = reg_allocno[r2];
-
- if (dump_file)
- fprintf (dump_file, " rocbr adding %d<=>%d\n", r1, r2);
-
- if (allocno1 >= 0 && allocno2 >= 0)
- set_conflict (allocno1, allocno2);
- else if (allocno1 >= 0)
- {
- if (r2 < FIRST_PSEUDO_REGISTER)
- add_to_hard_reg_set (&allocno[allocno1].hard_reg_conflicts, mode2, r2);
- }
- else if (allocno2 >= 0)
- {
- if (r1 < FIRST_PSEUDO_REGISTER)
- add_to_hard_reg_set (&allocno[allocno2].hard_reg_conflicts, mode1, r1);
- }
-
- /* Now, recursively handle the reg_renumber cases. */
- if (reg_renumber[r1] >= 0)
- record_one_conflict_between_regnos (mode1, reg_renumber[r1], mode2, r2);
-
- if (reg_renumber[r2] >= 0)
- record_one_conflict_between_regnos (mode1, r1, mode2, reg_renumber[r2]);
-}
-
-
-/* Record a conflict between register REGNO and everything currently
- live. REGNO must not be a pseudo reg that was allocated by
- local_alloc; such numbers must be translated through reg_renumber
- before calling here. */
-
-static void
-record_one_conflict (sparseset allocnos_live,
- HARD_REG_SET *hard_regs_live, int regno)
-{
- int i;
-
- if (regno < FIRST_PSEUDO_REGISTER)
- /* When a hard register becomes live, record conflicts with live
- pseudo regs. */
- EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
- {
- SET_HARD_REG_BIT (allocno[i].hard_reg_conflicts, regno);
- if (dump_file)
- fprintf (dump_file, " roc adding %d<=>%d\n", allocno[i].reg, regno);
- }
- else
- /* When a pseudo-register becomes live, record conflicts first
- with hard regs, then with other pseudo regs. */
- {
- int ialloc = reg_allocno[regno];
-
- if (dump_file)
- {
- fprintf (dump_file, " roc adding %d<=>(", regno);
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (*hard_regs_live, i)
- && !TEST_HARD_REG_BIT (allocno[ialloc].hard_reg_conflicts, i))
- fprintf (dump_file, "%d ", i);
-
- EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
- {
- if (!conflict_p (ialloc, i))
- fprintf (dump_file, "%d ", allocno[i].reg);
- }
- fprintf (dump_file, ")\n");
- }
-
- IOR_HARD_REG_SET (allocno[ialloc].hard_reg_conflicts, *hard_regs_live);
- set_conflicts (ialloc, allocnos_live);
- }
-}
-
-
-/* Handle the case where REG is set by the insn being scanned, during
- the backward scan to accumulate conflicts. Record a conflict with
- all other registers already live.
-
- REG might actually be something other than a register; if so, we do
- nothing. */
-
-static void
-mark_reg_store (sparseset allocnos_live,
- HARD_REG_SET *hard_regs_live,
- df_ref ref)
-{
- rtx reg = DF_REF_REG (ref);
- unsigned int regno = DF_REF_REGNO (ref);
- enum machine_mode mode = GET_MODE (reg);
-
- /* Either this is one of the max_allocno pseudo regs not allocated,
- or it is or has a hardware reg. First handle the pseudo-regs. */
- if (regno >= FIRST_PSEUDO_REGISTER && reg_allocno[regno] >= 0)
- record_one_conflict (allocnos_live, hard_regs_live, regno);
-
- if (reg_renumber[regno] >= 0)
- regno = reg_renumber[regno];
-
- /* Handle hardware regs (and pseudos allocated to hard regs). */
- if (regno < FIRST_PSEUDO_REGISTER && ! fixed_regs[regno])
- {
- unsigned int start = regno;
- unsigned int last = end_hard_regno (mode, regno);
- if ((GET_CODE (reg) == SUBREG) && !DF_REF_FLAGS_IS_SET (ref, DF_REF_ZERO_EXTRACT))
- {
- start += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
- SUBREG_BYTE (reg), GET_MODE (reg));
- last = start + subreg_nregs_with_regno (regno, reg);
- }
-
- regno = start;
- while (regno < last)
- record_one_conflict (allocnos_live, hard_regs_live, regno++);
- }
-}
-
-
-/* Return true if REGNO with MODE can be assigned to a register in
- RC. */
-
-static bool
-may_overlap_class_p (enum machine_mode mode, unsigned int regno,
- enum reg_class rc)
-{
- if (regno >= FIRST_PSEUDO_REGISTER)
- {
- enum reg_class pref_class = reg_preferred_class (regno);
- enum reg_class alt_class = reg_alternate_class (regno);
- return (reg_classes_intersect_p (rc, pref_class)
- || reg_classes_intersect_p (rc, alt_class));
- }
- else
- return in_hard_reg_set_p (reg_class_contents[rc], mode, regno);
-}
-
-
-/* SRC is an input operand to an instruction in which register DEST is
- an output operand. SRC may be bound to a member of class SRC_CLASS
- and DEST may be bound to an earlyclobbered register that overlaps
- SRC_CLASS. If SRC is a register that might be allocated a member
- of SRC_CLASS, add a conflict between it and DEST. */
-
-static void
-add_conflicts_for_earlyclobber (rtx dest, enum reg_class src_class, rtx src)
-{
- if (GET_CODE (src) == SUBREG)
- src = SUBREG_REG (src);
- if (REG_P (src)
- && may_overlap_class_p (GET_MODE (src), REGNO (src), src_class))
- record_one_conflict_between_regnos (GET_MODE (src), REGNO (src),
- GET_MODE (dest), REGNO (dest));
-}
-
-
-/* Look at the defs in INSN and determine if any of them are marked as
- early clobber. If they are marked as early clobber, add a conflict
- between the def and any input operand that could be allocated to the
- same register. */
-
-static void
-set_conflicts_for_earlyclobber (rtx insn)
-{
- int alt;
- int def;
- int use;
-
- extract_insn (insn);
- preprocess_constraints ();
-
- if (dump_file)
- fprintf (dump_file, " starting early clobber conflicts.\n");
-
- for (alt = 0; alt < recog_data.n_alternatives; alt++)
- for (def = 0; def < recog_data.n_operands; def++)
- if ((recog_op_alt[def][alt].earlyclobber)
- && (recog_op_alt[def][alt].cl != NO_REGS))
- {
- rtx dreg = recog_data.operand[def];
- enum machine_mode dmode = recog_data.operand_mode[def];
- if (GET_CODE (dreg) == SUBREG)
- dreg = SUBREG_REG (dreg);
- if (REG_P (dreg)
- && may_overlap_class_p (dmode, REGNO (dreg), recog_op_alt[def][alt].cl))
-
- for (use = 0; use < recog_data.n_operands; use++)
- if (use != def
- && recog_data.operand_type[use] != OP_OUT
- && reg_classes_intersect_p (recog_op_alt[def][alt].cl,
- recog_op_alt[use][alt].cl))
- {
- add_conflicts_for_earlyclobber (dreg,
- recog_op_alt[use][alt].cl,
- recog_data.operand[use]);
- /* Reload may end up swapping commutative operands,
- so you have to take both orderings into account.
- The constraints for the two operands can be
- completely different. (Indeed, if the
- constraints for the two operands are the same
- for all alternatives, there's no point marking
- them as commutative.) */
- if (use < recog_data.n_operands + 1
- && recog_data.constraints[use][0] == '%')
- add_conflicts_for_earlyclobber (dreg,
- recog_op_alt[use][alt].cl,
- recog_data.operand[use + 1]);
- }
- }
-}
-
-
-/* Initialize LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM]
- for the pseudo underlying REG; the pseudo's mode gives the number of
- byte bits, and INIT_VALUE gives their initial value. ALLOCNUM need
- not be the regno of REG. */
-
-void
-ra_init_live_subregs (bool init_value,
- sbitmap *live_subregs,
- int *live_subregs_used,
- int allocnum,
- rtx reg)
-{
- unsigned int regno = REGNO (SUBREG_REG (reg));
- int size = GET_MODE_SIZE (GET_MODE (regno_reg_rtx[regno]));
-
- gcc_assert (size > 0);
-
- /* Been there, done that. */
- if (live_subregs_used[allocnum])
- return;
-
- /* Create a new one with zeros. */
- if (live_subregs[allocnum] == NULL)
- live_subregs[allocnum] = sbitmap_alloc (size);
-
- /* If the entire reg was live before blasting into subregs, we need
- to init all of the subregs to ones; otherwise init them to 0. */
- if (init_value)
- sbitmap_ones (live_subregs[allocnum]);
- else
- sbitmap_zero (live_subregs[allocnum]);
-
- /* Set the number of bits that we really want. */
- live_subregs_used[allocnum] = size;
-}
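
  The live_subregs representation that ra_init_live_subregs sets up is simply
  one bit per byte of the pseudo.  A toy sketch (illustration only; it assumes
  an 8-byte pseudo and 4-byte words, and uses a plain char array in place of an
  sbitmap) of how a subreg store to the high word clears only those byte bits,
  in the spirit of clear_reg_in_live below:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned char live[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };  /* whole pseudo live */
    unsigned int start = 4, last = 8;   /* subreg def of the high word: bytes 4..7 */
    unsigned int i;

    for (i = start; i < last; i++)      /* what clear_reg_in_live does bit by bit */
      live[i] = 0;

    for (i = 0; i < 8; i++)
      printf ("%d", live[i]);           /* prints 11110000: low word still live */
    printf ("\n");
    return 0;
  }
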
-
-
-/* Set REG to be not live in the sets ALLOCNOS_LIVE, LIVE_SUBREGS,
- HARD_REGS_LIVE. DEF is the definition of the register. */
-
-inline static void
-clear_reg_in_live (sparseset allocnos_live,
- sbitmap *live_subregs,
- int *live_subregs_used,
- HARD_REG_SET *hard_regs_live,
- rtx reg, df_ref def)
-{
- unsigned int regno = (GET_CODE (reg) == SUBREG)
- ? REGNO (SUBREG_REG (reg)): REGNO (reg);
- int allocnum = reg_allocno[regno];
-
- if (allocnum >= 0)
- {
- if (GET_CODE (reg) == SUBREG
- && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
- {
- unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start + GET_MODE_SIZE (GET_MODE (reg));
-
- ra_init_live_subregs (sparseset_bit_p (allocnos_live, allocnum),
- live_subregs, live_subregs_used, allocnum, reg);
-
- if (!DF_REF_FLAGS_IS_SET (def, DF_REF_STRICT_LOW_PART))
- {
- /* Expand the range to cover entire words.
- Bytes added here are "don't care". */
- start = start / UNITS_PER_WORD * UNITS_PER_WORD;
- last = ((last + UNITS_PER_WORD - 1)
- / UNITS_PER_WORD * UNITS_PER_WORD);
- }
-
- /* Ignore the paradoxical bits. */
- if ((int)last > live_subregs_used[allocnum])
- last = live_subregs_used[allocnum];
-
- while (start < last)
- {
- RESET_BIT (live_subregs[allocnum], start);
- start++;
- }
-
- if (sbitmap_empty_p (live_subregs[allocnum]))
- {
- live_subregs_used[allocnum] = 0;
- sparseset_clear_bit (allocnos_live, allocnum);
- }
- else
- /* Set the allocnos live here because that bit has to be
- true to get us to look at the live_subregs fields. */
- sparseset_set_bit (allocnos_live, allocnum);
- }
- else
- {
- /* Resetting the live_subregs_used is effectively saying do not use the
- subregs because we are writing the whole pseudo. */
- live_subregs_used[allocnum] = 0;
- sparseset_clear_bit (allocnos_live, allocnum);
- }
- }
-
- if (regno >= FIRST_PSEUDO_REGISTER)
- return;
-
- /* Handle hardware regs (and pseudos allocated to hard regs). */
- if (! fixed_regs[regno])
- {
- unsigned int start = regno;
- if (GET_CODE (reg) == SUBREG
- && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
- {
- unsigned int last;
- start += SUBREG_BYTE (reg);
- last = start + subreg_nregs_with_regno (regno, reg);
- regno = start;
-
- while (regno < last)
- {
- CLEAR_HARD_REG_BIT (*hard_regs_live, regno);
- regno++;
- }
- }
- else
- remove_from_hard_reg_set (hard_regs_live, GET_MODE (reg), regno);
- }
-}
-
-
-
-/* Set REG to be live in the sets ALLOCNOS_LIVE, LIVE_SUBREGS,
- HARD_REGS_LIVE. If EXTRACT is false, assume that the entire reg is
- set live even if REG is a subreg. */
-
-inline static void
-set_reg_in_live (sparseset allocnos_live,
- sbitmap *live_subregs,
- int *live_subregs_used,
- HARD_REG_SET *hard_regs_live,
- rtx reg,
- bool extract)
-{
- unsigned int regno = (GET_CODE (reg) == SUBREG)
- ? REGNO (SUBREG_REG (reg)): REGNO (reg);
- int allocnum = reg_allocno[regno];
-
- if (allocnum >= 0)
- {
- if ((GET_CODE (reg) == SUBREG) && !extract)
- {
- unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start + GET_MODE_SIZE (GET_MODE (reg));
-
- ra_init_live_subregs (sparseset_bit_p (allocnos_live, allocnum),
- live_subregs, live_subregs_used, allocnum, reg);
-
- /* Ignore the paradoxical bits. */
- if ((int)last > live_subregs_used[allocnum])
- last = live_subregs_used[allocnum];
-
- while (start < last)
- {
- SET_BIT (live_subregs[allocnum], start);
- start++;
- }
- }
- else
- /* Resetting the live_subregs_used is effectively saying do not use the
- subregs because we are writing the whole pseudo. */
- live_subregs_used[allocnum] = 0;
-
- sparseset_set_bit (allocnos_live, allocnum);
- }
-
- if (regno >= FIRST_PSEUDO_REGISTER)
- return;
-
- /* Handle hardware regs (and pseudos allocated to hard regs). */
- if (! fixed_regs[regno])
- {
- if ((GET_CODE (reg) == SUBREG) && !extract)
- {
- unsigned int start = regno;
- unsigned int last;
-
- start += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
- SUBREG_BYTE (reg), GET_MODE (reg));
- last = start + subreg_nregs_with_regno (regno, reg);
- regno = start;
-
- while (regno < last)
- {
- SET_HARD_REG_BIT (*hard_regs_live, regno);
- regno++;
- }
- }
- else
- add_to_hard_reg_set (hard_regs_live, GET_MODE (reg), regno);
- }
-}
-
-
-/* Mark in RENUMBERS_LIVE the hard regs made live by the pseudo in
- allocno[ALLOCNUM], assuming it is allocated to a set of hard regs
- starting at RENUMBER.
-
- We are smart about the case where only subregs of REG have been
- set, as indicated by LIVE_SUBREGS[ALLOCNUM] and
- LIVE_SUBREGS_USED[ALLOCNUM]. See global_conflicts for description
- of LIVE_SUBREGS and LIVE_SUBREGS_USED. */
-
-inline static void
-set_renumbers_live (HARD_REG_SET *renumbers_live,
- sbitmap *live_subregs,
- int *live_subregs_used,
- int allocnum, int renumber)
-{
- /* The width of the pseudo. */
- int nbytes = live_subregs_used[allocnum];
- int regno = allocno[allocnum].reg;
- enum machine_mode mode = GET_MODE (regno_reg_rtx[regno]);
-
- if (dump_file)
- fprintf (dump_file, " set_renumbers_live %d->%d ",
- regno, renumber);
-
- if (nbytes > 0)
- {
- int i;
- sbitmap live_subs = live_subregs[allocnum];
-
- /* First figure out how many hard regs we are considering using. */
- int target_nregs = hard_regno_nregs[renumber][mode];
-
- /* Now figure out the number of bytes per hard reg. Note that
- this may be different from what would be obtained by looking
- at the mode in the pseudo. For instance, a complex number
- made up of 2 32-bit parts gets mapped to 2 hard regs, even if
- the hardregs are 64-bit floating point values. */
- int target_width = nbytes / target_nregs;
-
- if (dump_file)
- fprintf (dump_file, "target_nregs=%d target_width=%d nbytes=%d",
- target_nregs, target_width, nbytes);
-
- for (i = 0; i < target_nregs; i++)
- {
- int j;
- bool set = false;
- for (j = 0; j < target_width; j++)
- {
- int reg_start = i * target_width;
- if (reg_start + j >= nbytes)
- break;
- set |= TEST_BIT (live_subs, reg_start + j);
- }
-
- if (set)
- SET_HARD_REG_BIT (*renumbers_live, renumber + i);
- }
- }
- else
- add_to_hard_reg_set (renumbers_live, mode, renumber);
-
- if (dump_file)
- fprintf (dump_file, "\n");
-}
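
  A worked example (my own numbers, not from the source) of the
  target_nregs/target_width arithmetic in set_renumbers_live: an 8-byte pseudo
  renumbered to a pair of hard regs gives 4 bytes per hard reg, and each hard
  reg is marked live only if some byte in its 4-byte slice is live.

  #include <stdio.h>

  int
  main (void)
  {
    int nbytes = 8, target_nregs = 2;
    int target_width = nbytes / target_nregs;            /* 4 bytes per hard reg */
    unsigned char live_subs[8] = { 1, 1, 1, 1, 0, 0, 0, 0 };
    int i, j;

    for (i = 0; i < target_nregs; i++)
      {
        int set = 0;
        for (j = 0; j < target_width && i * target_width + j < nbytes; j++)
          set |= live_subs[i * target_width + j];
        printf ("hard reg RENUMBER+%d live: %d\n", i, set);  /* +0: 1, +1: 0 */
      }
    return 0;
  }
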
-
-/* Dump out a REF with its reg_renumber range to FILE using
- PREFIX. */
-
-static void
-dump_ref (FILE *file,
- const char * prefix,
- const char * suffix,
- rtx reg,
- unsigned int regno,
- sbitmap *live_subregs,
- int *live_subregs_used
-)
-{
- int allocnum = reg_allocno[regno];
-
- fprintf (file, "%s %d", prefix, regno);
- if (allocnum >= 0
- && live_subregs_used[allocnum] > 0)
- {
- int j;
- char s = '[';
-
- for (j = 0; j < live_subregs_used[allocnum]; j++)
- if (TEST_BIT (live_subregs[allocnum], j))
- {
- fprintf (dump_file, "%c%d", s, j);
- s = ',';
- }
- fprintf (dump_file, "]");
- }
-
- if (reg_renumber[regno] >= 0)
- {
- enum machine_mode mode = GET_MODE (reg);
- unsigned int start;
- unsigned int last;
-
- regno = reg_renumber[regno];
-
- start = regno;
- last = end_hard_regno (mode, regno);
- if (GET_CODE (reg) == SUBREG)
- {
- start += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
- SUBREG_BYTE (reg), GET_MODE (reg));
- last = start + subreg_nregs_with_regno (regno, reg);
- }
-
- if (start == last - 1)
- fprintf (file, "(%d)", start);
- else
- fprintf (file, "(%d:%d..%d)", regno, start, last-1);
- }
- fprintf (file, suffix);
-}
-
-
-/* Scan the rtl code and record all conflicts and register preferences in the
- conflict matrices and preference tables. */
-
-void
-global_conflicts (void)
-{
- unsigned int i;
- basic_block bb;
- rtx insn;
-
- /* Regs that have allocnos can be in either
- hard_regs_live (if regno < FIRST_PSEUDO_REGISTER) or
- allocnos_live (if regno >= FIRST_PSEUDO_REGISTER) or
- both if local_alloc has preallocated it and reg_renumber >= 0. */
-
- HARD_REG_SET hard_regs_live;
- HARD_REG_SET renumbers_live;
- sparseset allocnos_live;
- bitmap live = BITMAP_ALLOC (NULL);
- VEC (df_ref_t, heap) *clobbers = NULL;
- VEC (df_ref_t, heap) *dying_regs = NULL;
-
- /* live_subregs is a vector used to keep accurate information about
- which hardregs are live in multiword pseudos. live_subregs and
- live_subregs_used are indexed by reg_allocno. The live_subreg
- entry for a particular pseudo is a bitmap with one bit per byte
- of the register. It is only used if the corresponding element is
- nonzero in live_subregs_used. The value in live_subregs_used is
- the number of bytes that the pseudo can occupy. */
- sbitmap *live_subregs = XCNEWVEC (sbitmap, max_allocno);
- int *live_subregs_used = XNEWVEC (int, max_allocno);
-
- if (dump_file)
- {
- fprintf (dump_file, "fixed registers : ");
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (fixed_regs[i])
- fprintf (dump_file, "%d ", i);
- fprintf (dump_file, "\n");
- }
-
- allocnos_live = sparseset_alloc (max_allocno);
-
- FOR_EACH_BB (bb)
- {
- bitmap_iterator bi;
-
- bitmap_copy (live, DF_LIVE_OUT (bb));
- df_simulate_initialize_backwards (bb, live);
-
- sparseset_clear (allocnos_live);
- memset (live_subregs_used, 0, max_allocno * sizeof (int));
- CLEAR_HARD_REG_SET (hard_regs_live);
- CLEAR_HARD_REG_SET (renumbers_live);
-
- /* Initialize allocnos_live and hard_regs_live for bottom of block. */
- EXECUTE_IF_SET_IN_BITMAP (live, 0, i, bi)
- {
- if (i >= FIRST_PSEUDO_REGISTER)
- break;
- if (! fixed_regs[i])
- SET_HARD_REG_BIT (hard_regs_live, i);
- }
-
- EXECUTE_IF_SET_IN_BITMAP (live, FIRST_PSEUDO_REGISTER, i, bi)
- {
- int allocnum = reg_allocno[i];
-
- if (allocnum >= 0)
- {
- int renumber = reg_renumber[i];
- rtx reg = regno_reg_rtx[i];
-
- set_reg_in_live (allocnos_live, live_subregs, live_subregs_used,
- &hard_regs_live, reg, false);
- if (renumber >= 0 && renumber < FIRST_PSEUDO_REGISTER)
- set_renumbers_live (&renumbers_live, live_subregs, live_subregs_used,
- allocnum, renumber);
- }
- }
-
- if (dump_file)
- fprintf (dump_file, "\nstarting basic block %d\n\n", bb->index);
-
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- unsigned int uid = INSN_UID (insn);
- df_ref *def_rec;
- df_ref *use_rec;
-
- if (!INSN_P (insn))
- continue;
-
- if (dump_file)
- {
- fprintf (dump_file, "insn = %d live = hardregs [", uid);
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (hard_regs_live, i))
- fprintf (dump_file, "%d ", i);
-
- fprintf (dump_file, "] renumbered [");
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (renumbers_live, i))
- fprintf (dump_file, "%d ", i);
-
- fprintf (dump_file, "] pseudos [");
- EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
- {
- dump_ref (dump_file, " ", "", regno_reg_rtx[allocno[i].reg],
- allocno[i].reg, live_subregs, live_subregs_used);
- }
- fprintf (dump_file, "]\n");
- }
-
- /* Add the defs into live. Most of them will already be
- there, the ones that are missing are the unused ones and
- the clobbers. We do this in order to make sure that
- interferences are added between every def and everything
- that is live across the insn. These defs will be removed
- later. */
- for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
-
- /* FIXME: Ignoring may clobbers is technically the wrong
- thing to do. However, the old version of this
- code ignores may clobbers (and instead has many
- places in the register allocator to handle these
- constraints). It is quite likely that with a new
- allocator, the correct thing to do is not to ignore
- the constraints, which removes the need for the large
- number of special checks. */
- if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
- {
- rtx reg = DF_REF_REG (def);
- set_reg_in_live (allocnos_live, live_subregs, live_subregs_used,
- &hard_regs_live, reg,
- DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT));
- if (dump_file)
- dump_ref (dump_file, " adding def", "\n",
- reg, DF_REF_REGNO (def), live_subregs, live_subregs_used);
- }
- }
-
- /* Add the hardregs into renumbers_live to build the
- interferences. Renumbers_live will be rebuilt in the
- next step from scratch, so corrupting it here is no
- problem. */
- IOR_HARD_REG_SET (renumbers_live, hard_regs_live);
-
- /* Add the interferences for the defs. */
- for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
- mark_reg_store (allocnos_live, &renumbers_live, def);
- }
-
- /* Remove the defs from the live sets. Leave the partial
- and conditional defs in the set because they do not
- kill. */
- VEC_truncate (df_ref_t, clobbers, 0);
- for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
-
- if (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL))
- {
- rtx reg = DF_REF_REG (def);
-
- clear_reg_in_live (allocnos_live, live_subregs, live_subregs_used,
- &hard_regs_live, reg, def);
- if (dump_file)
- dump_ref (dump_file, " clearing def", "\n",
- reg, DF_REF_REGNO (def), live_subregs, live_subregs_used);
- }
-
- if (DF_REF_FLAGS_IS_SET (def, DF_REF_MUST_CLOBBER))
- VEC_safe_push (df_ref_t, heap, clobbers, def);
- }
-
- /* Go thru all of the live pseudos and reset renumbers_live.
- We must start from scratch here because there could have
- been several pseudos alive that have the same
- reg_renumber and if we see a clobber for one of them, we
- do not want to kill the renumbers from the other
- pseudos. */
- CLEAR_HARD_REG_SET (renumbers_live);
- EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
- {
- unsigned int regno = allocno[i].reg;
- int renumber = reg_renumber[regno];
-
- if (renumber >= 0 && renumber < FIRST_PSEUDO_REGISTER)
- set_renumbers_live (&renumbers_live, live_subregs, live_subregs_used,
- i, renumber);
- }
-
- /* Add the uses to the live sets. Keep track of the regs
- that are dying inside the insn; this set will be useful
- later. */
- VEC_truncate (df_ref_t, dying_regs, 0);
- for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
- {
- df_ref use = *use_rec;
- unsigned int regno = DF_REF_REGNO (use);
- bool added = false;
- int renumber = reg_renumber[regno];
- int allocnum = reg_allocno[regno];
- bool renumbering = false;
- rtx reg = DF_REF_REG (use);
-
- /* DF_REF_READ_WRITE on a use means that this use is
- fabricated from a def that is a partial set to a
- multiword reg. Here, we only model the subreg case
- precisely, so we do not need to look at the fabricated
- use unless that set also happens to be wrapped in a
- ZERO_EXTRACT. */
- if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
- && (!DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT))
- && DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
- continue;
-
- if (dump_file)
- dump_ref (dump_file, " seeing use", "\n",
- reg, regno, live_subregs, live_subregs_used);
-
- if (allocnum >= 0)
- {
- if (GET_CODE (reg) == SUBREG
- && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT))
- {
- unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start + GET_MODE_SIZE (GET_MODE (reg));
-
- ra_init_live_subregs (sparseset_bit_p (allocnos_live, allocnum),
- live_subregs, live_subregs_used, allocnum, reg);
-
- /* Ignore the paradoxical bits. */
- if ((int)last > live_subregs_used[allocnum])
- last = live_subregs_used[allocnum];
-
- while (start < last)
- {
- if (!TEST_BIT (live_subregs[allocnum], start))
- {
- if (dump_file)
- fprintf (dump_file, " dying pseudo subreg %d[%d]\n", regno, start);
- SET_BIT (live_subregs[allocnum], start);
-
- added = true;
- }
- start++;
- }
-
- sparseset_set_bit (allocnos_live, allocnum);
- if (renumber >= 0 && renumber < FIRST_PSEUDO_REGISTER)
- set_renumbers_live (&renumbers_live, live_subregs, live_subregs_used,
- allocnum, renumber);
- }
- else if (live_subregs_used[allocnum] > 0
- || !sparseset_bit_p (allocnos_live, allocnum))
- {
- if (dump_file)
- fprintf (dump_file, " %sdying pseudo\n",
- (live_subregs_used[allocnum] > 0) ? "partially ": "");
- /* Resetting the live_subregs_used is
- effectively saying do not use the subregs
- because we are reading the whole pseudo. */
- live_subregs_used[allocnum] = 0;
- sparseset_set_bit (allocnos_live, allocnum);
- if (renumber >= 0 && renumber < FIRST_PSEUDO_REGISTER)
- set_renumbers_live (&renumbers_live, live_subregs, live_subregs_used,
- allocnum, renumber);
- added = true;
- }
- }
-
- if (renumber >= 0 && renumber < FIRST_PSEUDO_REGISTER)
- {
- regno = renumber;
- renumbering = true;
- }
-
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- unsigned int start = regno;
- unsigned int last;
- if (GET_CODE (reg) == SUBREG)
- {
- start += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
- SUBREG_BYTE (reg), GET_MODE (reg));
- last = start + subreg_nregs_with_regno (regno, reg);
- }
- else
- last = end_hard_regno (GET_MODE (reg), regno);
-
- regno = start;
- while (regno < last)
- {
- if ((!TEST_HARD_REG_BIT (hard_regs_live, regno))
- && (!TEST_HARD_REG_BIT (renumbers_live, regno))
- && ! fixed_regs[regno])
- {
- if (dump_file)
- fprintf (dump_file, " dying hard reg %d\n", regno);
- if (renumbering)
- SET_HARD_REG_BIT (renumbers_live, regno);
- else
- SET_HARD_REG_BIT (hard_regs_live, regno);
-
- added = true;
- }
- regno++;
- }
- }
- if (added)
- VEC_safe_push (df_ref_t, heap, dying_regs, use);
- }
-
- /* These three cases are all closely related: they all deal
- with cases where some set of outputs of the insn needs to conflict
- with some of the registers that are used by the insn but
- die within the insn. If no registers die within the insn,
- the tests can be skipped. */
-
- if (VEC_length (df_ref_t, dying_regs) > 0)
- {
- int k;
- /* There appears to be an ambiguity as to what a clobber
- means in an insn. In some cases, the clobber happens
- within the processing of the insn and in some cases
- it happens at the end of processing the insn. There
- is currently no way to distinguish these two cases so
- this code causes real clobbers to interfere with
- registers that die within an insn.
-
- This is consistent with the prior version of the
- interference graph builder, but it was discovered
- while developing this version of the code that on
- some architectures, such as the x86-64, the clobbers
- only appear to happen at the end of the insn.
- However, the ppc-32 contains clobbers for which these
- interferences are necessary.
-
- FIXME: We should consider either adding a new kind of
- clobber, or adding a flag to the clobber to distinguish
- these two cases. */
- if (dump_file && VEC_length (df_ref_t, clobbers))
- fprintf (dump_file, " clobber conflicts\n");
- for (k = VEC_length (df_ref_t, clobbers) - 1; k >= 0; k--)
- {
- df_ref def = VEC_index (df_ref_t, clobbers, k);
- int j;
-
- for (j = VEC_length (df_ref_t, dying_regs) - 1; j >= 0; j--)
- {
- df_ref use = VEC_index (df_ref_t, dying_regs, j);
- record_one_conflict_between_regnos (GET_MODE (DF_REF_REG (def)),
- DF_REF_REGNO (def),
- GET_MODE (DF_REF_REG (use)),
- DF_REF_REGNO (use));
- }
- }
-
- /* Early clobbers, by definition, need to not only
- clobber the registers that are live across the insn
- but need to clobber the registers that die within the
- insn. The clobbering for registers live across the
- insn is handled above. */
- set_conflicts_for_earlyclobber (insn);
-
- /* If INSN is a store with multiple outputs, then any
- reg that dies here and is used inside of the address
- of the output must conflict with the other outputs.
-
- FIXME: There has been some discussion as to whether
- this is the right place to handle this issue. This is a
- holdover from an early version of global conflicts.
-
- 1) There is some evidence that this code only deals with a
- bug that is only on the m68k. The conditions of this
- test are such that this case only triggers for a very
- peculiar insn, one that is a parallel where one of
- the sets is a store and the other sets a reg that is
- used in the address of the store. See
- http://gcc.gnu.org/ml/gcc-patches/1998-12/msg00259.html
-
- 2) The situation that this is addressing is a bug in
- the part of reload that handles stores, adding this
- conflict only hides the problem. (Of course no one
- really wants to fix reload so it is understandable
- why a bandaid was just added here.)
-
- Just because an output is unused does not mean the
- compiler can assume the side effect will not occur.
- Consider if REG appears in the address of an output
- and we reload the output. If we allocate REG to the
- same hard register as an unused output we could set
- the hard register before the output reload insn.
-
- 3) This could actually be handled by making the other
- (non store) operand of the insn be an early clobber.
- This would insert the same conflict, even if it is
- not technically an early clobber. */
-
- /* It is unsafe to use !single_set here since it will ignore an
- unused output. */
- if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
- {
- int j;
- if (dump_file)
- fprintf (dump_file, " multiple sets\n");
- for (j = VEC_length (df_ref_t, dying_regs) - 1; j >= 0; j--)
- {
- int used_in_output = 0;
- df_ref use = VEC_index (df_ref_t, dying_regs, j);
- rtx reg = DF_REF_REG (use);
- int uregno = DF_REF_REGNO (use);
- enum machine_mode umode = GET_MODE (DF_REF_REG (use));
- int k;
-
- for (k = XVECLEN (PATTERN (insn), 0) - 1; k >= 0; k--)
- {
- rtx set = XVECEXP (PATTERN (insn), 0, k);
- if (GET_CODE (set) == SET
- && !REG_P (SET_DEST (set))
- && !rtx_equal_p (reg, SET_DEST (set))
- && reg_overlap_mentioned_p (reg, SET_DEST (set)))
- used_in_output = 1;
- }
- if (used_in_output)
- for (k = XVECLEN (PATTERN (insn), 0) - 1; k >= 0; k--)
- {
- rtx set = XVECEXP (PATTERN (insn), 0, k);
- if (GET_CODE (set) == SET
- && REG_P (SET_DEST (set))
- && !rtx_equal_p (reg, SET_DEST (set)))
- record_one_conflict_between_regnos (GET_MODE (SET_DEST (set)),
- REGNO (SET_DEST (set)),
- umode, uregno);
- }
- }
- }
- }
- }
-
- /* Add the renumbers live to the hard_regs_live for the next few
- calls. All of this gets recomputed at the top of the loop so
- there is no harm. */
- IOR_HARD_REG_SET (hard_regs_live, renumbers_live);
-
-#ifdef EH_RETURN_DATA_REGNO
- if (bb_has_eh_pred (bb))
- {
- unsigned int i;
-
- for (i = 0; ; ++i)
- {
- unsigned int regno = EH_RETURN_DATA_REGNO (i);
- if (regno == INVALID_REGNUM)
- break;
- record_one_conflict (allocnos_live, &hard_regs_live, regno);
- }
-
- EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
- {
- allocno[i].no_eh_reg = 1;
- }
- }
-#endif
-
- if (bb_has_abnormal_pred (bb))
- {
- unsigned int i;
-#ifdef STACK_REGS
- /* Pseudos can't go in stack regs at the start of a basic block that
- is reached by an abnormal edge. Likewise for call clobbered regs,
- because caller-save, fixup_abnormal_edges and possibly the table
- driven EH machinery are not quite ready to handle such regs live
- across such edges. */
- EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
- {
- allocno[i].no_stack_reg = 1;
- }
-
- for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
- record_one_conflict (allocnos_live, &hard_regs_live, i);
-#endif
-
- /* No need to record conflicts for call clobbered regs if we have
- nonlocal labels around, as we don't ever try to allocate such
- regs in this case. */
- if (! cfun->has_nonlocal_label)
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (call_used_regs [i])
- record_one_conflict (allocnos_live, &hard_regs_live, i);
- }
- }
-
- for (i = 0; i < (unsigned int)max_allocno; i++)
- if (live_subregs[i])
- free (live_subregs[i]);
-
- /* Clean up. */
- free (allocnos_live);
- free (live_subregs);
- free (live_subregs_used);
- VEC_free (df_ref_t, heap, dying_regs);
- VEC_free (df_ref_t, heap, clobbers);
- BITMAP_FREE (live);
-}
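/* A minimal standalone sketch of the idea described in the removed
   comment above: when an insn has several outputs and a register that
   dies in the insn is used inside the address of one output, that
   register must be recorded as conflicting with the other outputs so
   the allocator never assigns them the same hard register.  The data
   structures below (a plain bit matrix indexed by pseudo number) are
   illustrative only and are not GCC's allocno/sparseset machinery.  */

#include <stdbool.h>
#include <stdio.h>

#define MAX_PSEUDO 64

static bool conflict[MAX_PSEUDO][MAX_PSEUDO];

static void
record_conflict (int r1, int r2)
{
  if (r1 != r2)
    conflict[r1][r2] = conflict[r2][r1] = true;
}

/* DYING[] are registers that die in the insn, N_DYING of them.
   OUTPUTS[] are the registers written by the insn's parallel sets.
   USED_IN_ADDR[i] is true when DYING[i] appears in the address of at
   least one memory output.  */
static void
record_multi_output_conflicts (const int *dying, const bool *used_in_addr,
                               int n_dying, const int *outputs, int n_out)
{
  for (int i = 0; i < n_dying; i++)
    if (used_in_addr[i])
      for (int k = 0; k < n_out; k++)
        record_conflict (dying[i], outputs[k]);
}

int
main (void)
{
  /* Pseudo 5 dies and is used in the address of a store; pseudos 7 and 8
     are the insn's outputs.  */
  int dying[] = { 5 };
  bool used_in_addr[] = { true };
  int outputs[] = { 7, 8 };

  record_multi_output_conflicts (dying, used_in_addr, 1, outputs, 2);
  printf ("5 vs 7: %d, 5 vs 8: %d\n", conflict[5][7], conflict[5][8]);
  return 0;
}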
diff --git a/gcc/regclass.c b/gcc/reginfo.c
index 2f665d874a3..01b409c1942 100644
--- a/gcc/regclass.c
+++ b/gcc/reginfo.c
@@ -1,4 +1,4 @@
-/* Compute register class preferences for pseudo-registers.
+/* Compute different info about registers.
Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1996
1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
@@ -20,9 +20,12 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* This file contains two passes of the compiler: reg_scan and reg_class.
- It also defines some tables of information about the hardware registers
- and a function init_reg_sets to initialize the tables. */
+/* This file contains the regscan pass of the compiler and passes for
+   dealing with info about modes of pseudo-registers inside
+   subregisters.  It also defines some tables of information about the
+   hardware registers, the function init_reg_sets to initialize the
+   tables, and other auxiliary functions to deal with info about
+   registers and their classes.  */
#include "config.h"
#include "system.h"
@@ -55,17 +58,6 @@ along with GCC; see the file COPYING3. If not see
int max_regno;
-static void init_reg_sets_1 (void);
-static void init_reg_autoinc (void);
-
-/* If we have auto-increment or auto-decrement and we can have secondary
- reloads, we are not allowed to use classes requiring secondary
- reloads for pseudos auto-incremented since reload can't handle it. */
-/* We leave it to target hooks to decide if we have secondary reloads, so
- assume that we might have them. */
-#if defined(AUTO_INC_DEC) /* */
-#define FORBIDDEN_INC_DEC_CLASSES
-#endif
/* Register tables used by many passes. */
@@ -73,15 +65,12 @@ static void init_reg_autoinc (void);
that are fixed use (stack pointer, pc, frame pointer, etc.).
These are the registers that cannot be used to allocate
a pseudo reg for general use. */
-
char fixed_regs[FIRST_PSEUDO_REGISTER];
/* Same info as a HARD_REG_SET. */
-
HARD_REG_SET fixed_reg_set;
/* Data for initializing the above. */
-
static const char initial_fixed_regs[] = FIXED_REGISTERS;
/* Indexed by hard register number, contains 1 for registers
@@ -89,25 +78,18 @@ static const char initial_fixed_regs[] = FIXED_REGISTERS;
These are the registers that cannot be used to allocate
a pseudo reg whose life crosses calls unless we are able
to save/restore them across the calls. */
-
char call_used_regs[FIRST_PSEUDO_REGISTER];
/* Same info as a HARD_REG_SET. */
-
HARD_REG_SET call_used_reg_set;
-/* HARD_REG_SET of registers we want to avoid caller saving. */
-HARD_REG_SET losing_caller_save_reg_set;
-
/* Data for initializing the above. */
-
static const char initial_call_used_regs[] = CALL_USED_REGISTERS;
/* This is much like call_used_regs, except it doesn't have to
be a superset of FIXED_REGISTERS. This vector indicates
what is really call clobbered, and is used when defining
regs_invalidated_by_call. */
-
#ifdef CALL_REALLY_USED_REGISTERS
char call_really_used_regs[] = CALL_REALLY_USED_REGISTERS;
#endif
@@ -123,18 +105,15 @@ char call_really_used_regs[] = CALL_REALLY_USED_REGISTERS;
fixed use or call used registers that cannot hold quantities across
calls even if we are willing to save and restore them. call fixed
registers are a subset of call used registers. */
-
char call_fixed_regs[FIRST_PSEUDO_REGISTER];
/* The same info as a HARD_REG_SET. */
-
HARD_REG_SET call_fixed_reg_set;
/* Indexed by hard register number, contains 1 for registers
that are being used for global register decls.
These must be exempt from ordinary flow analysis
and are also considered fixed. */
-
char global_regs[FIRST_PSEUDO_REGISTER];
/* Contains 1 for registers that are set or clobbered by calls. */
@@ -143,17 +122,14 @@ char global_regs[FIRST_PSEUDO_REGISTER];
fixed_regs. Which leaves us guessing as to the set of fixed_regs
that are actually preserved. We know for sure that those associated
with the local stack frame are safe, but scant others. */
-
HARD_REG_SET regs_invalidated_by_call;
/* Same information as REGS_INVALIDATED_BY_CALL but in regset form to be used
in dataflow more conveniently. */
-
regset regs_invalidated_by_call_regset;
/* The bitmap_obstack is used to hold some static variables that
should not be reset after each function is compiled. */
-
static bitmap_obstack persistent_obstack;
/* Table of register numbers in the order in which to try to use them. */
@@ -165,14 +141,12 @@ int inv_reg_alloc_order[FIRST_PSEUDO_REGISTER];
#endif
/* For each reg class, a HARD_REG_SET saying which registers are in it. */
-
HARD_REG_SET reg_class_contents[N_REG_CLASSES];
/* The same information, but as an array of unsigned ints. We copy from
these unsigned ints to the table above. We do this so the tm.h files
do not have to be aware of the wordsize for machines with <= 64 regs.
Note that we hard-code 32 here, not HOST_BITS_PER_INT. */
-
#define N_REG_INTS \
((FIRST_PSEUDO_REGISTER + (32 - 1)) / 32)
@@ -180,89 +154,57 @@ static const unsigned int_reg_class_contents[N_REG_CLASSES][N_REG_INTS]
= REG_CLASS_CONTENTS;
/* For each reg class, number of regs it contains. */
-
unsigned int reg_class_size[N_REG_CLASSES];
-/* For each reg class, table listing all the containing classes. */
-
-static enum reg_class reg_class_superclasses[N_REG_CLASSES][N_REG_CLASSES];
-
/* For each reg class, table listing all the classes contained in it. */
-
enum reg_class reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
/* For each pair of reg classes,
a largest reg class contained in their union. */
-
enum reg_class reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
/* For each pair of reg classes,
the smallest reg class containing their union. */
-
enum reg_class reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
/* Array containing all of the register names. */
-
const char * reg_names[] = REGISTER_NAMES;
/* Array containing all of the register class names. */
-
const char * reg_class_names[] = REG_CLASS_NAMES;
/* For each hard register, the widest mode object that it can contain.
This will be a MODE_INT mode if the register can hold integers. Otherwise
it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the
register. */
-
enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
/* 1 if there is a register of given mode. */
-
bool have_regs_of_mode [MAX_MACHINE_MODE];
/* 1 if class does contain register of given mode. */
-
char contains_reg_of_mode [N_REG_CLASSES] [MAX_MACHINE_MODE];
/* Maximum cost of moving from a register in one class to a register in
another class. Based on REGISTER_MOVE_COST. */
-
move_table *move_cost[MAX_MACHINE_MODE];
/* Similar, but here we don't have to move if the first index is a subset
of the second so in that case the cost is zero. */
-
move_table *may_move_in_cost[MAX_MACHINE_MODE];
/* Similar, but here we don't have to move if the first index is a superset
of the second so in that case the cost is zero. */
-
move_table *may_move_out_cost[MAX_MACHINE_MODE];
/* Keep track of the last mode we initialized move costs for. */
static int last_mode_for_init_move_cost;
-#ifdef FORBIDDEN_INC_DEC_CLASSES
-
-/* These are the classes that regs which are auto-incremented or decremented
- cannot be put in. */
-
-static int forbidden_inc_dec_class[N_REG_CLASSES];
-
-/* Indexed by n, is nonzero if (REG n) is used in an auto-inc or auto-dec
- context. */
-
-static char *in_inc_dec;
-
-#endif /* FORBIDDEN_INC_DEC_CLASSES */
-
/* Sample MEM values for use by memory_move_secondary_cost. */
-
static GTY(()) rtx top_of_stack[MAX_MACHINE_MODE];
/* No more global register variables may be declared; true once
- regclass has been initialized. */
-
+ reginfo has been initialized. */
static int no_global_reg_vars = 0;
/* Specify number of hard registers given machine mode occupy. */
@@ -272,7 +214,6 @@ unsigned char hard_regno_nregs[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE];
correspond to the hard registers, if any, set in that map. This
could be done far more efficiently by having all sorts of special-cases
with moving single words, but probably isn't worth the trouble. */
-
void
reg_set_to_hard_reg_set (HARD_REG_SET *to, const_bitmap from)
{
@@ -287,10 +228,8 @@ reg_set_to_hard_reg_set (HARD_REG_SET *to, const_bitmap from)
}
}
-
/* Function called only once to initialize the above data on reg usage.
Once this is done, various switches may override. */
-
void
init_reg_sets (void)
{
@@ -321,7 +260,6 @@ init_reg_sets (void)
}
/* Initialize may_move_cost and friends for mode M. */
-
void
init_move_cost (enum machine_mode m)
{
@@ -412,7 +350,6 @@ init_move_cost (enum machine_mode m)
/* We need to save copies of some of the register information which
can be munged by command-line switches so we can restore it during
subsequent back-end reinitialization. */
-
static char saved_fixed_regs[FIRST_PSEUDO_REGISTER];
static char saved_call_used_regs[FIRST_PSEUDO_REGISTER];
#ifdef CALL_REALLY_USED_REGISTERS
@@ -421,7 +358,6 @@ static char saved_call_really_used_regs[FIRST_PSEUDO_REGISTER];
static const char *saved_reg_names[FIRST_PSEUDO_REGISTER];
/* Save the register information. */
-
void
save_register_info (void)
{
@@ -446,7 +382,6 @@ save_register_info (void)
}
/* Restore the register information. */
-
static void
restore_register_info (void)
{
@@ -463,7 +398,6 @@ restore_register_info (void)
/* After switches have been processed, which perhaps alter
`fixed_regs' and `call_used_regs', convert them to HARD_REG_SETs. */
-
static void
init_reg_sets_1 (void)
{
@@ -543,10 +477,7 @@ init_reg_sets_1 (void)
for (i = 0; i < N_REG_CLASSES; i++)
{
for (j = 0; j < N_REG_CLASSES; j++)
- {
- reg_class_superclasses[i][j] = LIM_REG_CLASSES;
- reg_class_subclasses[i][j] = LIM_REG_CLASSES;
- }
+ reg_class_subclasses[i][j] = LIM_REG_CLASSES;
}
for (i = 0; i < N_REG_CLASSES; i++)
@@ -562,9 +493,6 @@ init_reg_sets_1 (void)
Add J to the table of superclasses of I. */
enum reg_class *p;
- p = &reg_class_superclasses[i][0];
- while (*p != LIM_REG_CLASSES) p++;
- *p = (enum reg_class) j;
/* Add I to the table of superclasses of J. */
p = &reg_class_subclasses[j][0];
while (*p != LIM_REG_CLASSES) p++;
@@ -578,7 +506,6 @@ init_reg_sets_1 (void)
CLEAR_HARD_REG_SET (call_used_reg_set);
CLEAR_HARD_REG_SET (call_fixed_reg_set);
CLEAR_HARD_REG_SET (regs_invalidated_by_call);
- CLEAR_HARD_REG_SET (losing_caller_save_reg_set);
if (!regs_invalidated_by_call_regset)
{
bitmap_obstack_initialize (&persistent_obstack);
@@ -605,8 +532,6 @@ init_reg_sets_1 (void)
SET_HARD_REG_BIT (call_used_reg_set, i);
if (call_fixed_regs[i])
SET_HARD_REG_BIT (call_fixed_reg_set, i);
- if (CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (i)))
- SET_HARD_REG_BIT (losing_caller_save_reg_set, i);
/* There are a couple of fixed registers that we know are safe to
exclude from being clobbered by calls:
@@ -702,7 +627,6 @@ init_reg_sets_1 (void)
This function might be invoked more than once, if the target has support
for changing register usage conventions on a per-function basis.
*/
-
void
init_reg_modes_target (void)
{
@@ -730,48 +654,39 @@ init_reg_modes_target (void)
This function might be invoked more than once, if the target has support
for changing register usage conventions on a per-function basis.
*/
-
void
init_regs (void)
{
/* This finishes what was started by init_reg_sets, but couldn't be done
until after register usage was specified. */
init_reg_sets_1 ();
-
- init_reg_autoinc ();
}
-/* The same as previous function plus initializing IRA if it is
- necessary. */
+/* The same as the previous function, plus initializing IRA.  */
void
reinit_regs (void)
{
init_regs ();
-
- if (flag_ira)
- ira_init ();
+ ira_init ();
}
/* Initialize some fake stack-frame MEM references for use in
memory_move_secondary_cost. */
-
void
init_fake_stack_mems (void)
{
- {
- int i;
-
- for (i = 0; i < MAX_MACHINE_MODE; i++)
- top_of_stack[i] = gen_rtx_MEM (i, stack_pointer_rtx);
- }
+ int i;
+
+ for (i = 0; i < MAX_MACHINE_MODE; i++)
+ top_of_stack[i] = gen_rtx_MEM (i, stack_pointer_rtx);
}
/* Compute extra cost of moving registers to/from memory due to reloads.
Only needed if secondary reloads are required for memory moves. */
-
int
-memory_move_secondary_cost (enum machine_mode mode, enum reg_class rclass, int in)
+memory_move_secondary_cost (enum machine_mode mode, enum reg_class rclass,
+ int in)
{
enum reg_class altclass;
int partial_cost = 0;
@@ -779,7 +694,6 @@ memory_move_secondary_cost (enum machine_mode mode, enum reg_class rclass, int i
/* mem may be unused even if the SECONDARY_ macros are defined. */
rtx mem ATTRIBUTE_UNUSED = top_of_stack[(int) mode];
-
altclass = secondary_reload_class (in ? 1 : 0, rclass, mode, mem);
if (altclass == NO_REGS)
@@ -808,7 +722,6 @@ memory_move_secondary_cost (enum machine_mode mode, enum reg_class rclass, int i
/* Return a machine mode that is legitimate for hard reg REGNO and large
enough to save nregs. If we can't find one, return VOIDmode.
If CALL_SAVED is true, only consider modes that are call saved. */
-
enum machine_mode
choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED,
unsigned int nregs, bool call_saved)
@@ -881,7 +794,6 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED,
/* Specify the usage characteristics of the register named NAME.
It should be a fixed register if FIXED and a
call-used register if CALL_USED. */
-
void
fix_register (const char *name, int fixed, int call_used)
{
@@ -925,7 +837,6 @@ fix_register (const char *name, int fixed, int call_used)
}
/* Mark register number I as global. */
-
void
globalize_reg (int i)
{
@@ -950,7 +861,7 @@ globalize_reg (int i)
{
SET_HARD_REG_BIT (regs_invalidated_by_call, i);
SET_REGNO_REG_SET (regs_invalidated_by_call_regset, i);
- }
+ }
/* If already fixed, nothing else to do. */
if (fixed_regs[i])
@@ -966,18 +877,6 @@ globalize_reg (int i)
SET_HARD_REG_BIT (call_fixed_reg_set, i);
}
-/* Now the data and code for the `regclass' pass, which happens
- just before local-alloc. */
-
-/* The `costs' struct records the cost of using a hard register of each class
- and of using memory for each pseudo. We use this data to set up
- register class preferences. */
-
-struct costs
-{
- int cost[N_REG_CLASSES];
- int mem_cost;
-};
/* Structure used to record preferences of given pseudo. */
struct reg_pref
@@ -996,65 +895,13 @@ struct reg_pref
char altclass;
};
-/* Record the cost of each class for each pseudo. */
-
-static struct costs *costs;
-
-/* Initialized once, and used to initialize cost values for each insn. */
-
-static struct costs init_cost;
-
-/* Record preferences of each pseudo.
- This is available after `regclass' is run. */
-
+/* Record preferences of each pseudo. This is available after RA is
+ run. */
static struct reg_pref *reg_pref;
-/* Frequency of executions of current insn. */
-
-static int frequency;
-
-static rtx scan_one_insn (rtx, int);
-static void record_operand_costs (rtx, struct costs *, struct reg_pref *);
-static void dump_regclass (FILE *);
-static void record_reg_classes (int, int, rtx *, enum machine_mode *,
- const char **, rtx, struct costs *,
- struct reg_pref *);
-static int copy_cost (rtx, enum machine_mode, enum reg_class, int,
- secondary_reload_info *);
-static void record_address_regs (enum machine_mode, rtx, int, enum rtx_code,
- enum rtx_code, int);
-#ifdef FORBIDDEN_INC_DEC_CLASSES
-static int auto_inc_dec_reg_p (rtx, enum machine_mode);
-#endif
-static void reg_scan_mark_refs (rtx, rtx);
-
-/* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
-
-static inline bool
-ok_for_index_p_nonstrict (rtx reg)
-{
- unsigned regno = REGNO (reg);
- return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
-}
-
-/* A version of regno_ok_for_base_p for use during regclass, when all pseudos
- should count as OK. Arguments as for regno_ok_for_base_p. */
-
-static inline bool
-ok_for_base_p_nonstrict (rtx reg, enum machine_mode mode,
- enum rtx_code outer_code, enum rtx_code index_code)
-{
- unsigned regno = REGNO (reg);
- if (regno >= FIRST_PSEUDO_REGISTER)
- return true;
-
- return ok_for_base_p_1 (regno, mode, outer_code, index_code);
-}
-
/* Return the reg_class in which pseudo reg number REGNO is best allocated.
This function is sometimes called before the info has been computed.
When that happens, just return GENERAL_REGS, which is innocuous. */
-
enum reg_class
reg_preferred_class (int regno)
{
@@ -1074,21 +921,14 @@ reg_alternate_class (int regno)
}
/* Initialize some global data for this pass. */
-
static unsigned int
-regclass_init (void)
+reginfo_init (void)
{
- int i;
-
if (df)
df_compute_regs_ever_live (true);
- init_cost.mem_cost = 10000;
- for (i = 0; i < N_REG_CLASSES; i++)
- init_cost.cost[i] = 10000;
-
/* This prevents dump_flow_info from losing if called
- before regclass is run. */
+ before reginfo is run. */
reg_pref = NULL;
/* No more global register variables may be declared. */
@@ -1096,13 +936,13 @@ regclass_init (void)
return 1;
}
-struct rtl_opt_pass pass_regclass_init =
+struct rtl_opt_pass pass_reginfo_init =
{
{
RTL_PASS,
- "regclass", /* name */
+ "reginfo", /* name */
NULL, /* gate */
- regclass_init, /* execute */
+ reginfo_init, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
@@ -1115,1210 +955,6 @@ struct rtl_opt_pass pass_regclass_init =
}
};
-
-
-/* Dump register costs. */
-static void
-dump_regclass (FILE *dump)
-{
- int i;
- for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
- {
- int /* enum reg_class */ rclass;
- if (REG_N_REFS (i))
- {
- fprintf (dump, " Register %i costs:", i);
- for (rclass = 0; rclass < (int) N_REG_CLASSES; rclass++)
- if (contains_reg_of_mode [(enum reg_class) rclass][PSEUDO_REGNO_MODE (i)]
-#ifdef FORBIDDEN_INC_DEC_CLASSES
- && (!in_inc_dec[i]
- || !forbidden_inc_dec_class[(enum reg_class) rclass])
-#endif
-#ifdef CANNOT_CHANGE_MODE_CLASS
- && ! invalid_mode_change_p (i, (enum reg_class) rclass,
- PSEUDO_REGNO_MODE (i))
-#endif
- )
- fprintf (dump, " %s:%i", reg_class_names[rclass],
- costs[i].cost[(enum reg_class) rclass]);
- fprintf (dump, " MEM:%i\n", costs[i].mem_cost);
- }
- }
-}
-
-
-/* Calculate the costs of insn operands. */
-
-static void
-record_operand_costs (rtx insn, struct costs *op_costs,
- struct reg_pref *reg_pref)
-{
- const char *constraints[MAX_RECOG_OPERANDS];
- enum machine_mode modes[MAX_RECOG_OPERANDS];
- int i;
-
- for (i = 0; i < recog_data.n_operands; i++)
- {
- constraints[i] = recog_data.constraints[i];
- modes[i] = recog_data.operand_mode[i];
- }
-
- /* If we get here, we are set up to record the costs of all the
- operands for this insn. Start by initializing the costs.
- Then handle any address registers. Finally record the desired
- classes for any pseudos, doing it twice if some pair of
- operands are commutative. */
-
- for (i = 0; i < recog_data.n_operands; i++)
- {
- op_costs[i] = init_cost;
-
- if (GET_CODE (recog_data.operand[i]) == SUBREG)
- recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
-
- if (MEM_P (recog_data.operand[i]))
- record_address_regs (GET_MODE (recog_data.operand[i]),
- XEXP (recog_data.operand[i], 0),
- 0, MEM, SCRATCH, frequency * 2);
- else if (recog_data.alternative_enabled_p[0]
- && (constraints[i][0] == 'p'
- || EXTRA_ADDRESS_CONSTRAINT (constraints[i][0], constraints[i])))
- record_address_regs (VOIDmode, recog_data.operand[i], 0, ADDRESS,
- SCRATCH, frequency * 2);
- }
-
- /* Check for commutative in a separate loop so everything will
- have been initialized. We must do this even if one operand
- is a constant--see addsi3 in m68k.md. */
-
- for (i = 0; i < (int) recog_data.n_operands - 1; i++)
- if (constraints[i][0] == '%')
- {
- const char *xconstraints[MAX_RECOG_OPERANDS];
- int j;
-
- /* Handle commutative operands by swapping the constraints.
- We assume the modes are the same. */
-
- for (j = 0; j < recog_data.n_operands; j++)
- xconstraints[j] = constraints[j];
-
- xconstraints[i] = constraints[i+1];
- xconstraints[i+1] = constraints[i];
- record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
- recog_data.operand, modes,
- xconstraints, insn, op_costs, reg_pref);
- }
-
- record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
- recog_data.operand, modes,
- constraints, insn, op_costs, reg_pref);
-}
-
-/* Subroutine of regclass, processes one insn INSN. Scan it and record each
- time it would save code to put a certain register in a certain class.
- PASS, when nonzero, inhibits some optimizations which need only be done
- once.
- Return the last insn processed, so that the scan can be continued from
- there. */
-
-static rtx
-scan_one_insn (rtx insn, int pass ATTRIBUTE_UNUSED)
-{
- enum rtx_code pat_code;
- rtx set, note;
- int i, j;
- struct costs op_costs[MAX_RECOG_OPERANDS];
-
- if (!INSN_P (insn))
- return insn;
-
- pat_code = GET_CODE (PATTERN (insn));
- if (pat_code == USE
- || pat_code == CLOBBER
- || pat_code == ASM_INPUT
- || pat_code == ADDR_VEC
- || pat_code == ADDR_DIFF_VEC)
- return insn;
-
- set = single_set (insn);
- extract_insn (insn);
-
- /* If this insn loads a parameter from its stack slot, then
- it represents a savings, rather than a cost, if the
- parameter is stored in memory. Record this fact. */
-
- if (set != 0 && REG_P (SET_DEST (set))
- && MEM_P (SET_SRC (set))
- && (note = find_reg_note (insn, REG_EQUIV,
- NULL_RTX)) != 0
- && MEM_P (XEXP (note, 0)))
- {
- costs[REGNO (SET_DEST (set))].mem_cost
- -= (MEMORY_MOVE_COST (GET_MODE (SET_DEST (set)),
- GENERAL_REGS, 1)
- * frequency);
- record_address_regs (GET_MODE (SET_SRC (set)), XEXP (SET_SRC (set), 0),
- 0, MEM, SCRATCH, frequency * 2);
- return insn;
- }
-
- record_operand_costs (insn, op_costs, reg_pref);
-
- /* Now add the cost for each operand to the total costs for
- its register. */
-
- for (i = 0; i < recog_data.n_operands; i++)
- if (REG_P (recog_data.operand[i])
- && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
- {
- int regno = REGNO (recog_data.operand[i]);
- struct costs *p = &costs[regno], *q = &op_costs[i];
-
- p->mem_cost += q->mem_cost * frequency;
- for (j = 0; j < N_REG_CLASSES; j++)
- p->cost[j] += q->cost[j] * frequency;
- }
-
- return insn;
-}
-
-/* Initialize information about which register classes can be used for
- pseudos that are auto-incremented or auto-decremented. */
-
-static void
-init_reg_autoinc (void)
-{
-#ifdef FORBIDDEN_INC_DEC_CLASSES
- int i;
-
- memset (forbidden_inc_dec_class, 0, sizeof forbidden_inc_dec_class);
- for (i = 0; i < N_REG_CLASSES; i++)
- {
- rtx r = gen_rtx_raw_REG (VOIDmode, 0);
- enum machine_mode m;
- int j;
-
- for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
- if (TEST_HARD_REG_BIT (reg_class_contents[i], j))
- {
- SET_REGNO (r, j);
-
- for (m = VOIDmode; (int) m < (int) MAX_MACHINE_MODE;
- m = (enum machine_mode) ((int) m + 1))
- if (HARD_REGNO_MODE_OK (j, m))
- {
- /* ??? There are two assumptions here; that the base class does not
- depend on the exact outer code (POST_INC vs. PRE_INC etc.), and
- that it does not depend on the machine mode of the memory
- reference. */
- enum reg_class base_class
- = base_reg_class (VOIDmode, POST_INC, SCRATCH);
-
- PUT_MODE (r, m);
-
- /* If a register is not directly suitable for an
- auto-increment or decrement addressing mode and
- requires secondary reloads, disallow its class from
- being used in such addresses. */
-
- if ((secondary_reload_class (0, base_class, m, r)
- || secondary_reload_class (1, base_class, m, r))
- && ! auto_inc_dec_reg_p (r, m))
- forbidden_inc_dec_class[i] = 1;
- }
- }
- }
-#endif /* FORBIDDEN_INC_DEC_CLASSES */
-}
-
-/* This is a pass of the compiler that scans all instructions
- and calculates the preferred class for each pseudo-register.
- This information can be accessed later by calling `reg_preferred_class'.
- This pass comes just before local register allocation. */
-
-void
-regclass (rtx f, int nregs)
-{
- rtx insn;
- int i;
- int pass;
- max_regno = max_reg_num ();
-
- init_recog ();
-
- reg_renumber = XNEWVEC (short, max_regno);
- reg_pref = XCNEWVEC (struct reg_pref, max_regno);
- memset (reg_renumber, -1, max_regno * sizeof (short));
-
- costs = XNEWVEC (struct costs, nregs);
-
-#ifdef FORBIDDEN_INC_DEC_CLASSES
-
- in_inc_dec = XNEWVEC (char, nregs);
-
-#endif /* FORBIDDEN_INC_DEC_CLASSES */
-
- /* Normally we scan the insns once and determine the best class to use for
-   each register.  However, if -fexpensive-optimizations is on, we do so
- twice, the second time using the tentative best classes to guide the
- selection. */
-
- for (pass = 0; pass <= flag_expensive_optimizations; pass++)
- {
- basic_block bb;
-
- if (dump_file)
- fprintf (dump_file, "\n\nPass %i\n\n",pass);
- /* Zero out our accumulation of the cost of each class for each reg. */
-
- memset (costs, 0, nregs * sizeof (struct costs));
-
-#ifdef FORBIDDEN_INC_DEC_CLASSES
- memset (in_inc_dec, 0, nregs);
-#endif
-
- /* Scan the instructions and record each time it would
- save code to put a certain register in a certain class. */
-
- if (!optimize)
- {
- frequency = REG_FREQ_MAX;
- for (insn = f; insn; insn = NEXT_INSN (insn))
- insn = scan_one_insn (insn, pass);
- }
- else
- FOR_EACH_BB (bb)
- {
- /* Show that an insn inside a loop is likely to be executed three
- times more than insns outside a loop. This is much more
- aggressive than the assumptions made elsewhere and is being
- tried as an experiment. */
- frequency = REG_FREQ_FROM_BB (bb);
- for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
- {
- insn = scan_one_insn (insn, pass);
- if (insn == BB_END (bb))
- break;
- }
- }
-
- /* Now for each register look at how desirable each class is
- and find which class is preferred. Store that in
- `prefclass'. Record in `altclass' the largest register
- class any of whose registers is better than memory. */
-
- if (dump_file)
- {
- dump_regclass (dump_file);
- fprintf (dump_file,"\n");
- }
- for (i = FIRST_PSEUDO_REGISTER; i < nregs; i++)
- {
- int best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
- enum reg_class best = ALL_REGS, alt = NO_REGS;
- /* This is an enum reg_class, but we call it an int
- to save lots of casts. */
- int rclass;
- struct costs *p = &costs[i];
-
- if (regno_reg_rtx[i] == NULL)
- continue;
-
- /* In non-optimizing compilation REG_N_REFS is not initialized
- yet. */
- if (optimize && !REG_N_REFS (i) && !REG_N_SETS (i))
- continue;
-
- for (rclass = (int) ALL_REGS - 1; rclass > 0; rclass--)
- {
- /* Ignore classes that are too small for this operand or
- invalid for an operand that was auto-incremented. */
- if (!contains_reg_of_mode [rclass][PSEUDO_REGNO_MODE (i)]
-#ifdef FORBIDDEN_INC_DEC_CLASSES
- || (in_inc_dec[i] && forbidden_inc_dec_class[rclass])
-#endif
-#ifdef CANNOT_CHANGE_MODE_CLASS
- || invalid_mode_change_p (i, (enum reg_class) rclass,
- PSEUDO_REGNO_MODE (i))
-#endif
- )
- ;
- else if (p->cost[rclass] < best_cost)
- {
- best_cost = p->cost[rclass];
- best = (enum reg_class) rclass;
- }
- else if (p->cost[rclass] == best_cost)
- best = reg_class_subunion[(int) best][rclass];
- }
-
- /* If no register class is better than memory, use memory. */
- if (p->mem_cost < best_cost)
- best = NO_REGS;
-
- /* Record the alternate register class; i.e., a class for which
- every register in it is better than using memory. If adding a
- class would make a smaller class (i.e., no union of just those
- classes exists), skip that class. The major unions of classes
- should be provided as a register class. Don't do this if we
- will be doing it again later. */
-
- if ((pass == 1 || dump_file) || ! flag_expensive_optimizations)
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- if (p->cost[rclass] < p->mem_cost
- && (reg_class_size[(int) reg_class_subunion[(int) alt][rclass]]
- > reg_class_size[(int) alt])
-#ifdef FORBIDDEN_INC_DEC_CLASSES
- && ! (in_inc_dec[i] && forbidden_inc_dec_class[rclass])
-#endif
-#ifdef CANNOT_CHANGE_MODE_CLASS
- && ! invalid_mode_change_p (i, (enum reg_class) rclass,
- PSEUDO_REGNO_MODE (i))
-#endif
- )
- alt = reg_class_subunion[(int) alt][rclass];
-
- /* If we don't add any classes, nothing to try. */
- if (alt == best)
- alt = NO_REGS;
-
- if (dump_file
- && (reg_pref[i].prefclass != (int) best
- || reg_pref[i].altclass != (int) alt))
- {
- fprintf (dump_file, " Register %i", i);
- if (alt == ALL_REGS || best == ALL_REGS)
- fprintf (dump_file, " pref %s\n", reg_class_names[(int) best]);
- else if (alt == NO_REGS)
- fprintf (dump_file, " pref %s or none\n", reg_class_names[(int) best]);
- else
- fprintf (dump_file, " pref %s, else %s\n",
- reg_class_names[(int) best],
- reg_class_names[(int) alt]);
- }
-
- /* We cast to (int) because (char) hits bugs in some compilers. */
- reg_pref[i].prefclass = (int) best;
- reg_pref[i].altclass = (int) alt;
- }
- }
-
-#ifdef FORBIDDEN_INC_DEC_CLASSES
- free (in_inc_dec);
-#endif
- free (costs);
-}
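/* An illustrative, self-contained sketch of the class-selection step
   performed by the removed regclass () above: for one pseudo, pick the
   register class with the lowest accumulated cost, falling back to
   memory (NO_REGS here) when memory is cheaper than every class.  The
   enum and cost array are invented for the example and the tie-breaking
   via reg_class_subunion is omitted; this is much simpler than GCC's
   cost tables.  */

#include <limits.h>
#include <stdio.h>

enum sketch_class { SK_NO_REGS, SK_GENERAL_REGS, SK_FLOAT_REGS, SK_ALL, SK_N };

static const char *const sketch_class_names[SK_N]
  = { "NO_REGS", "GENERAL_REGS", "FLOAT_REGS", "ALL" };

/* Return the preferred class given per-class costs and the memory cost.  */
static enum sketch_class
preferred_class (const int cost[SK_N], int mem_cost)
{
  int best_cost = INT_MAX;
  enum sketch_class best = SK_ALL;

  /* Skip SK_NO_REGS; it stands for "keep it in memory".  */
  for (int c = SK_N - 1; c > SK_NO_REGS; c--)
    if (cost[c] < best_cost)
      {
        best_cost = cost[c];
        best = (enum sketch_class) c;
      }

  /* If no class beats memory, prefer memory.  */
  if (mem_cost < best_cost)
    best = SK_NO_REGS;
  return best;
}

int
main (void)
{
  int cost[SK_N] = { 0, 4, 12, 9 };
  printf ("pref = %s\n", sketch_class_names[preferred_class (cost, 6)]);
  return 0;
}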
-
-/* Record the cost of using memory or registers of various classes for
- the operands in INSN.
-
- N_ALTS is the number of alternatives.
-
- N_OPS is the number of operands.
-
- OPS is an array of the operands.
-
- MODES are the modes of the operands, in case any are VOIDmode.
-
- CONSTRAINTS are the constraints to use for the operands. This array
- is modified by this procedure.
-
- This procedure works alternative by alternative. For each alternative
- we assume that we will be able to allocate all pseudos to their ideal
- register class and calculate the cost of using that alternative. Then
- we compute for each operand that is a pseudo-register, the cost of
- having the pseudo allocated to each register class and using it in that
- alternative. To this cost is added the cost of the alternative.
-
- The cost of each class for this insn is its lowest cost among all the
- alternatives. */
-
-static void
-record_reg_classes (int n_alts, int n_ops, rtx *ops,
- enum machine_mode *modes, const char **constraints,
- rtx insn, struct costs *op_costs,
- struct reg_pref *reg_pref)
-{
- int alt;
- int i, j;
- rtx set;
-
- /* Process each alternative, each time minimizing an operand's cost with
- the cost for each operand in that alternative. */
-
- for (alt = 0; alt < n_alts; alt++)
- {
- struct costs this_op_costs[MAX_RECOG_OPERANDS];
- int alt_fail = 0;
- int alt_cost = 0;
- enum reg_class classes[MAX_RECOG_OPERANDS];
- int allows_mem[MAX_RECOG_OPERANDS];
- int rclass;
-
- for (i = 0; i < n_ops; i++)
- {
- const char *p = constraints[i];
- rtx op = ops[i];
- enum machine_mode mode = modes[i];
- int allows_addr = 0;
- int win = 0;
- unsigned char c;
-
- /* Initially show we know nothing about the register class. */
- classes[i] = NO_REGS;
- allows_mem[i] = 0;
-
- /* If this operand has no constraints at all, we can conclude
- nothing about it since anything is valid. */
-
- if (*p == 0)
- {
- if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
- memset (&this_op_costs[i], 0, sizeof this_op_costs[i]);
-
- continue;
- }
-
- /* If this alternative is only relevant when this operand
- matches a previous operand, we do different things depending
- on whether this operand is a pseudo-reg or not. We must process
- any modifiers for the operand before we can make this test. */
-
- while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
- p++;
-
- if (p[0] >= '0' && p[0] <= '0' + i && (p[1] == ',' || p[1] == 0))
- {
- /* Copy class and whether memory is allowed from the matching
- alternative. Then perform any needed cost computations
- and/or adjustments. */
- j = p[0] - '0';
- classes[i] = classes[j];
- allows_mem[i] = allows_mem[j];
-
- if (!REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
- {
- /* If this matches the other operand, we have no added
- cost and we win. */
- if (rtx_equal_p (ops[j], op))
- win = 1;
-
- /* If we can put the other operand into a register, add to
- the cost of this alternative the cost to copy this
- operand to the register used for the other operand. */
-
- else if (classes[j] != NO_REGS)
- {
- alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
- win = 1;
- }
- }
- else if (!REG_P (ops[j])
- || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
- {
- /* This op is a pseudo but the one it matches is not. */
-
- /* If we can't put the other operand into a register, this
- alternative can't be used. */
-
- if (classes[j] == NO_REGS)
- alt_fail = 1;
-
- /* Otherwise, add to the cost of this alternative the cost
- to copy the other operand to the register used for this
- operand. */
-
- else
- alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
- }
- else
- {
- /* The costs of this operand are not the same as the other
- operand since move costs are not symmetric. Moreover,
- if we cannot tie them, this alternative needs to do a
- copy, which is one instruction. */
-
- struct costs *pp = &this_op_costs[i];
- move_table *intable = NULL;
- move_table *outtable = NULL;
- int op_class = (int) classes[i];
-
- if (!move_cost[mode])
- init_move_cost (mode);
- intable = may_move_in_cost[mode];
- outtable = may_move_out_cost[mode];
-
- /* The loop is performance critical, so unswitch it manually.
- */
- switch (recog_data.operand_type[i])
- {
- case OP_INOUT:
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- pp->cost[rclass] = (intable[rclass][op_class]
- + outtable[op_class][rclass]);
- break;
- case OP_IN:
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- pp->cost[rclass] = intable[rclass][op_class];
- break;
- case OP_OUT:
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- pp->cost[rclass] = outtable[op_class][rclass];
- break;
- }
-
- /* If the alternative actually allows memory, make things
- a bit cheaper since we won't need an extra insn to
- load it. */
-
- pp->mem_cost
- = ((recog_data.operand_type[i] != OP_IN
- ? MEMORY_MOVE_COST (mode, classes[i], 0)
- : 0)
- + (recog_data.operand_type[i] != OP_OUT
- ? MEMORY_MOVE_COST (mode, classes[i], 1)
- : 0) - allows_mem[i]);
-
- /* If we have assigned a class to this register in our
- first pass, add a cost to this alternative corresponding
- to what we would add if this register were not in the
- appropriate class. */
-
- if (reg_pref && reg_pref[REGNO (op)].prefclass != NO_REGS)
- alt_cost
- += (may_move_in_cost[mode]
- [(unsigned char) reg_pref[REGNO (op)].prefclass]
- [(int) classes[i]]);
-
- if (REGNO (ops[i]) != REGNO (ops[j])
- && ! find_reg_note (insn, REG_DEAD, op))
- alt_cost += 2;
-
- /* This is in place of ordinary cost computation
- for this operand, so skip to the end of the
- alternative (should be just one character). */
- while (*p && *p++ != ',')
- ;
-
- constraints[i] = p;
- continue;
- }
- }
-
- /* Scan all the constraint letters. See if the operand matches
- any of the constraints. Collect the valid register classes
- and see if this operand accepts memory. */
-
- while ((c = *p))
- {
- switch (c)
- {
- case ',':
- break;
- case '*':
- /* Ignore the next letter for this pass. */
- c = *++p;
- break;
-
- case '?':
- alt_cost += 2;
- case '!': case '#': case '&':
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- break;
-
- case 'p':
- allows_addr = 1;
- win = address_operand (op, GET_MODE (op));
- /* We know this operand is an address, so we want it to be
- allocated to a register that can be the base of an
- address, i.e. BASE_REG_CLASS. */
- classes[i]
- = reg_class_subunion[(int) classes[i]]
- [(int) base_reg_class (VOIDmode, ADDRESS, SCRATCH)];
- break;
-
- case TARGET_MEM_CONSTRAINT: case 'o': case 'V':
- /* It doesn't seem worth distinguishing between offsettable
- and non-offsettable addresses here. */
- allows_mem[i] = 1;
- if (MEM_P (op))
- win = 1;
- break;
-
- case '<':
- if (MEM_P (op)
- && (GET_CODE (XEXP (op, 0)) == PRE_DEC
- || GET_CODE (XEXP (op, 0)) == POST_DEC))
- win = 1;
- break;
-
- case '>':
- if (MEM_P (op)
- && (GET_CODE (XEXP (op, 0)) == PRE_INC
- || GET_CODE (XEXP (op, 0)) == POST_INC))
- win = 1;
- break;
-
- case 'E':
- case 'F':
- if (GET_CODE (op) == CONST_DOUBLE
- || (GET_CODE (op) == CONST_VECTOR
- && (GET_MODE_CLASS (GET_MODE (op))
- == MODE_VECTOR_FLOAT)))
- win = 1;
- break;
-
- case 'G':
- case 'H':
- if (GET_CODE (op) == CONST_DOUBLE
- && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, c, p))
- win = 1;
- break;
-
- case 's':
- if (GET_CODE (op) == CONST_INT
- || (GET_CODE (op) == CONST_DOUBLE
- && GET_MODE (op) == VOIDmode))
- break;
- case 'i':
- if (CONSTANT_P (op)
- && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)))
- win = 1;
- break;
-
- case 'n':
- if (GET_CODE (op) == CONST_INT
- || (GET_CODE (op) == CONST_DOUBLE
- && GET_MODE (op) == VOIDmode))
- win = 1;
- break;
-
- case 'I':
- case 'J':
- case 'K':
- case 'L':
- case 'M':
- case 'N':
- case 'O':
- case 'P':
- if (GET_CODE (op) == CONST_INT
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), c, p))
- win = 1;
- break;
-
- case 'X':
- win = 1;
- break;
-
- case 'g':
- if (MEM_P (op)
- || (CONSTANT_P (op)
- && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
- win = 1;
- allows_mem[i] = 1;
- case 'r':
- classes[i]
- = reg_class_subunion[(int) classes[i]][(int) GENERAL_REGS];
- break;
-
- default:
- if (REG_CLASS_FROM_CONSTRAINT (c, p) != NO_REGS)
- classes[i]
- = reg_class_subunion[(int) classes[i]]
- [(int) REG_CLASS_FROM_CONSTRAINT (c, p)];
-#ifdef EXTRA_CONSTRAINT_STR
- else if (EXTRA_CONSTRAINT_STR (op, c, p))
- win = 1;
-
- if (EXTRA_MEMORY_CONSTRAINT (c, p))
- {
- /* Every MEM can be reloaded to fit. */
- allows_mem[i] = 1;
- if (MEM_P (op))
- win = 1;
- }
- if (EXTRA_ADDRESS_CONSTRAINT (c, p))
- {
- /* Every address can be reloaded to fit. */
- allows_addr = 1;
- if (address_operand (op, GET_MODE (op)))
- win = 1;
- /* We know this operand is an address, so we want it to
- be allocated to a register that can be the base of an
- address, i.e. BASE_REG_CLASS. */
- classes[i]
- = reg_class_subunion[(int) classes[i]]
- [(int) base_reg_class (VOIDmode, ADDRESS, SCRATCH)];
- }
-#endif
- break;
- }
- p += CONSTRAINT_LEN (c, p);
- if (c == ',')
- break;
- }
-
- constraints[i] = p;
-
- /* How we account for this operand now depends on whether it is a
- pseudo register or not. If it is, we first check if any
- register classes are valid. If not, we ignore this alternative,
- since we want to assume that all pseudos get allocated for
- register preferencing. If some register class is valid, compute
- the costs of moving the pseudo into that class. */
-
- if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
- {
- if (classes[i] == NO_REGS)
- {
- /* We must always fail if the operand is a REG, but
- we did not find a suitable class.
-
- Otherwise we may perform an uninitialized read
- from this_op_costs after the `continue' statement
- below. */
- alt_fail = 1;
- }
- else
- {
- struct costs *pp = &this_op_costs[i];
- move_table *intable = NULL;
- move_table *outtable = NULL;
- int op_class = (int) classes[i];
-
- if (!move_cost[mode])
- init_move_cost (mode);
- intable = may_move_in_cost[mode];
- outtable = may_move_out_cost[mode];
-
- /* The loop is performance critical, so unswitch it manually.
- */
- switch (recog_data.operand_type[i])
- {
- case OP_INOUT:
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- pp->cost[rclass] = (intable[rclass][op_class]
- + outtable[op_class][rclass]);
- break;
- case OP_IN:
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- pp->cost[rclass] = intable[rclass][op_class];
- break;
- case OP_OUT:
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- pp->cost[rclass] = outtable[op_class][rclass];
- break;
- }
-
- /* If the alternative actually allows memory, make things
- a bit cheaper since we won't need an extra insn to
- load it. */
-
- pp->mem_cost
- = ((recog_data.operand_type[i] != OP_IN
- ? MEMORY_MOVE_COST (mode, classes[i], 0)
- : 0)
- + (recog_data.operand_type[i] != OP_OUT
- ? MEMORY_MOVE_COST (mode, classes[i], 1)
- : 0) - allows_mem[i]);
-
- /* If we have assigned a class to this register in our
- first pass, add a cost to this alternative corresponding
- to what we would add if this register were not in the
- appropriate class. */
-
- if (reg_pref && reg_pref[REGNO (op)].prefclass != NO_REGS)
- alt_cost
- += (may_move_in_cost[mode]
- [(unsigned char) reg_pref[REGNO (op)].prefclass]
- [(int) classes[i]]);
- }
- }
-
- /* Otherwise, if this alternative wins, either because we
- have already determined that or if we have a hard register of
- the proper class, there is no cost for this alternative. */
-
- else if (win
- || (REG_P (op)
- && reg_fits_class_p (op, classes[i], 0, GET_MODE (op))))
- ;
-
- /* If registers are valid, the cost of this alternative includes
- copying the object to and/or from a register. */
-
- else if (classes[i] != NO_REGS)
- {
- if (recog_data.operand_type[i] != OP_OUT)
- alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
-
- if (recog_data.operand_type[i] != OP_IN)
- alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
- }
-
- /* The only other way this alternative can be used is if this is a
- constant that could be placed into memory. */
-
- else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
- alt_cost += MEMORY_MOVE_COST (mode, classes[i], 1);
- else
- alt_fail = 1;
- }
-
- if (alt_fail)
- continue;
-
- if (!recog_data.alternative_enabled_p[alt])
- continue;
-
- /* Finally, update the costs with the information we've calculated
- about this alternative. */
-
- for (i = 0; i < n_ops; i++)
- if (REG_P (ops[i])
- && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
- {
- struct costs *pp = &op_costs[i], *qq = &this_op_costs[i];
- int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
-
- pp->mem_cost = MIN (pp->mem_cost,
- (qq->mem_cost + alt_cost) * scale);
-
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- pp->cost[rclass] = MIN (pp->cost[rclass],
- (qq->cost[rclass] + alt_cost) * scale);
- }
- }
-
- /* If this insn is a single set copying operand 1 to operand 0
-     and one operand is a pseudo with the other a hard reg or a pseudo
-     that prefers a register that is in its own register class, then
-     we may want to adjust the cost of that register class to -1.
-
-     Avoid the adjustment if the source does not die, to avoid stressing
-     the register allocator by preferencing two colliding registers into
-     a single class.
-
- Also avoid the adjustment if a copy between registers of the class
- is expensive (ten times the cost of a default copy is considered
- arbitrarily expensive). This avoids losing when the preferred class
- is very expensive as the source of a copy instruction. */
-
- if ((set = single_set (insn)) != 0
- && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set)
- && REG_P (ops[0]) && REG_P (ops[1])
- && find_regno_note (insn, REG_DEAD, REGNO (ops[1])))
- for (i = 0; i <= 1; i++)
- if (REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
- {
- unsigned int regno = REGNO (ops[!i]);
- enum machine_mode mode = GET_MODE (ops[!i]);
- int rclass;
-
- if (regno >= FIRST_PSEUDO_REGISTER && reg_pref != 0
- && reg_pref[regno].prefclass != NO_REGS)
- {
- enum reg_class pref = reg_pref[regno].prefclass;
-
- if ((reg_class_size[(unsigned char) pref]
- == (unsigned) CLASS_MAX_NREGS (pref, mode))
- && REGISTER_MOVE_COST (mode, pref, pref) < 10 * 2)
- op_costs[i].cost[(unsigned char) pref] = -1;
- }
- else if (regno < FIRST_PSEUDO_REGISTER)
- for (rclass = 0; rclass < N_REG_CLASSES; rclass++)
- if (TEST_HARD_REG_BIT (reg_class_contents[rclass], regno)
- && reg_class_size[rclass] == (unsigned) CLASS_MAX_NREGS (rclass, mode))
- {
- if (reg_class_size[rclass] == 1)
- op_costs[i].cost[rclass] = -1;
- else if (in_hard_reg_set_p (reg_class_contents[rclass],
- mode, regno))
- op_costs[i].cost[rclass] = -1;
- }
- }
-}
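/* A compact sketch of the per-alternative minimization done at the end
   of the removed record_reg_classes () above: each operand keeps the
   cheapest cost seen across all alternatives, with an in/out operand
   counted twice.  The fixed-size arrays below are illustrative, not
   recog_data.  */

#include <stdio.h>

#define N_CLASSES 3

struct op_cost
{
  int mem_cost;
  int cost[N_CLASSES];
};

/* Fold the costs of one alternative (THIS_ALT plus its ALT_COST penalty)
   into the running minimum kept in TOTAL.  SCALE is 2 for in/out
   operands, 1 otherwise.  */
static void
min_alternative_cost (struct op_cost *total, const struct op_cost *this_alt,
                      int alt_cost, int scale)
{
  int candidate = (this_alt->mem_cost + alt_cost) * scale;
  if (candidate < total->mem_cost)
    total->mem_cost = candidate;

  for (int c = 0; c < N_CLASSES; c++)
    {
      candidate = (this_alt->cost[c] + alt_cost) * scale;
      if (candidate < total->cost[c])
        total->cost[c] = candidate;
    }
}

int
main (void)
{
  struct op_cost total = { 100, { 100, 100, 100 } };
  struct op_cost alt1 = { 6, { 2, 9, 4 } };

  min_alternative_cost (&total, &alt1, 1, 1);
  printf ("mem=%d r0=%d\n", total.mem_cost, total.cost[0]);
  return 0;
}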
-
-/* Compute the cost of loading X into (if TO_P is nonzero) or from (if
- TO_P is zero) a register of class CLASS in mode MODE.
-
- X must not be a pseudo. */
-
-static int
-copy_cost (rtx x, enum machine_mode mode, enum reg_class rclass, int to_p,
- secondary_reload_info *prev_sri)
-{
- enum reg_class secondary_class = NO_REGS;
- secondary_reload_info sri;
-
- /* If X is a SCRATCH, there is actually nothing to move since we are
- assuming optimal allocation. */
-
- if (GET_CODE (x) == SCRATCH)
- return 0;
-
- /* Get the class we will actually use for a reload. */
- rclass = PREFERRED_RELOAD_CLASS (x, rclass);
-
- /* If we need a secondary reload for an intermediate, the
- cost is that to load the input into the intermediate register, then
- to copy it. */
-
- sri.prev_sri = prev_sri;
- sri.extra_cost = 0;
- secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
-
- if (!move_cost[mode])
- init_move_cost (mode);
-
- if (secondary_class != NO_REGS)
- return (move_cost[mode][(int) secondary_class][(int) rclass]
- + sri.extra_cost
- + copy_cost (x, mode, secondary_class, to_p, &sri));
-
- /* For memory, use the memory move cost, for (hard) registers, use the
- cost to move between the register classes, and use 2 for everything
- else (constants). */
-
- if (MEM_P (x) || rclass == NO_REGS)
- return sri.extra_cost + MEMORY_MOVE_COST (mode, rclass, to_p);
-
- else if (REG_P (x))
- return (sri.extra_cost
- + move_cost[mode][(int) REGNO_REG_CLASS (REGNO (x))][(int) rclass]);
-
- else
- /* If this is a constant, we may eventually want to call rtx_cost here. */
- return sri.extra_cost + COSTS_N_INSNS (1);
-}
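/* A simplified sketch of the cost recursion used by the removed
   copy_cost () above: when copying a value into a class requires an
   intermediate ("secondary") class, the total cost is the cost of
   reaching the intermediate plus the cost of moving from it to the
   target class.  The tables and the secondary_class_for () hook below
   are invented placeholders, not GCC target hooks, and the extra-cost
   bookkeeping is omitted.  */

#include <stdio.h>

enum cls { C_NONE = -1, C_GPR, C_FPR, C_N };

/* Hypothetical per-class register-to-register move costs.  */
static const int move_cost_tbl[C_N][C_N] = { { 2, 8 }, { 8, 2 } };
static const int mem_cost = 6;

/* Pretend that loading into C_FPR needs a C_GPR intermediate.  */
static enum cls
secondary_class_for (enum cls target)
{
  return target == C_FPR ? C_GPR : C_NONE;
}

static int
copy_cost_sketch (enum cls from, enum cls to)
{
  enum cls secondary = secondary_class_for (to);

  if (secondary != C_NONE)
    /* Move into the intermediate, then from the intermediate to TO.  */
    return copy_cost_sketch (from, secondary)
           + move_cost_tbl[secondary][to];

  if (from == C_NONE)            /* Value lives in memory.  */
    return mem_cost;
  return move_cost_tbl[from][to];
}

int
main (void)
{
  printf ("mem -> FPR costs %d\n", copy_cost_sketch (C_NONE, C_FPR));
  return 0;
}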
-
-/* Record the pseudo registers we must reload into hard registers
- in a subexpression of a memory address, X.
-
- If CONTEXT is 0, we are looking at the base part of an address, otherwise we
- are looking at the index part.
-
- MODE is the mode of the memory reference; OUTER_CODE and INDEX_CODE
- give the context that the rtx appears in. These three arguments are
- passed down to base_reg_class.
-
- SCALE is twice the amount to multiply the cost by (it is twice so we
- can represent half-cost adjustments). */
-
-static void
-record_address_regs (enum machine_mode mode, rtx x, int context,
- enum rtx_code outer_code, enum rtx_code index_code,
- int scale)
-{
- enum rtx_code code = GET_CODE (x);
- enum reg_class rclass;
-
- if (context == 1)
- rclass = INDEX_REG_CLASS;
- else
- rclass = base_reg_class (mode, outer_code, index_code);
-
- switch (code)
- {
- case CONST_INT:
- case CONST:
- case CC0:
- case PC:
- case SYMBOL_REF:
- case LABEL_REF:
- return;
-
- case PLUS:
- /* When we have an address that is a sum,
- we must determine whether registers are "base" or "index" regs.
- If there is a sum of two registers, we must choose one to be
- the "base". Luckily, we can use the REG_POINTER to make a good
- choice most of the time. We only need to do this on machines
- that can have two registers in an address and where the base
- and index register classes are different.
-
- ??? This code used to set REGNO_POINTER_FLAG in some cases, but
- that seems bogus since it should only be set when we are sure
- the register is being used as a pointer. */
-
- {
- rtx arg0 = XEXP (x, 0);
- rtx arg1 = XEXP (x, 1);
- enum rtx_code code0 = GET_CODE (arg0);
- enum rtx_code code1 = GET_CODE (arg1);
-
- /* Look inside subregs. */
- if (code0 == SUBREG)
- arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
- if (code1 == SUBREG)
- arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
-
- /* If this machine only allows one register per address, it must
- be in the first operand. */
-
- if (MAX_REGS_PER_ADDRESS == 1)
- record_address_regs (mode, arg0, 0, PLUS, code1, scale);
-
- /* If index and base registers are the same on this machine, just
- record registers in any non-constant operands. We assume here,
- as well as in the tests below, that all addresses are in
- canonical form. */
-
- else if (INDEX_REG_CLASS == base_reg_class (VOIDmode, PLUS, SCRATCH))
- {
- record_address_regs (mode, arg0, context, PLUS, code1, scale);
- if (! CONSTANT_P (arg1))
- record_address_regs (mode, arg1, context, PLUS, code0, scale);
- }
-
- /* If the second operand is a constant integer, it doesn't change
- what class the first operand must be. */
-
- else if (code1 == CONST_INT || code1 == CONST_DOUBLE)
- record_address_regs (mode, arg0, context, PLUS, code1, scale);
-
- /* If the second operand is a symbolic constant, the first operand
- must be an index register. */
-
- else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
- record_address_regs (mode, arg0, 1, PLUS, code1, scale);
-
- /* If both operands are registers but one is already a hard register
- of index or reg-base class, give the other the class that the
- hard register is not. */
-
- else if (code0 == REG && code1 == REG
- && REGNO (arg0) < FIRST_PSEUDO_REGISTER
- && (ok_for_base_p_nonstrict (arg0, mode, PLUS, REG)
- || ok_for_index_p_nonstrict (arg0)))
- record_address_regs (mode, arg1,
- ok_for_base_p_nonstrict (arg0, mode, PLUS, REG)
- ? 1 : 0,
- PLUS, REG, scale);
- else if (code0 == REG && code1 == REG
- && REGNO (arg1) < FIRST_PSEUDO_REGISTER
- && (ok_for_base_p_nonstrict (arg1, mode, PLUS, REG)
- || ok_for_index_p_nonstrict (arg1)))
- record_address_regs (mode, arg0,
- ok_for_base_p_nonstrict (arg1, mode, PLUS, REG)
- ? 1 : 0,
- PLUS, REG, scale);
-
- /* If one operand is known to be a pointer, it must be the base
- with the other operand the index. Likewise if the other operand
- is a MULT. */
-
- else if ((code0 == REG && REG_POINTER (arg0))
- || code1 == MULT)
- {
- record_address_regs (mode, arg0, 0, PLUS, code1, scale);
- record_address_regs (mode, arg1, 1, PLUS, code0, scale);
- }
- else if ((code1 == REG && REG_POINTER (arg1))
- || code0 == MULT)
- {
- record_address_regs (mode, arg0, 1, PLUS, code1, scale);
- record_address_regs (mode, arg1, 0, PLUS, code0, scale);
- }
-
- /* Otherwise, count equal chances that each might be a base
- or index register. This case should be rare. */
-
- else
- {
- record_address_regs (mode, arg0, 0, PLUS, code1, scale / 2);
- record_address_regs (mode, arg0, 1, PLUS, code1, scale / 2);
- record_address_regs (mode, arg1, 0, PLUS, code0, scale / 2);
- record_address_regs (mode, arg1, 1, PLUS, code0, scale / 2);
- }
- }
- break;
-
- /* Double the importance of a pseudo register that is incremented
- or decremented, since it would take two extra insns
- if it ends up in the wrong place. */
- case POST_MODIFY:
- case PRE_MODIFY:
- record_address_regs (mode, XEXP (x, 0), 0, code,
- GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
- if (REG_P (XEXP (XEXP (x, 1), 1)))
- record_address_regs (mode, XEXP (XEXP (x, 1), 1), 1, code, REG,
- 2 * scale);
- break;
-
- case POST_INC:
- case PRE_INC:
- case POST_DEC:
- case PRE_DEC:
- /* Double the importance of a pseudo register that is incremented
- or decremented, since it would take two extra insns
- if it ends up in the wrong place. If the operand is a pseudo,
- show it is being used in an INC_DEC context. */
-
-#ifdef FORBIDDEN_INC_DEC_CLASSES
- if (REG_P (XEXP (x, 0))
- && REGNO (XEXP (x, 0)) >= FIRST_PSEUDO_REGISTER)
- in_inc_dec[REGNO (XEXP (x, 0))] = 1;
-#endif
-
- record_address_regs (mode, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
- break;
-
- case REG:
- {
- struct costs *pp = &costs[REGNO (x)];
- int i;
-
- pp->mem_cost += (MEMORY_MOVE_COST (Pmode, rclass, 1) * scale) / 2;
-
- if (!move_cost[Pmode])
- init_move_cost (Pmode);
- for (i = 0; i < N_REG_CLASSES; i++)
- pp->cost[i] += (may_move_in_cost[Pmode][i][(int) rclass] * scale) / 2;
- }
- break;
-
- default:
- {
- const char *fmt = GET_RTX_FORMAT (code);
- int i;
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- if (fmt[i] == 'e')
- record_address_regs (mode, XEXP (x, i), context, code, SCRATCH,
- scale);
- }
- }
-}
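/* An illustrative sketch of the base/index decision for (plus reg reg)
   addresses described in the removed record_address_regs () above:
   when two registers are added, a register already known to be a
   pointer is treated as the base and the other as the index.  The
   struct and the heuristic below are deliberately simplified; GCC also
   consults hard-register base/index classes and splits the cost evenly
   when there is no evidence either way.  */

#include <stdbool.h>
#include <stdio.h>

struct sketch_reg
{
  const char *name;
  bool is_pointer;   /* Analogue of REG_POINTER.  */
};

/* Classify ARG0 + ARG1: returns true when ARG0 should be the base.  */
static bool
arg0_is_base (const struct sketch_reg *arg0, const struct sketch_reg *arg1)
{
  if (arg0->is_pointer && !arg1->is_pointer)
    return true;
  if (arg1->is_pointer && !arg0->is_pointer)
    return false;
  /* No strong evidence either way; fall back to the first operand.  */
  return true;
}

int
main (void)
{
  struct sketch_reg p = { "ptr", true }, i = { "idx", false };
  printf ("base = %s\n", arg0_is_base (&i, &p) ? i.name : p.name);
  return 0;
}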
-
-#ifdef FORBIDDEN_INC_DEC_CLASSES
-
-/* Return 1 if REG is valid as an auto-increment memory reference
- to an object of MODE. */
-
-static int
-auto_inc_dec_reg_p (rtx reg, enum machine_mode mode)
-{
- if (HAVE_POST_INCREMENT
- && memory_address_p (mode, gen_rtx_POST_INC (Pmode, reg)))
- return 1;
-
- if (HAVE_POST_DECREMENT
- && memory_address_p (mode, gen_rtx_POST_DEC (Pmode, reg)))
- return 1;
-
- if (HAVE_PRE_INCREMENT
- && memory_address_p (mode, gen_rtx_PRE_INC (Pmode, reg)))
- return 1;
-
- if (HAVE_PRE_DECREMENT
- && memory_address_p (mode, gen_rtx_PRE_DEC (Pmode, reg)))
- return 1;
-
- return 0;
-}
-#endif
/* Allocate space for reg info. */
@@ -2383,6 +1019,8 @@ setup_reg_classes (int regno,
again just before loop. It finds the first and last use of each
pseudo-register. */
+static void reg_scan_mark_refs (rtx, rtx);
+
void
reg_scan (rtx f, unsigned int nregs ATTRIBUTE_UNUSED)
{
@@ -2406,7 +1044,6 @@ reg_scan (rtx f, unsigned int nregs ATTRIBUTE_UNUSED)
NOTE_FLAG is nonzero if X is from INSN's notes rather than its body.
We should only record information for REGs with numbers
greater than or equal to MIN_REGNO. */
-
static void
reg_scan_mark_refs (rtx x, rtx insn)
{
@@ -2541,9 +1178,9 @@ reg_scan_mark_refs (rtx x, rtx insn)
}
}
+
/* Return nonzero if C1 is a subset of C2, i.e., if every register in C1
is also in C2. */
-
int
reg_class_subset_p (enum reg_class c1, enum reg_class c2)
{
@@ -2554,7 +1191,6 @@ reg_class_subset_p (enum reg_class c1, enum reg_class c2)
}
/* Return nonzero if there is a register that is in both C1 and C2. */
-
int
reg_classes_intersect_p (enum reg_class c1, enum reg_class c2)
{
@@ -2565,6 +1201,11 @@ reg_classes_intersect_p (enum reg_class c1, enum reg_class c2)
reg_class_contents[(int) c2]));
}
+
+
+/* Passes for keeping and updating info about modes of registers
+ inside subregisters. */
+
#ifdef CANNOT_CHANGE_MODE_CLASS
struct subregs_of_mode_node
@@ -2593,7 +1234,6 @@ som_eq (const void *x, const void *y)
return a->block == b->block;
}
-
static void
record_subregs_of_mode (rtx subreg)
{
@@ -2625,9 +1265,7 @@ record_subregs_of_mode (rtx subreg)
node->modes[mode] |= 1 << (regno & 7);
}
-
/* Call record_subregs_of_mode for all the subregs in X. */
-
static void
find_subregs_of_mode (rtx x)
{
@@ -2671,10 +1309,9 @@ init_subregs_of_mode (void)
return 0;
}
-
/* Set bits in *USED which correspond to registers which can't change
- their mode from FROM to any mode in which REGNO was encountered. */
-
+ their mode from FROM to any mode in which REGNO was
+ encountered. */
void
cannot_change_mode_set_regs (HARD_REG_SET *used, enum machine_mode from,
unsigned int regno)
@@ -2702,7 +1339,6 @@ cannot_change_mode_set_regs (HARD_REG_SET *used, enum machine_mode from,
/* Return 1 if REGNO has had an invalid mode change in CLASS from FROM
mode. */
-
bool
invalid_mode_change_p (unsigned int regno,
enum reg_class rclass ATTRIBUTE_UNUSED,
@@ -2798,5 +1434,4 @@ struct rtl_opt_pass pass_subregs_of_mode_finish =
};
-
-#include "gt-regclass.h"
+#include "gt-reginfo.h"
diff --git a/gcc/regmove.c b/gcc/regmove.c
index 12b93fc2ff8..ce55c14596f 100644
--- a/gcc/regmove.c
+++ b/gcc/regmove.c
@@ -20,10 +20,8 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* This module looks for cases where matching constraints would force
- an instruction to need a reload, and this reload would be a register
- to register move. It then attempts to change the registers used by the
- instruction to avoid the move instruction. */
+/* This module makes some simple RTL code transformations which
+ improve the subsequent register allocation. */
#include "config.h"
#include "system.h"
@@ -64,13 +62,8 @@ static rtx discover_flags_reg (void);
static void mark_flags_life_zones (rtx);
static void flags_set_1 (rtx, const_rtx, void *);
-static int try_auto_increment (rtx, rtx, rtx, rtx, HOST_WIDE_INT, int);
static int find_matches (rtx, struct match *);
-static void replace_in_call_usage (rtx *, unsigned int, rtx, rtx);
-static int fixup_match_1 (rtx, rtx, rtx, rtx, rtx, int, int, int);
-static int stable_and_no_regs_but_for_p (rtx, rtx, rtx);
static int regclass_compatible_p (int, int);
-static int replacement_quality (rtx);
static int fixup_match_2 (rtx, rtx, rtx, rtx);
/* Return nonzero if registers with CLASS1 and CLASS2 can be merged without
@@ -85,132 +78,6 @@ regclass_compatible_p (int class0, int class1)
&& ! CLASS_LIKELY_SPILLED_P (class1)));
}
-/* Find the place in the rtx X where REG is used as a memory address.
- Return the MEM rtx that so uses it.
- If PLUSCONST is nonzero, search instead for a memory address equivalent to
- (plus REG (const_int PLUSCONST)).
-
- If such an address does not appear, return 0.
- If REG appears more than once, or is used other than in such an address,
- return (rtx) 1. */
-
-static rtx
-find_use_as_address (rtx x, rtx reg, HOST_WIDE_INT plusconst)
-{
- enum rtx_code code = GET_CODE (x);
- const char * const fmt = GET_RTX_FORMAT (code);
- int i;
- rtx value = 0;
- rtx tem;
-
- if (code == MEM && XEXP (x, 0) == reg && plusconst == 0)
- return x;
-
- if (code == MEM && GET_CODE (XEXP (x, 0)) == PLUS
- && XEXP (XEXP (x, 0), 0) == reg
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && INTVAL (XEXP (XEXP (x, 0), 1)) == plusconst)
- return x;
-
- if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
- {
- /* If REG occurs inside a MEM used in a bit-field reference,
- that is unacceptable. */
- if (find_use_as_address (XEXP (x, 0), reg, 0) != 0)
- return (rtx) (size_t) 1;
- }
-
- if (x == reg)
- return (rtx) (size_t) 1;
-
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- {
- tem = find_use_as_address (XEXP (x, i), reg, plusconst);
- if (value == 0)
- value = tem;
- else if (tem != 0)
- return (rtx) (size_t) 1;
- }
- else if (fmt[i] == 'E')
- {
- int j;
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- {
- tem = find_use_as_address (XVECEXP (x, i, j), reg, plusconst);
- if (value == 0)
- value = tem;
- else if (tem != 0)
- return (rtx) (size_t) 1;
- }
- }
- }
-
- return value;
-}
-
-
-/* INC_INSN is an instruction that adds INCREMENT to REG.
- Try to fold INC_INSN as a post/pre in/decrement into INSN.
- Iff INC_INSN_SET is nonzero, inc_insn has a destination different from src.
- Return nonzero for success. */
-static int
-try_auto_increment (rtx insn, rtx inc_insn, rtx inc_insn_set, rtx reg,
- HOST_WIDE_INT increment, int pre)
-{
- enum rtx_code inc_code;
-
- rtx pset = single_set (insn);
- if (pset)
- {
- /* Can't use the size of SET_SRC, we might have something like
- (sign_extend:SI (mem:QI ... */
- rtx use = find_use_as_address (pset, reg, 0);
- if (use != 0 && use != (rtx) (size_t) 1)
- {
- int size = GET_MODE_SIZE (GET_MODE (use));
- if (0
- || (HAVE_POST_INCREMENT
- && pre == 0 && (inc_code = POST_INC, increment == size))
- || (HAVE_PRE_INCREMENT
- && pre == 1 && (inc_code = PRE_INC, increment == size))
- || (HAVE_POST_DECREMENT
- && pre == 0 && (inc_code = POST_DEC, increment == -size))
- || (HAVE_PRE_DECREMENT
- && pre == 1 && (inc_code = PRE_DEC, increment == -size))
- )
- {
- if (inc_insn_set)
- validate_change
- (inc_insn,
- &SET_SRC (inc_insn_set),
- XEXP (SET_SRC (inc_insn_set), 0), 1);
- validate_change (insn, &XEXP (use, 0),
- gen_rtx_fmt_e (inc_code, Pmode, reg), 1);
- if (apply_change_group ())
- {
- /* If there is a REG_DEAD note on this insn, we must
- change this not to REG_UNUSED meaning that the register
- is set, but the value is dead. Failure to do so will
- result in sched1 dying -- when it recomputes lifetime
- information, the number of REG_DEAD notes will have
- changed. */
- rtx note = find_reg_note (insn, REG_DEAD, reg);
- if (note)
- PUT_MODE (note, REG_UNUSED);
-
- add_reg_note (insn, REG_INC, reg);
-
- if (! inc_insn_set)
- delete_insn (inc_insn);
- return 1;
- }
- }
- }
- }
- return 0;
-}
/* Determine if the pattern generated by add_optab has a clobber,
such as might be issued for a flags hard register. To make the
@@ -376,43 +243,140 @@ flags_set_1 (rtx x, const_rtx pat, void *data ATTRIBUTE_UNUSED)
&& reg_overlap_mentioned_p (x, flags_set_1_rtx))
flags_set_1_set = 1;
}
-
-static int *regno_src_regno;
-/* Indicate how good a choice REG (which appears as a source) is to replace
- a destination register with. The higher the returned value, the better
- the choice. The main objective is to avoid using a register that is
- a candidate for tying to a hard register, since the output might in
- turn be a candidate to be tied to a different hard register. */
-static int
-replacement_quality (rtx reg)
+#ifdef AUTO_INC_DEC
+
+/* Find the place in the rtx X where REG is used as a memory address.
+ Return the MEM rtx that so uses it.
+ If PLUSCONST is nonzero, search instead for a memory address equivalent to
+ (plus REG (const_int PLUSCONST)).
+
+ If such an address does not appear, return 0.
+ If REG appears more than once, or is used other than in such an address,
+ return (rtx) 1. */
+
+static rtx
+find_use_as_address (rtx x, rtx reg, HOST_WIDE_INT plusconst)
{
- int src_regno;
+ enum rtx_code code = GET_CODE (x);
+ const char * const fmt = GET_RTX_FORMAT (code);
+ int i;
+ rtx value = 0;
+ rtx tem;
- /* Bad if this isn't a register at all. */
- if (!REG_P (reg))
- return 0;
+ if (code == MEM && XEXP (x, 0) == reg && plusconst == 0)
+ return x;
- /* If this register is not meant to get a hard register,
- it is a poor choice. */
- if (REG_LIVE_LENGTH (REGNO (reg)) < 0)
- return 0;
+ if (code == MEM && GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 0) == reg
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == plusconst)
+ return x;
- src_regno = regno_src_regno[REGNO (reg)];
+ if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
+ {
+ /* If REG occurs inside a MEM used in a bit-field reference,
+ that is unacceptable. */
+ if (find_use_as_address (XEXP (x, 0), reg, 0) != 0)
+ return (rtx) (size_t) 1;
+ }
- /* If it was not copied from another register, it is fine. */
- if (src_regno < 0)
- return 3;
+ if (x == reg)
+ return (rtx) (size_t) 1;
- /* Copied from a hard register? */
- if (src_regno < FIRST_PSEUDO_REGISTER)
- return 1;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ tem = find_use_as_address (XEXP (x, i), reg, plusconst);
+ if (value == 0)
+ value = tem;
+ else if (tem != 0)
+ return (rtx) (size_t) 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ tem = find_use_as_address (XVECEXP (x, i, j), reg, plusconst);
+ if (value == 0)
+ value = tem;
+ else if (tem != 0)
+ return (rtx) (size_t) 1;
+ }
+ }
+ }
- /* Copied from a pseudo register - not as bad as from a hard register,
- yet still cumbersome, since the register live length will be lengthened
- when the registers get tied. */
- return 2;
+ return value;
}
+
+
+/* INC_INSN is an instruction that adds INCREMENT to REG.
+ Try to fold INC_INSN as a post/pre in/decrement into INSN.
+ Iff INC_INSN_SET is nonzero, inc_insn has a destination different from src.
+ Return nonzero for success. */
+static int
+try_auto_increment (rtx insn, rtx inc_insn, rtx inc_insn_set, rtx reg,
+ HOST_WIDE_INT increment, int pre)
+{
+ enum rtx_code inc_code;
+
+ rtx pset = single_set (insn);
+ if (pset)
+ {
+ /* Can't use the size of SET_SRC, we might have something like
+ (sign_extend:SI (mem:QI ... */
+ rtx use = find_use_as_address (pset, reg, 0);
+ if (use != 0 && use != (rtx) (size_t) 1)
+ {
+ int size = GET_MODE_SIZE (GET_MODE (use));
+ if (0
+ || (HAVE_POST_INCREMENT
+ && pre == 0 && (inc_code = POST_INC, increment == size))
+ || (HAVE_PRE_INCREMENT
+ && pre == 1 && (inc_code = PRE_INC, increment == size))
+ || (HAVE_POST_DECREMENT
+ && pre == 0 && (inc_code = POST_DEC, increment == -size))
+ || (HAVE_PRE_DECREMENT
+ && pre == 1 && (inc_code = PRE_DEC, increment == -size))
+ )
+ {
+ if (inc_insn_set)
+ validate_change
+ (inc_insn,
+ &SET_SRC (inc_insn_set),
+ XEXP (SET_SRC (inc_insn_set), 0), 1);
+ validate_change (insn, &XEXP (use, 0),
+ gen_rtx_fmt_e (inc_code, Pmode, reg), 1);
+ if (apply_change_group ())
+ {
+ /* If there is a REG_DEAD note on this insn, we must
+		 change this note to REG_UNUSED meaning that the register
+ is set, but the value is dead. Failure to do so will
+ result in sched1 dying -- when it recomputes lifetime
+ information, the number of REG_DEAD notes will have
+ changed. */
+ rtx note = find_reg_note (insn, REG_DEAD, reg);
+ if (note)
+ PUT_MODE (note, REG_UNUSED);
+
+ add_reg_note (insn, REG_INC, reg);
+
+ if (! inc_insn_set)
+ delete_insn (inc_insn);
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+
+static int *regno_src_regno;
+
/* Return 1 if INSN might end a basic block. */
@@ -898,7 +862,7 @@ copy_src_to_dest (rtx insn, rtx src, rtx dest)
/* reg_set_in_bb[REGNO] points to basic block iff the register is set
only once in the given block and has REG_EQUAL note. */
-basic_block *reg_set_in_bb;
+static basic_block *reg_set_in_bb;
/* Size of reg_set_in_bb array. */
static unsigned int max_reg_computed;
@@ -1126,7 +1090,7 @@ regmove_optimize (rtx f, int nregs)
for (pass = 0; pass <= 2; pass++)
{
/* We need fewer optimizations for IRA. */
- if ((! flag_regmove || flag_ira) && pass >= flag_expensive_optimizations)
+ if (! flag_regmove && pass >= flag_expensive_optimizations)
goto done;
if (dump_file)
@@ -1137,7 +1101,6 @@ regmove_optimize (rtx f, int nregs)
insn = pass ? PREV_INSN (insn) : NEXT_INSN (insn))
{
rtx set;
- int op_no, match_no;
set = single_set (insn);
if (! set)
@@ -1174,103 +1137,6 @@ regmove_optimize (rtx f, int nregs)
}
}
}
-
- /* All optimizations important for IRA have been done. */
- if (! flag_regmove || flag_ira)
- continue;
-
- if (! find_matches (insn, &match))
- continue;
-
- /* Now scan through the operands looking for a source operand
- which is supposed to match the destination operand.
- Then scan forward for an instruction which uses the dest
- operand.
- If it dies there, then replace the dest in both operands with
- the source operand. */
-
- for (op_no = 0; op_no < recog_data.n_operands; op_no++)
- {
- rtx src, dst, src_subreg;
- enum reg_class src_class, dst_class;
-
- match_no = match.with[op_no];
-
- /* Nothing to do if the two operands aren't supposed to match. */
- if (match_no < 0)
- continue;
-
- src = recog_data.operand[op_no];
- dst = recog_data.operand[match_no];
-
- if (!REG_P (src))
- continue;
-
- src_subreg = src;
- if (GET_CODE (dst) == SUBREG
- && GET_MODE_SIZE (GET_MODE (dst))
- >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dst))))
- {
- dst = SUBREG_REG (dst);
- src_subreg = lowpart_subreg (GET_MODE (dst),
- src, GET_MODE (src));
- if (!src_subreg)
- continue;
- }
- if (!REG_P (dst)
- || REGNO (dst) < FIRST_PSEUDO_REGISTER)
- continue;
-
- if (REGNO (src) < FIRST_PSEUDO_REGISTER)
- {
- if (match.commutative[op_no] < op_no)
- regno_src_regno[REGNO (dst)] = REGNO (src);
- continue;
- }
-
- if (REG_LIVE_LENGTH (REGNO (src)) < 0)
- continue;
-
- /* op_no/src must be a read-only operand, and
- match_operand/dst must be a write-only operand. */
- if (match.use[op_no] != READ
- || match.use[match_no] != WRITE)
- continue;
-
- if (match.early_clobber[match_no]
- && count_occurrences (PATTERN (insn), src, 0) > 1)
- continue;
-
- /* Make sure match_operand is the destination. */
- if (recog_data.operand[match_no] != SET_DEST (set))
- continue;
-
- /* If the operands already match, then there is nothing to do. */
- if (operands_match_p (src, dst))
- continue;
-
- /* But in the commutative case, we might find a better match. */
- if (match.commutative[op_no] >= 0)
- {
- rtx comm = recog_data.operand[match.commutative[op_no]];
- if (operands_match_p (comm, dst)
- && (replacement_quality (comm)
- >= replacement_quality (src)))
- continue;
- }
-
- src_class = reg_preferred_class (REGNO (src));
- dst_class = reg_preferred_class (REGNO (dst));
- if (! regclass_compatible_p (src_class, dst_class))
- continue;
-
- if (GET_MODE (src) != GET_MODE (dst))
- continue;
-
- if (fixup_match_1 (insn, set, src, src_subreg, dst, pass,
- op_no, match_no))
- break;
- }
}
}
@@ -1656,478 +1522,6 @@ find_matches (rtx insn, struct match *matchp)
return any_matches;
}
-/* Try to replace all occurrences of DST_REG with SRC in LOC, that is
- assumed to be in INSN. */
-
-static void
-replace_in_call_usage (rtx *loc, unsigned int dst_reg, rtx src, rtx insn)
-{
- rtx x = *loc;
- enum rtx_code code;
- const char *fmt;
- int i, j;
-
- if (! x)
- return;
-
- code = GET_CODE (x);
- if (code == REG)
- {
- if (REGNO (x) != dst_reg)
- return;
-
- validate_change (insn, loc, src, 1);
-
- return;
- }
-
- /* Process each of our operands recursively. */
- fmt = GET_RTX_FORMAT (code);
- for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
- if (*fmt == 'e')
- replace_in_call_usage (&XEXP (x, i), dst_reg, src, insn);
- else if (*fmt == 'E')
- for (j = 0; j < XVECLEN (x, i); j++)
- replace_in_call_usage (& XVECEXP (x, i, j), dst_reg, src, insn);
-}
-
-/* Try to replace output operand DST in SET, with input operand SRC. SET is
- the only set in INSN. INSN has just been recognized and constrained.
- SRC is operand number OPERAND_NUMBER in INSN.
- DST is operand number MATCH_NUMBER in INSN.
- If BACKWARD is nonzero, we have been called in a backward pass.
- Return nonzero for success. */
-
-static int
-fixup_match_1 (rtx insn, rtx set, rtx src, rtx src_subreg, rtx dst,
- int backward, int operand_number, int match_number)
-{
- rtx p;
- rtx post_inc = 0, post_inc_set = 0, search_end = 0;
- int success = 0;
- int num_calls = 0, freq_calls = 0, s_num_calls = 0, s_freq_calls = 0;
- enum rtx_code code = NOTE;
- HOST_WIDE_INT insn_const = 0, newconst = 0;
- rtx overlap = 0; /* need to move insn ? */
- rtx src_note = find_reg_note (insn, REG_DEAD, src), dst_note = NULL_RTX;
- int length, s_length;
-
- if (! src_note)
- {
- /* Look for (set (regX) (op regA constX))
- (set (regY) (op regA constY))
- and change that to
- (set (regA) (op regA constX)).
- (set (regY) (op regA constY-constX)).
- This works for add and shift operations, if
- regA is dead after or set by the second insn. */
-
- code = GET_CODE (SET_SRC (set));
- if ((code == PLUS || code == LSHIFTRT
- || code == ASHIFT || code == ASHIFTRT)
- && XEXP (SET_SRC (set), 0) == src
- && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT)
- insn_const = INTVAL (XEXP (SET_SRC (set), 1));
- else if (! stable_and_no_regs_but_for_p (SET_SRC (set), src, dst))
- return 0;
- else
- /* We might find a src_note while scanning. */
- code = NOTE;
- }
-
- if (dump_file)
- fprintf (dump_file,
- "Could fix operand %d of insn %d matching operand %d.\n",
- operand_number, INSN_UID (insn), match_number);
-
- /* If SRC is equivalent to a constant set in a different basic block,
- then do not use it for this optimization. We want the equivalence
- so that if we have to reload this register, we can reload the
- constant, rather than extending the lifespan of the register. */
- if (reg_is_remote_constant_p (src, insn))
- return 0;
-
- /* Scan forward to find the next instruction that
- uses the output operand. If the operand dies here,
- then replace it in both instructions with
- operand_number. */
-
- for (length = s_length = 0, p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
- {
- if (CALL_P (p))
- replace_in_call_usage (& CALL_INSN_FUNCTION_USAGE (p),
- REGNO (dst), src, p);
-
- /* ??? We can't scan past the end of a basic block without updating
- the register lifetime info (REG_DEAD/basic_block_live_at_start). */
- if (perhaps_ends_bb_p (p))
- break;
- else if (! INSN_P (p))
- continue;
-
- length++;
- if (src_note)
- s_length++;
-
- if (reg_set_p (src, p) || reg_set_p (dst, p)
- || (GET_CODE (PATTERN (p)) == USE
- && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
- break;
-
- /* See if all of DST dies in P. This test is
- slightly more conservative than it needs to be. */
- if ((dst_note = find_regno_note (p, REG_DEAD, REGNO (dst)))
- && (GET_MODE (XEXP (dst_note, 0)) == GET_MODE (dst)))
- {
- /* If we would be moving INSN, check that we won't move it
- into the shadow of a live a live flags register. */
- /* ??? We only try to move it in front of P, although
- we could move it anywhere between OVERLAP and P. */
- if (overlap && GET_MODE (PREV_INSN (p)) != VOIDmode)
- break;
-
- if (! src_note)
- {
- rtx q;
- rtx set2 = NULL_RTX;
-
- /* If an optimization is done, the value of SRC while P
- is executed will be changed. Check that this is OK. */
- if (reg_overlap_mentioned_p (src, PATTERN (p)))
- break;
- for (q = p; q; q = NEXT_INSN (q))
- {
- /* ??? We can't scan past the end of a basic block without
- updating the register lifetime info
- (REG_DEAD/basic_block_live_at_start). */
- if (perhaps_ends_bb_p (q))
- {
- q = 0;
- break;
- }
- else if (! INSN_P (q))
- continue;
- else if (reg_overlap_mentioned_p (src, PATTERN (q))
- || reg_set_p (src, q))
- break;
- }
- if (q)
- set2 = single_set (q);
- if (! q || ! set2 || GET_CODE (SET_SRC (set2)) != code
- || XEXP (SET_SRC (set2), 0) != src
- || GET_CODE (XEXP (SET_SRC (set2), 1)) != CONST_INT
- || (SET_DEST (set2) != src
- && ! find_reg_note (q, REG_DEAD, src)))
- {
- /* If this is a PLUS, we can still save a register by doing
- src += insn_const;
- P;
- src -= insn_const; .
- This also gives opportunities for subsequent
- optimizations in the backward pass, so do it there. */
- if (code == PLUS && backward
- /* Don't do this if we can likely tie DST to SET_DEST
- of P later; we can't do this tying here if we got a
- hard register. */
- && ! (dst_note && ! REG_N_CALLS_CROSSED (REGNO (dst))
- && single_set (p)
- && REG_P (SET_DEST (single_set (p)))
- && (REGNO (SET_DEST (single_set (p)))
- < FIRST_PSEUDO_REGISTER))
- /* We may only emit an insn directly after P if we
- are not in the shadow of a live flags register. */
- && GET_MODE (p) == VOIDmode)
- {
- search_end = q;
- q = insn;
- set2 = set;
- newconst = -insn_const;
- code = MINUS;
- }
- else
- break;
- }
- else
- {
- newconst = INTVAL (XEXP (SET_SRC (set2), 1)) - insn_const;
- /* Reject out of range shifts. */
- if (code != PLUS
- && (newconst < 0
- || ((unsigned HOST_WIDE_INT) newconst
- >= (GET_MODE_BITSIZE (GET_MODE
- (SET_SRC (set2)))))))
- break;
- if (code == PLUS)
- {
- post_inc = q;
- if (SET_DEST (set2) != src)
- post_inc_set = set2;
- }
- }
- /* We use 1 as last argument to validate_change so that all
- changes are accepted or rejected together by apply_change_group
- when it is called by validate_replace_rtx . */
- validate_change (q, &XEXP (SET_SRC (set2), 1),
- GEN_INT (newconst), 1);
- }
- validate_change (insn, recog_data.operand_loc[match_number], src, 1);
- if (validate_replace_rtx (dst, src_subreg, p))
- success = 1;
- break;
- }
-
- if (reg_overlap_mentioned_p (dst, PATTERN (p)))
- break;
- if (! src_note && reg_overlap_mentioned_p (src, PATTERN (p)))
- {
- /* INSN was already checked to be movable wrt. the registers that it
- sets / uses when we found no REG_DEAD note for src on it, but it
- still might clobber the flags register. We'll have to check that
- we won't insert it into the shadow of a live flags register when
- we finally know where we are to move it. */
- overlap = p;
- src_note = find_reg_note (p, REG_DEAD, src);
- }
-
- /* If we have passed a call instruction, and the pseudo-reg SRC is not
- already live across a call, then don't perform the optimization. */
- if (CALL_P (p))
- {
- if (REG_N_CALLS_CROSSED (REGNO (src)) == 0)
- break;
-
- num_calls++;
- freq_calls += REG_FREQ_FROM_BB (BLOCK_FOR_INSN (p));
-
- if (src_note)
- {
- s_num_calls++;
- s_freq_calls += REG_FREQ_FROM_BB (BLOCK_FOR_INSN (p));
- }
- }
- }
-
- if (! success)
- return 0;
-
- /* Remove the death note for DST from P. */
- remove_note (p, dst_note);
- if (code == MINUS)
- {
- post_inc = emit_insn_after (copy_rtx (PATTERN (insn)), p);
- if ((HAVE_PRE_INCREMENT || HAVE_PRE_DECREMENT)
- && search_end
- && try_auto_increment (search_end, post_inc, 0, src, newconst, 1))
- post_inc = 0;
- validate_change (insn, &XEXP (SET_SRC (set), 1), GEN_INT (insn_const), 0);
- INC_REG_N_SETS (REGNO (src), 1);
- REG_LIVE_LENGTH (REGNO (src))++;
- }
- if (overlap)
- {
- /* The lifetime of src and dest overlap,
- but we can change this by moving insn. */
- rtx pat = PATTERN (insn);
- if (src_note)
- remove_note (overlap, src_note);
- if ((HAVE_POST_INCREMENT || HAVE_POST_DECREMENT)
- && code == PLUS
- && try_auto_increment (overlap, insn, 0, src, insn_const, 0))
- insn = overlap;
- else
- {
- rtx notes = REG_NOTES (insn);
-
- p = emit_insn_after_setloc (pat, PREV_INSN (p), INSN_LOCATOR (insn));
- delete_insn (insn);
- REG_NOTES (p) = notes;
- df_notes_rescan (p);
- }
- }
- /* Sometimes we'd generate src = const; src += n;
- if so, replace the instruction that set src
- in the first place. */
-
- if (! overlap && (code == PLUS || code == MINUS))
- {
- rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
- rtx q, set2 = NULL_RTX;
- int num_calls2 = 0, s_length2 = 0, freq_calls2 = 0;
-
- if (note && CONSTANT_P (XEXP (note, 0)))
- {
- for (q = PREV_INSN (insn); q; q = PREV_INSN (q))
- {
- /* ??? We can't scan past the end of a basic block without
- updating the register lifetime info
- (REG_DEAD/basic_block_live_at_start). */
- if (perhaps_ends_bb_p (q))
- {
- q = 0;
- break;
- }
- else if (! INSN_P (q))
- continue;
-
- s_length2++;
- if (reg_set_p (src, q))
- {
- set2 = single_set (q);
- break;
- }
- if (reg_overlap_mentioned_p (src, PATTERN (q)))
- {
- q = 0;
- break;
- }
- if (CALL_P (p))
- {
- num_calls2++;
- freq_calls2 += REG_FREQ_FROM_BB (BLOCK_FOR_INSN (p));
- }
- }
- if (q && set2 && SET_DEST (set2) == src && CONSTANT_P (SET_SRC (set2))
- && validate_change (insn, &SET_SRC (set), XEXP (note, 0), 0))
- {
- delete_insn (q);
- INC_REG_N_SETS (REGNO (src), -1);
- REG_N_CALLS_CROSSED (REGNO (src)) -= num_calls2;
- REG_FREQ_CALLS_CROSSED (REGNO (src)) -= freq_calls2;
- REG_LIVE_LENGTH (REGNO (src)) -= s_length2;
- insn_const = 0;
- }
- }
- }
-
- if ((HAVE_PRE_INCREMENT || HAVE_PRE_DECREMENT)
- && (code == PLUS || code == MINUS) && insn_const
- && try_auto_increment (p, insn, 0, src, insn_const, 1))
- insn = p;
- else if ((HAVE_POST_INCREMENT || HAVE_POST_DECREMENT)
- && post_inc
- && try_auto_increment (p, post_inc, post_inc_set, src, newconst, 0))
- post_inc = 0;
- /* If post_inc still prevails, try to find an
- insn where it can be used as a pre-in/decrement.
- If code is MINUS, this was already tried. */
- if (post_inc && code == PLUS
- /* Check that newconst is likely to be usable
- in a pre-in/decrement before starting the search. */
- && ((HAVE_PRE_INCREMENT && newconst > 0 && newconst <= MOVE_MAX)
- || (HAVE_PRE_DECREMENT && newconst < 0 && newconst >= -MOVE_MAX))
- && exact_log2 (newconst))
- {
- rtx q, inc_dest;
-
- inc_dest = post_inc_set ? SET_DEST (post_inc_set) : src;
- for (q = post_inc; (q = NEXT_INSN (q)); )
- {
- /* ??? We can't scan past the end of a basic block without updating
- the register lifetime info
- (REG_DEAD/basic_block_live_at_start). */
- if (perhaps_ends_bb_p (q))
- break;
- else if (! INSN_P (q))
- continue;
- else if (src != inc_dest
- && (reg_overlap_mentioned_p (src, PATTERN (q))
- || reg_set_p (src, q)))
- break;
- else if (reg_set_p (inc_dest, q))
- break;
- else if (reg_overlap_mentioned_p (inc_dest, PATTERN (q)))
- {
- try_auto_increment (q, post_inc,
- post_inc_set, inc_dest, newconst, 1);
- break;
- }
- }
- }
-
- /* Move the death note for DST to INSN if it is used
- there. */
- if (reg_overlap_mentioned_p (dst, PATTERN (insn)))
- {
- XEXP (dst_note, 1) = REG_NOTES (insn);
- REG_NOTES (insn) = dst_note;
- }
-
- if (src_note)
- {
- /* Move the death note for SRC from INSN to P. */
- if (! overlap)
- remove_note (insn, src_note);
- XEXP (src_note, 1) = REG_NOTES (p);
- REG_NOTES (p) = src_note;
-
- REG_N_CALLS_CROSSED (REGNO (src)) += s_num_calls;
- REG_FREQ_CALLS_CROSSED (REGNO (src)) += s_freq_calls;
- }
-
- INC_REG_N_SETS (REGNO (src), 1);
- INC_REG_N_SETS (REGNO (dst), -1);
-
- REG_N_CALLS_CROSSED (REGNO (dst)) -= num_calls;
- REG_FREQ_CALLS_CROSSED (REGNO (dst)) -= freq_calls;
-
- REG_LIVE_LENGTH (REGNO (src)) += s_length;
- if (REG_LIVE_LENGTH (REGNO (dst)) >= 0)
- {
- REG_LIVE_LENGTH (REGNO (dst)) -= length;
- /* REG_LIVE_LENGTH is only an approximation after
- combine if sched is not run, so make sure that we
- still have a reasonable value. */
- if (REG_LIVE_LENGTH (REGNO (dst)) < 2)
- REG_LIVE_LENGTH (REGNO (dst)) = 2;
- }
- if (dump_file)
- fprintf (dump_file,
- "Fixed operand %d of insn %d matching operand %d.\n",
- operand_number, INSN_UID (insn), match_number);
- return 1;
-}
-
-
-/* Return nonzero if X is stable and mentions no registers but for
- mentioning SRC or mentioning / changing DST . If in doubt, presume
- it is unstable.
- The rationale is that we want to check if we can move an insn easily
- while just paying attention to SRC and DST. */
-static int
-stable_and_no_regs_but_for_p (rtx x, rtx src, rtx dst)
-{
- RTX_CODE code = GET_CODE (x);
- switch (GET_RTX_CLASS (code))
- {
- case RTX_UNARY:
- case RTX_BIN_ARITH:
- case RTX_COMM_ARITH:
- case RTX_COMPARE:
- case RTX_COMM_COMPARE:
- case RTX_TERNARY:
- case RTX_BITFIELD_OPS:
- {
- int i;
- const char *fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- if (fmt[i] == 'e'
- && ! stable_and_no_regs_but_for_p (XEXP (x, i), src, dst))
- return 0;
- return 1;
- }
- case RTX_OBJ:
- if (code == REG)
- return x == src || x == dst;
- /* If this is a MEM, look inside - there might be a register hidden in
- the address of an unchanging MEM. */
- if (code == MEM
- && ! stable_and_no_regs_but_for_p (XEXP (x, 0), src, dst))
- return 0;
- /* Fall through. */
- default:
- return ! rtx_unstable_p (x);
- }
-}
static bool
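For context on the block moved under #ifdef AUTO_INC_DEC above: try_auto_increment folds a separate register add into a post- or pre-increment address, subject to the target's HAVE_POST_INCREMENT / HAVE_PRE_DECREMENT macros. A minimal C sketch of the source-level pattern it targets (the function and variable names are made up for illustration):

/* On a target with post-increment addressing, the load from *p and the
   following p = p + 1 can end up as a single post-incremented memory
   access at the RTL level; that is the fold try_auto_increment makes.  */
int sum_ints (const int *p, int n)
{
  int s = 0;
  while (n-- > 0)
    {
      s += *p;     /* use of (mem (reg p))  */
      p = p + 1;   /* separate add; candidate for the fold  */
    }
  return s;
}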
diff --git a/gcc/reload.c b/gcc/reload.c
index e353c50acdb..7f982c29be1 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -1549,9 +1549,7 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc,
&& reg_mentioned_p (XEXP (note, 0), in)
/* Check that a former pseudo is valid; see find_dummy_reload. */
&& (ORIGINAL_REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER
- || (! bitmap_bit_p (flag_ira
- ? DF_LR_OUT (ENTRY_BLOCK_PTR)
- : DF_LIVE_OUT (ENTRY_BLOCK_PTR),
+ || (! bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
ORIGINAL_REGNO (XEXP (note, 0)))
&& hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] == 1))
&& ! refers_to_regno_for_reload_p (regno,
@@ -2029,9 +2027,7 @@ find_dummy_reload (rtx real_in, rtx real_out, rtx *inloc, rtx *outloc,
can ignore the conflict). We must never introduce writes
to such hardregs, as they would clobber the other live
pseudo. See PR 20973. */
- || (!bitmap_bit_p (flag_ira
- ? DF_LR_OUT (ENTRY_BLOCK_PTR)
- : DF_LIVE_OUT (ENTRY_BLOCK_PTR),
+ || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
ORIGINAL_REGNO (in))
/* Similarly, only do this if we can be sure that the death
note is still valid. global can assign some hardreg to
diff --git a/gcc/reload1.c b/gcc/reload1.c
index bbffad0b98d..e0d0f942ff0 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -557,7 +557,7 @@ compute_use_by_pseudos (HARD_REG_SET *to, regset from)
which might still contain registers that have not
actually been allocated since they have an
equivalence. */
- gcc_assert ((flag_ira && ira_conflicts_p) || reload_completed);
+ gcc_assert (ira_conflicts_p || reload_completed);
}
else
add_to_hard_reg_set (to, PSEUDO_REGNO_MODE (regno), r);
@@ -901,7 +901,7 @@ reload (rtx first, int global)
for (n = 0, i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
temp_pseudo_reg_arr[n++] = i;
- if (flag_ira && ira_conflicts_p)
+ if (ira_conflicts_p)
/* Ask IRA to order pseudo-registers for better stack slot
sharing. */
ira_sort_regnos_for_alter_reg (temp_pseudo_reg_arr, n, reg_max_ref_width);
@@ -1055,7 +1055,7 @@ reload (rtx first, int global)
calculate_needs_all_insns (global);
- if (! flag_ira || ! ira_conflicts_p)
+ if (! ira_conflicts_p)
/* Don't do it for IRA. We need this info because we don't
change live_throughout and dead_or_set for chains when IRA
is used. */
@@ -1614,7 +1614,7 @@ calculate_needs_all_insns (int global)
reg_equiv_memory_loc
[REGNO (SET_DEST (set))]))))
{
- if (flag_ira && ira_conflicts_p)
+ if (ira_conflicts_p)
/* Inform IRA about the insn deletion. */
ira_mark_memory_move_deletion (REGNO (SET_DEST (set)),
REGNO (SET_SRC (set)));
@@ -1723,7 +1723,7 @@ count_pseudo (int reg)
|| REGNO_REG_SET_P (&spilled_pseudos, reg)
/* Ignore spilled pseudo-registers which can be here only if IRA
is used. */
- || (flag_ira && ira_conflicts_p && r < 0))
+ || (ira_conflicts_p && r < 0))
return;
SET_REGNO_REG_SET (&pseudos_counted, reg);
@@ -1804,7 +1804,7 @@ count_spilled_pseudo (int spilled, int spilled_nregs, int reg)
/* Ignore spilled pseudo-registers which can be here only if IRA is
used. */
- if ((flag_ira && ira_conflicts_p && r < 0)
+ if ((ira_conflicts_p && r < 0)
|| REGNO_REG_SET_P (&spilled_pseudos, reg)
|| spilled + spilled_nregs <= r || r + nregs <= spilled)
return;
@@ -1876,7 +1876,7 @@ find_reg (struct insn_chain *chain, int order)
if (! ok)
continue;
- if (flag_ira && ira_conflicts_p)
+ if (ira_conflicts_p)
{
/* Ask IRA to find a better pseudo-register for
spilling. */
@@ -2158,7 +2158,7 @@ alter_reg (int i, int from_reg, bool dont_share_p)
unsigned int min_align = reg_max_ref_width[i] * BITS_PER_UNIT;
int adjust = 0;
- if (flag_ira && ira_conflicts_p)
+ if (ira_conflicts_p)
{
/* Mark the spill for IRA. */
SET_REGNO_REG_SET (&spilled_pseudos, i);
@@ -2177,8 +2177,7 @@ alter_reg (int i, int from_reg, bool dont_share_p)
enough inherent space and enough total space.
Otherwise, we allocate a new slot, making sure that it has no less
	     inherent space, and no less total space, than the previous slot. */
- else if (from_reg == -1
- || (!dont_share_p && flag_ira && ira_conflicts_p))
+ else if (from_reg == -1 || (!dont_share_p && ira_conflicts_p))
{
rtx stack_slot;
@@ -2203,7 +2202,7 @@ alter_reg (int i, int from_reg, bool dont_share_p)
adjust);
}
- if (! dont_share_p && flag_ira && ira_conflicts_p)
+ if (! dont_share_p && ira_conflicts_p)
/* Inform IRA about allocation a new stack slot. */
ira_mark_new_stack_slot (stack_slot, i, total_size);
}
@@ -3905,7 +3904,7 @@ finish_spills (int global)
spill_reg_order[i] = -1;
EXECUTE_IF_SET_IN_REG_SET (&spilled_pseudos, FIRST_PSEUDO_REGISTER, i, rsi)
- if (! flag_ira || ! ira_conflicts_p || reg_renumber[i] >= 0)
+ if (! ira_conflicts_p || reg_renumber[i] >= 0)
{
/* Record the current hard register the pseudo is allocated to
in pseudo_previous_regs so we avoid reallocating it to the
@@ -3915,7 +3914,7 @@ finish_spills (int global)
SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]);
/* Mark it as no longer having a hard register home. */
reg_renumber[i] = -1;
- if (flag_ira && ira_conflicts_p)
+ if (ira_conflicts_p)
/* Inform IRA about the change. */
ira_mark_allocation_change (i);
/* We will need to scan everything again. */
@@ -3923,8 +3922,10 @@ finish_spills (int global)
}
/* Retry global register allocation if possible. */
- if (global)
+ if (global && ira_conflicts_p)
{
+ unsigned int n;
+
memset (pseudo_forbidden_regs, 0, max_regno * sizeof (HARD_REG_SET));
/* For every insn that needs reloads, set the registers used as spill
regs in pseudo_forbidden_regs for every pseudo live across the
@@ -3945,49 +3946,23 @@ finish_spills (int global)
}
}
- if (! flag_ira || ! ira_conflicts_p)
- {
- /* Retry allocating the spilled pseudos. For each reg,
- merge the various reg sets that indicate which hard regs
- can't be used, and call retry_global_alloc. We change
- spill_pseudos here to only contain pseudos that did not
- get a new hard register. */
- for (i = FIRST_PSEUDO_REGISTER; i < (unsigned)max_regno; i++)
- if (reg_old_renumber[i] != reg_renumber[i])
- {
- HARD_REG_SET forbidden;
-
- COPY_HARD_REG_SET (forbidden, bad_spill_regs_global);
- IOR_HARD_REG_SET (forbidden, pseudo_forbidden_regs[i]);
- IOR_HARD_REG_SET (forbidden, pseudo_previous_regs[i]);
- retry_global_alloc (i, forbidden);
- if (reg_renumber[i] >= 0)
- CLEAR_REGNO_REG_SET (&spilled_pseudos, i);
- }
- }
- else
- {
- /* Retry allocating the pseudos spilled in IRA and the
- reload. For each reg, merge the various reg sets that
- indicate which hard regs can't be used, and call
- ira_reassign_pseudos. */
- unsigned int n;
-
- for (n = 0, i = FIRST_PSEUDO_REGISTER; i < (unsigned) max_regno; i++)
- if (reg_old_renumber[i] != reg_renumber[i])
- {
- if (reg_renumber[i] < 0)
- temp_pseudo_reg_arr[n++] = i;
- else
- CLEAR_REGNO_REG_SET (&spilled_pseudos, i);
- }
- if (ira_reassign_pseudos (temp_pseudo_reg_arr, n,
- bad_spill_regs_global,
- pseudo_forbidden_regs, pseudo_previous_regs,
- &spilled_pseudos))
- something_changed = 1;
-
- }
+ /* Retry allocating the pseudos spilled in IRA and the
+ reload. For each reg, merge the various reg sets that
+ indicate which hard regs can't be used, and call
+ ira_reassign_pseudos. */
+ for (n = 0, i = FIRST_PSEUDO_REGISTER; i < (unsigned) max_regno; i++)
+ if (reg_old_renumber[i] != reg_renumber[i])
+ {
+ if (reg_renumber[i] < 0)
+ temp_pseudo_reg_arr[n++] = i;
+ else
+ CLEAR_REGNO_REG_SET (&spilled_pseudos, i);
+ }
+ if (ira_reassign_pseudos (temp_pseudo_reg_arr, n,
+ bad_spill_regs_global,
+ pseudo_forbidden_regs, pseudo_previous_regs,
+ &spilled_pseudos))
+ something_changed = 1;
}
/* Fix up the register information in the insn chain.
This involves deleting those of the spilled pseudos which did not get
@@ -3997,7 +3972,7 @@ finish_spills (int global)
HARD_REG_SET used_by_pseudos;
HARD_REG_SET used_by_pseudos2;
- if (! flag_ira || ! ira_conflicts_p)
+ if (! ira_conflicts_p)
{
/* Don't do it for IRA because IRA and the reload still can
assign hard registers to the spilled pseudos on next
@@ -7000,7 +6975,7 @@ emit_input_reload_insns (struct insn_chain *chain, struct reload *rl,
&& REG_N_SETS (REGNO (old)) == 1)
{
reg_renumber[REGNO (old)] = REGNO (reloadreg);
- if (flag_ira && ira_conflicts_p)
+ if (ira_conflicts_p)
/* Inform IRA about the change. */
ira_mark_allocation_change (REGNO (old));
alter_reg (REGNO (old), -1, false);
@@ -8541,7 +8516,7 @@ delete_output_reload (rtx insn, int j, int last_reload_reg, rtx new_reload_reg)
/* For the debugging info, say the pseudo lives in this reload reg. */
reg_renumber[REGNO (reg)] = REGNO (new_reload_reg);
- if (flag_ira && ira_conflicts_p)
+ if (ira_conflicts_p)
/* Inform IRA about the change. */
ira_mark_allocation_change (REGNO (reg));
alter_reg (REGNO (reg), -1, false);
diff --git a/gcc/reorg.c b/gcc/reorg.c
index fd6a58e02eb..8b3219ad446 100644
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -4046,7 +4046,8 @@ static bool
gate_handle_delay_slots (void)
{
#ifdef DELAY_SLOTS
- return flag_delayed_branch && !crtl->dbr_scheduled_p;
+ /* At -O0 dataflow info isn't updated after RA. */
+ return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
#else
return 0;
#endif
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 5394ea6f6e2..5c2a89edec9 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -2212,17 +2212,11 @@ extern void expand_dec (rtx, rtx);
extern bool can_copy_p (enum machine_mode);
extern rtx fis_get_condition (rtx);
-/* In global.c */
+/* In ira.c */
#ifdef HARD_CONST
extern HARD_REG_SET eliminable_regset;
#endif
extern void mark_elimination (int, int);
-extern void dump_global_regs (FILE *);
-#ifdef HARD_CONST
-/* Yes, this ifdef is silly, but HARD_REG_SET is not always defined. */
-extern void retry_global_alloc (int, HARD_REG_SET);
-#endif
-extern void build_insn_chain (void);
/* In regclass.c */
extern int reg_classes_intersect_p (enum reg_class, enum reg_class);
@@ -2247,10 +2241,6 @@ extern bool invalid_mode_change_p (unsigned int, enum reg_class,
/* In reorg.c */
extern void dbr_schedule (rtx);
-/* In local-alloc.c */
-extern void dump_local_alloc (FILE *);
-extern int update_equiv_regs (void);
-
/* In reload1.c */
extern int function_invariant_p (const_rtx);
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index c98a52162dc..172bb3ebb5a 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -562,9 +562,9 @@ static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
def_list_t *);
-static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t);
-static bool code_motion_path_driver (insn_t, av_set_t, ilist_t,
- cmpd_local_params_p, void *);
+static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
+static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
+ cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);
@@ -819,6 +819,7 @@ count_occurrences_1 (rtx *cur_rtx, void *arg)
{
/* Bail out if we occupy more than one register. */
if (REG_P (*cur_rtx)
+ && HARD_REGISTER_P (*cur_rtx)
&& hard_regno_nregs[REGNO(*cur_rtx)][GET_MODE (*cur_rtx)] > 1)
{
p->n = 0;
@@ -4947,11 +4948,11 @@ prepare_place_to_insert (bnd_t bnd)
/* Find original instructions for EXPR_SEQ and move it to BND boundary.
Return the expression to emit in C_EXPR. */
-static void
+static bool
move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
av_set_t expr_seq, expr_t c_expr)
{
- bool b;
+ bool b, should_move;
unsigned book_uid;
bitmap_iterator bi;
int n_bookkeeping_copies_before_moveop;
@@ -4966,11 +4967,11 @@ move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
bitmap_clear (current_originators);
b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
- get_dest_from_orig_ops (expr_seq), c_expr);
+ get_dest_from_orig_ops (expr_seq), c_expr, &should_move);
/* We should be able to find the expression we've chosen for
scheduling. */
- gcc_assert (b == 1);
+ gcc_assert (b);
if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
stat_insns_needed_bookkeeping++;
@@ -4984,6 +4985,8 @@ move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
current_originators);
}
+
+ return should_move;
}
@@ -5130,7 +5133,7 @@ schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
expr_t c_expr = XALLOCA (expr_def);
insn_t place_to_insert;
insn_t insn;
- bool cant_move;
+ bool should_move;
expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);
@@ -5147,13 +5150,9 @@ schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
move_cond_jump (insn, bnd);
}
- /* Calculate cant_move now as EXPR_WAS_RENAMED can change after move_op
- meaning that there was *any* renaming somewhere. */
- cant_move = EXPR_WAS_CHANGED (expr_vliw) || EXPR_WAS_RENAMED (expr_vliw);
-
/* Find a place for C_EXPR to schedule. */
place_to_insert = prepare_place_to_insert (bnd);
- move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
+ should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
clear_expr (c_expr);
/* Add the instruction. The corner case to care about is when
@@ -5166,13 +5165,13 @@ schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
change_vinsn_in_expr (expr_vliw, vinsn_new);
- cant_move = 1;
+ should_move = false;
}
- if (cant_move)
+ if (should_move)
+ insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
+ else
insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
place_to_insert);
- else
- insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
/* Return the nops generated for preserving of data sets back
into pool. */
@@ -5671,6 +5670,10 @@ move_op_orig_expr_found (insn_t insn, expr_t expr,
insn_emitted = handle_emitting_transformations (insn, expr, params);
only_disconnect = (params->uid == INSN_UID (insn)
&& ! insn_emitted && ! EXPR_WAS_CHANGED (expr));
+
+ /* Mark that we've disconnected an insn. */
+ if (only_disconnect)
+ params->uid = -1;
remove_insn_from_stream (insn, only_disconnect);
}
@@ -6053,7 +6056,7 @@ code_motion_process_successors (insn_t insn, av_set_t orig_ops,
#endif
/* Merge data, clean up, etc. */
- if (code_motion_path_driver_info->after_merge_succs)
+ if (res != -1 && code_motion_path_driver_info->after_merge_succs)
code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
return res;
@@ -6081,7 +6084,7 @@ code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
Returns whether original instructions were found. Note that top-level
code_motion_path_driver always returns true. */
-static bool
+static int
code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
cmpd_local_params_p local_params_in,
void *static_params)
@@ -6315,12 +6318,14 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
DEST is the register chosen for scheduling the current expr. Insert
bookkeeping code in the join points. EXPR_VLIW is the chosen expression,
	C_EXPR is what it looks like at the given cfg point.
+ Set *SHOULD_MOVE to indicate whether we have only disconnected
+ one of the insns found.
Returns whether original instructions were found, which is asserted
to be true in the caller. */
static bool
move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
- rtx dest, expr_t c_expr)
+ rtx dest, expr_t c_expr, bool *should_move)
{
struct moveop_static_params sparams;
struct cmpd_local_params lparams;
@@ -6346,6 +6351,8 @@ move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
if (sparams.was_renamed)
EXPR_WAS_RENAMED (expr_vliw) = true;
+ *should_move = (sparams.uid == -1);
+
return res;
}
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index 0e7caa5450b..1a33b1c54d6 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -937,7 +937,9 @@ place_field (record_layout_info rli, tree field)
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD (field)
- && ! DECL_PACKED (field)
+ && (! DECL_PACKED (field)
+ /* Enter for these packed fields only to issue a warning. */
+ || TYPE_ALIGN (type) <= BITS_PER_UNIT)
&& maximum_field_alignment == 0
&& ! integer_zerop (DECL_SIZE (field))
&& host_integerp (DECL_SIZE (field), 1)
@@ -958,9 +960,21 @@ place_field (record_layout_info rli, tree field)
/* A bit field may not span more units of alignment of its type
than its type itself. Advance to next boundary if necessary. */
if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
- rli->bitpos = round_up (rli->bitpos, type_align);
+ {
+ if (DECL_PACKED (field))
+ {
+ if (warn_packed_bitfield_compat == 1)
+ inform
+ (input_location,
+ "Offset of packed bit-field %qD has changed in GCC 4.4",
+ field);
+ }
+ else
+ rli->bitpos = round_up (rli->bitpos, type_align);
+ }
- TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
+ if (! DECL_PACKED (field))
+ TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
}
#endif
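The stor-layout.c hunk above stops realigning a packed bit-field that would span its type's alignment and instead only issues the new note; the bitfield*.C tests added below exercise exactly this layout. A hedged sketch of the effect (the pre-4.4 offsets are an assumption based on the round_up call that the packed case now skips):

/* Illustrative only.  Under the new rule, packed 'b' stays at bit
   offset 4 and the struct is 2 bytes; assuming the old behaviour
   rounded 'b' up to the next byte, the old layout would have been
   a:0, b:8, c:16, i.e. 3 bytes.  */
struct t
{
  char a:4;
  char b:8;
  char c:4;
} __attribute__ ((packed));

int layout_check[sizeof (struct t) == 2 ? 1 : -1];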
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 91b8d8ae643..a52ad85a5d8 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,198 @@
+2009-01-29 Kazu Hirata <kazu@codesourcery.com>
+
+ PR tree-optimization/39007
+ * gcc.dg/tree-ssa/pr39007.c: New.
+
+2009-01-29 Kenneth Zadeck <zadeck@naturalbridge.com>
+
+ PR middle-end/35854
+ * gcc.dg/lower-subreg-1.c: Renamed dump pass from "subreg" to "subreg1"
+
+2009-01-29 Steve Ellcey <sje@cup.hp.com>
+
+ PR middle-end/38857
+ * gcc.c-torture/compile/pr38857.c: New test.
+
+2009-01-28 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/38852
+ PR fortran/39006
+ * gfortran.dg/bound_6.f90: New test.
+
+2009-01-28 Pat Haugen <pthaugen@us.ibm.com>
+
+ * gcc.target/powerpc/avoid-indexed-addresses.c: New test.
+
+2009-01-28 Kazu Hirata <kazu@codesourcery.com>
+
+ PR tree-optimization/38997
+ * gcc.dg/tree-ssa/pr38997.c: New.
+
+2009-01-28 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/38926
+ * gcc.c-torture/compile/pr38926.c: New testcase.
+
+2009-01-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/38934
+ * gcc.dg/pr38934.c: New test.
+
+2009-01-28 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/38908
+ * g++.dg/warn/Wuninitialized-2.C: New testcase.
+
+2009-01-28 Paolo Bonzini <bonzini@gnu.org>
+
+ PR tree-optimization/38984
+ * gcc.dg/pr38984.c: New XFAILed testcase.
+
+2009-01-28 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/38988
+ * gcc.target/i386/pr38988.c: New test.
+
+2009-01-27 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/37554
+ * g++.dg/parse/crash51.C: New.
+ * g++.old-deja/g++.pt/crash9.C: Adjust.
+
+2009-01-27 Daniel Kraft <d@domob.eu>
+
+ PR fortran/38883
+ * gfortran.dg/mvbits_6.f90: New test.
+ * gfortran.dg/mvbits_7.f90: New test.
+ * gfortran.dg/mvbits_8.f90: New test.
+
+2009-01-27 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/38503
+ * g++.dg/warn/Wstrict-aliasing-bogus-placement-new.C: New testcase.
+
+2009-01-27 Uros Bizjak <ubizjak@gmail.com>
+
+ PR middle-end/38969
+ * gcc.c-torture/execute/pr38969.c: New test.
+
+2009-01-26 Adam Nemet <anemet@caviumnetworks.com>
+
+ PR testsuite/38864
+ * gcc.target/mips/fixed-vector-type.c: Add target { fixed_point }
+ to dg-do compile.
+ * gcc.target/mips/fixed-scalar-type.c: Likewise.
+ * gcc.target/mips/dpaq_sa_l_w.c: Likewise.
+ * gcc.target/mips/dpsq_sa_l_w.c: Likewise.
+
+2009-01-26 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/38952
+ * g++.dg/torture/stackalign/test-unwind.h (main): Also return 0
+ if __USING_SJLJ_EXCEPTIONS__ is defined.
+
+ * g++.dg/torture/stackalign/unwind-0.C (foo): Don't define if
+ __USING_SJLJ_EXCEPTIONS__ is defined.
+ * g++.dg/torture/stackalign/unwind-1.C (foo): Likewise.
+ * g++.dg/torture/stackalign/unwind-2.C (foo): Likewise.
+ * g++.dg/torture/stackalign/unwind-3.C (foo): Likewise.
+ * g++.dg/torture/stackalign/unwind-4.C (foo): Likewise.
+ * g++.dg/torture/stackalign/unwind-5.C (foo): Likewise.
+ * g++.dg/torture/stackalign/unwind-6.C (foo): Likewise.
+
+2009-01-26 Danny Smith <dannysmith@users.sourceforge.net>
+
+ PR testsuite/38949
+ * g++.dg/torture/stackalign/test-unwind.h (ASMNAME): Define.
+ Use instead of C name in asm statements.
+
+2009-01-26 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/38745
+ * g++.dg/torture/pr38745.C: New testcase.
+
+2009-01-26 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/38851
+ * g++.dg/warn/Wuninitialized-1.C: New testcase.
+
+2009-01-25 Hans-Peter Nilsson <hp@axis.com>
+
+ * gcc.dg/bitfld-15.c: Gate warning on target
+ pcc_bitfield_type_matters.
+ * gcc.dg/bitfld-17.c, g++.dg/ext/bitfield2.C,
+ g++.dg/ext/bitfield4.C: Likewise.
+ * gcc.dg/pr17112-1.c: Don't expect a warning for the packed
+ bitfield for any target.
+
+2009-01-24 Jakub Jelinek <jakub@redhat.com>
+
+ PR c/38957
+ * gcc.dg/pr38957.c: New test.
+
+2009-01-24 David Billinghurst <billingd@gcc.gnu.org>
+
+ PR fortran/38955
+ * gfortran.dg/array_constructor_24.f: Allow tolerance when
+ comparing floats.
+
+2009-01-24 Sebastian Pop <sebastian.pop@amd.com>
+
+ PR tree-optimization/38953
+ * gfortran.dg/graphite/pr38953.f90: New.
+
+2009-01-24 Paul Brook <paul@codesourcery.com>
+
+ * gcc.target/arm/neon-cond-1.c: New test.
+
+2009-01-23 Paolo Bonzini <bonzini@gnu.org>
+
+ PR tree-optimization/38932
+ * gcc.dg/pr38932.c: New.
+
+2009-01-23 Revital Eres <eres@il.ibm.com>
+
+ * gcc.dg/sms-7.c: Fix test.
+
+2009-01-22 Adam Nemet <anemet@caviumnetworks.com>
+
+ * gcc.dg/bitfld-15.c, gcc.dg/bitfld-16.c,
+ gcc.dg/bitfld-17.c,gcc.dg/bitfld-18.c: New tests.
+ * g++.dg/ext/bitfield2.C, g++.dg/ext/bitfield3.C,
+ g++.dg/ext/bitfield4.C, g++.dg/ext/bitfield5.C: New tests.
+
+2009-01-22 Steve Ellcey <sje@cup.hp.com>
+
+ * gcc.dg/pr35729.c: Make test x86 specific.
+
+2009-01-22 Steve Ellcey <sje@cup.hp.com>
+
+ PR middle-end/38615
+ * gcc.dg/pr38615.c: New test.
+
+2009-01-22 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/38931
+ * gcc.target/i386/pr38931.c: New test.
+
+2009-01-22 Dodji Seketeli <dodji@redhat.com>
+
+ PR c++/38930
+ * g++.dg/template/typedef11.C: Reverting changes of PR c++/26693.
+ * g++.dg/template/typedef12.C: Likewise.
+ * g++.dg/template/typedef13.C: Likewise.
+ * g++.dg/template/typedef14.C: Likewise.
+ * g++.dg/template/sfinae3.C: Likewise.
+ * g++.old-deja/g++.pt/typename8.C: Likewise.
+ * g++.dg/template/access11.C: Likewise
+
+2009-01-21 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * gcc.dg/tree-ssa/ssa-store-ccp-3.c: Skip for mips*-*-linux*
+ and mips*-*-irix*.
+ * gcc.dg/tree-ssa/vrp47.c: Skip for all MIPS targets.
+
2009-01-21 Dodji Seketeli <dodji@redhat.com>
PR c++/26693
@@ -27,7 +222,7 @@
* gfortran.dg/host_assoc_function_7.f90: New test
2009-01-20 Andrew Pinski <andrew_pinski@playstation.sony.com>
- Richard Guenther <rguenther@suse.de>
+ Richard Guenther <rguenther@suse.de>
PR tree-optimization/38747
PR tree-optimization/38748
@@ -219,6 +414,11 @@
* gcc.dg/vect/vect-105.c: Prevent compiler from hoisting abort
out of loop.
+2009-01-14 Vladimir Makarov <vmakarov@redhat.com>
+
+ PR target/38811
+ * g++.dg/torture/pr38811.C: New file.
+
2009-01-14 Richard Guenther <rguenther@suse.de>
PR tree-optimization/38826
diff --git a/gcc/testsuite/g++.dg/ext/bitfield2.C b/gcc/testsuite/g++.dg/ext/bitfield2.C
new file mode 100644
index 00000000000..303d82de267
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/bitfield2.C
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* Remove pedantic. Allow the GCC extension to use char for bitfields. */
+/* { dg-options "" } */
+
+struct t
+{ /* { dg-message "note: Offset of packed bit-field 't::b' has changed in GCC 4.4" "" { target pcc_bitfield_type_matters } } */
+ char a:4;
+ char b:8;
+ char c:4;
+} __attribute__ ((packed));
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/g++.dg/ext/bitfield3.C b/gcc/testsuite/g++.dg/ext/bitfield3.C
new file mode 100644
index 00000000000..3b30cc9b002
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/bitfield3.C
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-Wno-packed-bitfield-compat" } */
+
+struct t
+{
+ char a:4;
+ char b:8;
+ char c:4;
+} __attribute__ ((packed));
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/g++.dg/ext/bitfield4.C b/gcc/testsuite/g++.dg/ext/bitfield4.C
new file mode 100644
index 00000000000..258b3338c23
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/bitfield4.C
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+struct t
+{ /* { dg-message "note: Offset of packed bit-field 't::b' has changed in GCC 4.4" "" { target pcc_bitfield_type_matters } } */
+ char a:4;
+ char b:8 __attribute__ ((packed));
+ char c:4;
+};
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/g++.dg/ext/bitfield5.C b/gcc/testsuite/g++.dg/ext/bitfield5.C
new file mode 100644
index 00000000000..2cd8e7daa94
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/bitfield5.C
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-Wno-packed-bitfield-compat" } */
+
+struct t
+{
+ char a:4;
+ char b:8 __attribute__ ((packed));
+ char c:4;
+};
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/g++.dg/parse/crash51.C b/gcc/testsuite/g++.dg/parse/crash51.C
new file mode 100644
index 00000000000..03fcd361336
--- /dev/null
+++ b/gcc/testsuite/g++.dg/parse/crash51.C
@@ -0,0 +1,9 @@
+// PR c++/37554
+
+struct A {};
+class B : A {};
+
+void foo(B b)
+{
+ (A)b; // { dg-error "inaccessible base" }
+}
diff --git a/gcc/testsuite/g++.dg/template/access11.C b/gcc/testsuite/g++.dg/template/access11.C
index 38bd5155f65..4c8dce521a8 100644
--- a/gcc/testsuite/g++.dg/template/access11.C
+++ b/gcc/testsuite/g++.dg/template/access11.C
@@ -17,8 +17,8 @@ template <> struct X::Y<int> {
A::X x; // { dg-error "this context" }
};
-template <typename T> struct X::Y { // { dg-error "this context" }
+template <typename T> struct X::Y {
typename T::X x; // { dg-error "this context" }
};
-template struct X::Y<A>; // { dg-message "instantiated from here" }
+template struct X::Y<A>; // { dg-message "instantiated" }
diff --git a/gcc/testsuite/g++.dg/template/sfinae3.C b/gcc/testsuite/g++.dg/template/sfinae3.C
index 349463d95fe..5799a364e42 100644
--- a/gcc/testsuite/g++.dg/template/sfinae3.C
+++ b/gcc/testsuite/g++.dg/template/sfinae3.C
@@ -1,5 +1,5 @@
// PR c++/24671
-// { dg-do compile }
+// { dg-options "" }
template<typename> struct A
{
@@ -9,9 +9,9 @@ template<typename> struct A
template<typename> struct B
{
- B(const B&);
- typedef typename A<char[A<B>::i]>::X Y; // { dg-error "forbids zero-size array" }
- template<typename T> B(T, Y);
+ B(const B&); // { dg-message "candidate" }
+ typedef typename A<char[A<B>::i]>::X Y;
+ template<typename T> B(T, Y); // { dg-error "call" }
};
-B<int> b(0,0); // { dg-message "instantiated from here" }
+B<int> b(0,0);
diff --git a/gcc/testsuite/g++.dg/template/typedef11.C b/gcc/testsuite/g++.dg/template/typedef11.C
deleted file mode 100644
index c7c7c989f72..00000000000
--- a/gcc/testsuite/g++.dg/template/typedef11.C
+++ /dev/null
@@ -1,25 +0,0 @@
-// Author: Dodji Seketeli <dodji@redhat.com>
-// Origin: PR c++/26693
-// { dg-do compile }
-
-
-class Alpha
-{
- typedef int X; // { dg-error "'typedef int Alpha::X' is private" }
-};
-
-template<int>
-class Beta
-{
- typedef int Y; // { dg-error "'typedef int Beta<0>::Y' is private" }
-};
-
-template <int>
-int
-bar ()
-{
- Beta<0>::Y i = 0;
- return Alpha::X ();
-}
-
-int i = bar<0> (); // { dg-error "within this context" }
diff --git a/gcc/testsuite/g++.dg/template/typedef12.C b/gcc/testsuite/g++.dg/template/typedef12.C
deleted file mode 100644
index 30605044f6f..00000000000
--- a/gcc/testsuite/g++.dg/template/typedef12.C
+++ /dev/null
@@ -1,23 +0,0 @@
-// Contributed by Dodji Seketeli <dodji@redhat.com>
-// Origin: Jason Merrill <jason@redhat.com>, PR c++/26693
-// { dg-do compile }
-
-class A
-{
- protected:
- typedef int mytype;
-};
-
-template <class T> class B;
-
-class C: public A
-{
- template <class T> friend class B;
-};
-
-template <class T> class B
-{
- C::mytype mem;
-};
-
-B<int> b;
diff --git a/gcc/testsuite/g++.dg/template/typedef13.C b/gcc/testsuite/g++.dg/template/typedef13.C
deleted file mode 100644
index aa8bb326829..00000000000
--- a/gcc/testsuite/g++.dg/template/typedef13.C
+++ /dev/null
@@ -1,16 +0,0 @@
-// Contributed by Dodji Seketeli <dodji@redhat.com>
-// Origin: PR c++/26693
-// { dg-do compile }
-
-class A
-{
- typedef int mytype; // { dg-error "typedef int A::mytype' is private" }
-};
-
-template <class T> class B : public A
-{ // { dg-error "within this context" }
- mytype mem;
-};
-
-B<int> b; // { dg-message "instantiated from here" }
-
diff --git a/gcc/testsuite/g++.dg/template/typedef14.C b/gcc/testsuite/g++.dg/template/typedef14.C
deleted file mode 100644
index caa565a08cd..00000000000
--- a/gcc/testsuite/g++.dg/template/typedef14.C
+++ /dev/null
@@ -1,16 +0,0 @@
-// Contributed by Dodji Seketeli <dodji@redhat.com>
-// Origin: PR c++/26693
-// { dg-do compile }
-
-template <class T>
-struct A
-{
- typedef int mytype;
-
- void
- foo ()
- {
- mytype v = ~static_cast<mytype> (0);
- }
-};
-
diff --git a/gcc/testsuite/g++.dg/torture/pr38745.C b/gcc/testsuite/g++.dg/torture/pr38745.C
new file mode 100644
index 00000000000..4ad9d85fe36
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr38745.C
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+
+union u_u16
+{
+ unsigned short v;
+ struct
+ {
+ unsigned char lo8, hi8;
+ } __attribute__ ((__may_alias__)) u;
+} __attribute__ ((__may_alias__));
+union u_u32
+{
+ unsigned int v;
+ struct
+ {
+ u_u16 lo16, hi16;
+ } u;
+} __attribute__ ((__may_alias__));
+union u_u64
+{
+ struct
+ {
+ u_u32 lo32, hi32;
+ } u;
+};
+struct Record
+{
+};
+long long
+UnpackFullKey (Record & rec, const char *&p)
+{
+ long long c64 = 0;
+ (*(u_u16 *) & (*(u_u32 *) & ( *(u_u64*)&c64).u.lo32.v).u.lo16.v).u.hi8 = 1;
+ return c64;
+}
+
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/test-unwind.h b/gcc/testsuite/g++.dg/torture/stackalign/test-unwind.h
index e6493ffedb2..b07b27c21fb 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/test-unwind.h
+++ b/gcc/testsuite/g++.dg/torture/stackalign/test-unwind.h
@@ -1,5 +1,10 @@
#include "check.h"
+
+#define ASMNAME(cname) ASMNAME2 (__USER_LABEL_PREFIX__, cname)
+#define ASMNAME2(prefix, cname) STRING (prefix) cname
+#define STRING(x) #x
+
#ifdef __cplusplus
extern "C" void abort (void);
#else
@@ -29,7 +34,7 @@ extern void foo(void);
);\
}
-#ifdef __PIC__
+#if defined __PIC__ || defined __USING_SJLJ_EXCEPTIONS__
int
main ()
{
@@ -69,8 +74,8 @@ main()
: "i" (INIT_EBX)
);
__asm__ __volatile__ (
- "movl %ebp, g_ebp_save\n\t"
- "movl %esp, g_esp_save\n\t"
+ "movl %ebp," ASMNAME("g_ebp_save")"\n\t"
+ "movl %esp," ASMNAME("g_esp_save")"\n\t"
);
try {
foo();
@@ -81,11 +86,11 @@ main()
// Get DI/SI/BX register value after exception caught
__asm__ __volatile__ (
- "movl %edi, g_edi\n\t"
- "movl %esi, g_esi\n\t"
- "movl %ebx, g_ebx\n\t"
- "movl %ebp, g_ebp\n\t"
- "movl %esp, g_esp\n\t"
+ "movl %edi," ASMNAME("g_edi")"\n\t"
+ "movl %esi," ASMNAME("g_esi")"\n\t"
+ "movl %ebx," ASMNAME("g_ebx")"\n\t"
+ "movl %ebp," ASMNAME("g_ebp")"\n\t"
+ "movl %esp," ASMNAME("g_esp")"\n\t"
);
// Check if DI/SI/BX register value are the same as before calling
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/unwind-0.C b/gcc/testsuite/g++.dg/torture/stackalign/unwind-0.C
index 546123bdd0e..f8166eb9548 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/unwind-0.C
+++ b/gcc/testsuite/g++.dg/torture/stackalign/unwind-0.C
@@ -2,7 +2,7 @@
#include "test-unwind.h"
-#ifndef __PIC__
+#if !defined __PIC__ && !defined __USING_SJLJ_EXCEPTIONS__
void __attribute__ ((noinline)) foo()
{
ALTER_REGS();
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/unwind-1.C b/gcc/testsuite/g++.dg/torture/stackalign/unwind-1.C
index 3b809642ab7..70dce748567 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/unwind-1.C
+++ b/gcc/testsuite/g++.dg/torture/stackalign/unwind-1.C
@@ -2,7 +2,7 @@
#include "test-unwind.h"
-#ifndef __PIC__
+#if !defined __PIC__ && !defined __USING_SJLJ_EXCEPTIONS__
/* Test situation 1: Stack really realign without DRAP */
void __attribute__ ((noinline))
foo ()
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/unwind-2.C b/gcc/testsuite/g++.dg/torture/stackalign/unwind-2.C
index 1569ed84b7d..791eeb55106 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/unwind-2.C
+++ b/gcc/testsuite/g++.dg/torture/stackalign/unwind-2.C
@@ -2,7 +2,7 @@
#include "test-unwind.h"
-#ifndef __PIC__
+#if !defined __PIC__ && !defined __USING_SJLJ_EXCEPTIONS__
/* Test situation 2: stack really realign with DRAP reg CX */
void __attribute__ ((noinline))
foo ()
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/unwind-3.C b/gcc/testsuite/g++.dg/torture/stackalign/unwind-3.C
index 48eddaf5565..29dee2d5a7b 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/unwind-3.C
+++ b/gcc/testsuite/g++.dg/torture/stackalign/unwind-3.C
@@ -2,7 +2,7 @@
#include "test-unwind.h"
-#ifndef __PIC__
+#if !defined __PIC__ && !defined __USING_SJLJ_EXCEPTIONS__
/* Test situation 3: Stack realign really happen with DRAP reg DI */
void __attribute__ ((noinline)) __attribute__ ((regparm(3)))
bar (int arg1, int arg2, int arg3)
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/unwind-4.C b/gcc/testsuite/g++.dg/torture/stackalign/unwind-4.C
index dacbd3dede9..a896176ae04 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/unwind-4.C
+++ b/gcc/testsuite/g++.dg/torture/stackalign/unwind-4.C
@@ -2,7 +2,7 @@
#include "test-unwind.h"
-#ifndef __PIC__
+#if !defined __PIC__ && !defined __USING_SJLJ_EXCEPTIONS__
volatile int __attribute__ ((aligned(32))) g_a=1;
/* Test situation 4: no Drap and stack realign doesn't really happen */
void __attribute__ ((noinline))
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/unwind-5.C b/gcc/testsuite/g++.dg/torture/stackalign/unwind-5.C
index fde430bfb72..514e4e77a56 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/unwind-5.C
+++ b/gcc/testsuite/g++.dg/torture/stackalign/unwind-5.C
@@ -2,7 +2,7 @@
#include "test-unwind.h"
-#ifndef __PIC__
+#if !defined __PIC__ && !defined __USING_SJLJ_EXCEPTIONS__
double g_f=1.0;
/* Test situation 5: Stack realign dosn't really happen with DRAP reg CX */
void __attribute__ ((noinline)) __attribute__ ((regparm(2)))
diff --git a/gcc/testsuite/g++.dg/torture/stackalign/unwind-6.C b/gcc/testsuite/g++.dg/torture/stackalign/unwind-6.C
index 7c9dee13338..a888ca1b5f9 100644
--- a/gcc/testsuite/g++.dg/torture/stackalign/unwind-6.C
+++ b/gcc/testsuite/g++.dg/torture/stackalign/unwind-6.C
@@ -2,7 +2,7 @@
#include "test-unwind.h"
-#ifndef __PIC__
+#if !defined __PIC__ && !defined __USING_SJLJ_EXCEPTIONS__
double g_f=1.0;
/* Test situation 6: Stack realign dosn't really happen with DRAP reg DI */
void __attribute__ ((noinline)) __attribute__ ((regparm(3)))
diff --git a/gcc/testsuite/g++.dg/warn/Wstrict-aliasing-bogus-placement-new.C b/gcc/testsuite/g++.dg/warn/Wstrict-aliasing-bogus-placement-new.C
new file mode 100644
index 00000000000..514957bcde2
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/Wstrict-aliasing-bogus-placement-new.C
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -Wstrict-aliasing" } */
+
+inline void *operator new (__SIZE_TYPE__, void *__p) throw() { return __p; }
+
+struct Y {
+ Y() {}
+ int i;
+};
+
+struct X {
+ X() {}
+ void construct(const Y& y)
+ {
+ new (&m_data[0]) Y(y);
+ }
+ bool initialized;
+ char m_data[sizeof (Y)];
+};
+
+void bar(const X&);
+void foo(Y& y)
+{
+ X x;
+ x.construct(y);
+ x.initialized = true;
+ bar(x);
+}
+
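The warning test above constructs a Y inside a raw char buffer with the global placement operator new; the point of the fix is that -Wstrict-aliasing must not flag the cast of &m_data[0]. The same pattern as a self-contained sketch (illustrative only; GCC's aligned attribute stands in for whatever alignment guarantee a real container would provide):

#include <new>       /* declares void *operator new (std::size_t, void *) */
#include <cstdio>

struct Y
{
  explicit Y (int v) : i (v) {}
  int i;
};

int main ()
{
  /* Raw storage with alignment suitable for Y.  */
  char storage[sizeof (Y)] __attribute__ ((aligned (__alignof__ (Y))));

  Y *y = new (storage) Y (42);   /* construct in place, no heap allocation */
  std::printf ("%d\n", y->i);
  y->~Y ();                      /* explicit destruction pairs with placement new */
  return 0;
}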
diff --git a/gcc/testsuite/g++.dg/warn/Wuninitialized-1.C b/gcc/testsuite/g++.dg/warn/Wuninitialized-1.C
new file mode 100644
index 00000000000..7b1b90b944d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/Wuninitialized-1.C
@@ -0,0 +1,15 @@
+/* { dg-options "-O2 -Wuninitialized" } */
+
+struct Empty { Empty() {} }; /* { dg-bogus "uninitialized" } */
+struct Other {
+ Other(const Empty& e_) : e(e_) {}
+ Empty e;
+};
+void bar(Other&);
+void foo()
+{
+ Empty e;
+ Other o(e);
+ bar(o);
+}
+
diff --git a/gcc/testsuite/g++.dg/warn/Wuninitialized-2.C b/gcc/testsuite/g++.dg/warn/Wuninitialized-2.C
new file mode 100644
index 00000000000..2b6f9253012
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/Wuninitialized-2.C
@@ -0,0 +1,53 @@
+/* { dg-do compile } */
+/* { dg-options "-O -Wuninitialized" } */
+
+struct S8 { template<typename T> S8(T) { } };
+
+template<typename T> struct S10;
+template<typename T> struct S10<T()> { typedef T S12; typedef S8 S1(); };
+
+template<typename T> struct S3 { };
+template<typename T> struct S11 { S11(S3<T>); };
+
+struct S2
+{
+ template<typename T> operator S11<T>() { return S11<T>(S5<T>()); }
+ template<typename T> struct S5:public S3<T>
+ {
+ virtual typename S10<T>::S12 S13() {
+ return 0;
+ }
+ };
+};
+
+template<typename T> S11<T> S6(S3<T>) { return S11<T>(S3<T>()); }
+template<typename S12> struct S7 { typedef S12 S15(); };
+
+struct S4
+{
+ template<typename T> operator S11<T>()
+ {
+ struct S14:public S3<T>
+ {
+ S14(S2 x):S11_(x) { }
+ S11<typename S7<typename S10<T>::S12>::S15> S11_;
+ };
+ return S6(S14(S11_));
+ }
+ S2 S11_;
+};
+
+struct S9
+{
+ template<typename F> operator S11<F>() { return S11<F>(S14<F>(S11_)); }
+ template<typename F> struct S14:public S3<F>
+ {
+ S14(S4 x):S11_(x) { }
+ S11<typename S10<F>::S1> S11_;
+ };
+ S4 S11_;
+};
+
+void S15(S11<void()>);
+void S16() { S9 x; S15(x); }
+
diff --git a/gcc/testsuite/g++.old-deja/g++.pt/crash9.C b/gcc/testsuite/g++.old-deja/g++.pt/crash9.C
index dab0e4c2824..20bd7584fdd 100644
--- a/gcc/testsuite/g++.old-deja/g++.pt/crash9.C
+++ b/gcc/testsuite/g++.old-deja/g++.pt/crash9.C
@@ -1,11 +1,11 @@
// { dg-do assemble }
template <class T>
-void f(T) {} // { dg-error "" } parameter has incomplete type
+void f(T) {}
-class C; // { dg-error "" } forward declaration
+class C; // { dg-error "forward declaration" }
void g(const C& c)
{
- f(c); // { dg-error "" } invalid use of undefined type
+ f(c); // { dg-error "invalid use of incomplete type|initializing argument" }
}
diff --git a/gcc/testsuite/g++.old-deja/g++.pt/typename8.C b/gcc/testsuite/g++.old-deja/g++.pt/typename8.C
index 4861cf301ed..6eb818b3947 100644
--- a/gcc/testsuite/g++.old-deja/g++.pt/typename8.C
+++ b/gcc/testsuite/g++.old-deja/g++.pt/typename8.C
@@ -5,14 +5,14 @@ template < class T > class A
public:
typedef typename T::myT anotherT; // { dg-error "" } undefined type
- anotherT t;
+ anotherT t; // { dg-error "" } undefined type
A() { }
- A(anotherT _t) {
+ A(anotherT _t) { // { dg-error "" } undefined type
t=_t;
}
- anotherT getT() {
+ anotherT getT() { // { dg-error "" } undefined type
return t;
}
};
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr38857.c b/gcc/testsuite/gcc.c-torture/compile/pr38857.c
new file mode 100644
index 00000000000..2492b77a131
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr38857.c
@@ -0,0 +1,22 @@
+static const int vs_total_ac_bits = 2680;
+typedef struct EncBlockInfo {
+ short mb[64];
+ unsigned char next[64];
+} EncBlockInfo;
+inline void dv_guess_qnos(EncBlockInfo* blks, int* qnos) {
+ int size[5];
+ int j, k, a, prev;
+ EncBlockInfo* b;
+ for(a=2; a==2 || vs_total_ac_bits < size[0]; a+=a){
+ for (j=0; j<6*5; j++, b++) {
+ for (k= b->next[prev]; k<64; k= b->next[k]) {
+ if(b->mb[k] < a && b->mb[k] > -a){
+ b->next[prev] = b->next[k];
+ }
+ else{
+ prev = k;
+ }
+ }
+ }
+ }
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr38926.c b/gcc/testsuite/gcc.c-torture/compile/pr38926.c
new file mode 100644
index 00000000000..9c71a798cde
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr38926.c
@@ -0,0 +1,41 @@
+static inline int foo (unsigned _si1)
+{
+ if (_si1 != 0)
+ if (_si1 > 2147483647)
+ return 1;
+ return 0;
+}
+
+static inline unsigned bar (unsigned _left, int _right)
+{
+ return (unsigned) _right >= 8 ? 1 : _left >> _right;
+}
+
+unsigned g_2;
+unsigned g_67;
+volatile unsigned g_162;
+
+static inline int func_62 (unsigned p_63)
+{
+ p_63 = g_2 & g_67;
+ if (g_2)
+ ;
+ else if (p_63)
+ return 1;
+ g_67 = bar (p_63, g_2);
+ return 0;
+}
+
+unsigned baz (void)
+{
+ if (g_2)
+ for (; g_2 <= -16; g_2 = foo (g_2))
+ {
+ for (; g_162; g_162)
+ func_62 (func_62 (0));
+ if (g_67)
+ break;
+ }
+ return g_2;
+}
+
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr38969.c b/gcc/testsuite/gcc.c-torture/execute/pr38969.c
new file mode 100644
index 00000000000..328bdf4474b
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr38969.c
@@ -0,0 +1,25 @@
+__complex__ float
+__attribute__ ((noinline)) foo (__complex__ float x)
+{
+ return x;
+}
+
+__complex__ float
+__attribute__ ((noinline)) bar (__complex__ float x)
+{
+ return foo (x);
+}
+
+int main()
+{
+ __complex__ float a, b;
+ __real__ a = 9;
+ __imag__ a = 42;
+
+ b = bar (a);
+
+ if (a != b)
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/bitfld-15.c b/gcc/testsuite/gcc.dg/bitfld-15.c
new file mode 100644
index 00000000000..1c066bb4865
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitfld-15.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* Remove pedantic. Allow the GCC extension to use char for bitfields. */
+/* { dg-options "" } */
+
+struct t
+{
+ char a:4;
+ char b:8;
+ char c:4;
+} __attribute__ ((packed)); /* { dg-message "note: Offset of packed bit-field 'b' has changed in GCC 4.4" "" { target pcc_bitfield_type_matters } } */
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/gcc.dg/bitfld-16.c b/gcc/testsuite/gcc.dg/bitfld-16.c
new file mode 100644
index 00000000000..3b30cc9b002
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitfld-16.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-Wno-packed-bitfield-compat" } */
+
+struct t
+{
+ char a:4;
+ char b:8;
+ char c:4;
+} __attribute__ ((packed));
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/gcc.dg/bitfld-17.c b/gcc/testsuite/gcc.dg/bitfld-17.c
new file mode 100644
index 00000000000..32b9c1efd78
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitfld-17.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+struct t
+{
+ char a:4;
+ char b:8 __attribute__ ((packed));
+ char c:4;
+}; /* { dg-message "note: Offset of packed bit-field 'b' has changed in GCC 4.4" "" { target pcc_bitfield_type_matters } } */
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/gcc.dg/bitfld-18.c b/gcc/testsuite/gcc.dg/bitfld-18.c
new file mode 100644
index 00000000000..2cd8e7daa94
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitfld-18.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-Wno-packed-bitfield-compat" } */
+
+struct t
+{
+ char a:4;
+ char b:8 __attribute__ ((packed));
+ char c:4;
+};
+
+int assrt[sizeof (struct t) == 2 ? 1 : -1];
diff --git a/gcc/testsuite/gcc.dg/lower-subreg-1.c b/gcc/testsuite/gcc.dg/lower-subreg-1.c
index bb35d21bb50..4de90bd99d6 100644
--- a/gcc/testsuite/gcc.dg/lower-subreg-1.c
+++ b/gcc/testsuite/gcc.dg/lower-subreg-1.c
@@ -1,8 +1,8 @@
/* { dg-do compile { target { { { ! mips64 } && { ! ia64-*-* } } && { ! spu-*-* } } } } */
-/* { dg-options "-O -fdump-rtl-subreg" } */
+/* { dg-options "-O -fdump-rtl-subreg1" } */
/* { dg-require-effective-target ilp32 } */
long long test (long long a, long long b) { return a | b; }
-/* { dg-final { scan-rtl-dump "Splitting reg" "subreg" } } */
-/* { dg-final { cleanup-rtl-dump "subreg" } } */
+/* { dg-final { scan-rtl-dump "Splitting reg" "subreg1" } } */
+/* { dg-final { cleanup-rtl-dump "subreg1" } } */
diff --git a/gcc/testsuite/gcc.dg/pr17112-1.c b/gcc/testsuite/gcc.dg/pr17112-1.c
index 04d3a2e0e21..d43868fe074 100644
--- a/gcc/testsuite/gcc.dg/pr17112-1.c
+++ b/gcc/testsuite/gcc.dg/pr17112-1.c
@@ -6,7 +6,7 @@
extern void abort(void);
typedef struct {
- int int24:24 __attribute__ ((packed)); /* { dg-warning "attribute ignored" "" { target { default_packed && { ! pcc_bitfield_type_matters } } } } */
+ int int24:24 __attribute__ ((packed));
} myint24;
myint24 x[3] = {
diff --git a/gcc/testsuite/gcc.dg/pr35729.c b/gcc/testsuite/gcc.dg/pr35729.c
index 3ed40f413bc..a9cf2e1057b 100644
--- a/gcc/testsuite/gcc.dg/pr35729.c
+++ b/gcc/testsuite/gcc.dg/pr35729.c
@@ -1,4 +1,7 @@
-/* { dg-do compile } */
+/* Target is restricted to x86 type architectures, so that we may
+ assume something about memory addressing modes. */
+
+/* { dg-do compile { target { { i?86-*-* x86_64-*-* } && nonpic } } } */
/* { dg-options "-Os -fdump-rtl-loop2_invariant" } */
const volatile int g_361 = 3L;
diff --git a/gcc/testsuite/gcc.dg/pr38615.c b/gcc/testsuite/gcc.dg/pr38615.c
new file mode 100644
index 00000000000..8a818a11fb3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr38615.c
@@ -0,0 +1,19 @@
+/* { dg-do run } */
+
+int t;
+extern void abort (void);
+
+int f(int t, const int *a)
+{
+ const int b[] = { 1, 2, 3};
+ if (!t)
+ return f(1, b);
+ return b == a;
+}
+
+int main(void)
+{
+ if (f(0, 0))
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pr38932.c b/gcc/testsuite/gcc.dg/pr38932.c
new file mode 100644
index 00000000000..4dfaffc777a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr38932.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+/* This variable needed only to exercise FRE instead of CCP. */
+unsigned char g;
+
+extern void abort();
+
+void f (long long int p)
+{
+ g = 255;
+ if (p >= (-9223372036854775807LL - 1) - (signed char) g)
+ p = 1;
+
+ if (p)
+ abort ();
+}
+
+
diff --git a/gcc/testsuite/gcc.dg/pr38934.c b/gcc/testsuite/gcc.dg/pr38934.c
new file mode 100644
index 00000000000..c05742c5baa
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr38934.c
@@ -0,0 +1,19 @@
+/* PR middle-end/38934 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -std=gnu99 -w" } */
+
+/* This variable needed only to work around earlier optimizations than VRP. */
+unsigned char g;
+
+extern void abort ();
+
+void
+f (long long int p)
+{
+ g = 255;
+ if (p >= -9223372036854775808LL - (signed char) g)
+ p = 1;
+
+ if (p)
+ abort ();
+}
diff --git a/gcc/testsuite/gcc.dg/pr38957.c b/gcc/testsuite/gcc.dg/pr38957.c
new file mode 100644
index 00000000000..f94cd7627b4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr38957.c
@@ -0,0 +1,16 @@
+/* PR c/38957 */
+/* { dg-do compile } */
+
+char *
+foo (void)
+{
+ char a[2];
+ return a+1; /* { dg-warning "returns address of local variable" } */
+}
+
+char *
+bar (void)
+{
+ char a[2];
+ return a; /* { dg-warning "returns address of local variable" } */
+}
diff --git a/gcc/testsuite/gcc.dg/pr38984.c b/gcc/testsuite/gcc.dg/pr38984.c
new file mode 100644
index 00000000000..0ba72739942
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr38984.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-delete-null-pointer-checks -fdump-tree-optimized" }
+ * */
+
+int f(int *p)
+{
+ int a = *p;
+ int *null = 0;
+ *null = 5;
+ return *p == a;
+}
+
+/* Currently fails because of PR38985. */
+
+/* { dg-final { scan-tree-dump-times " = \\\*p" 2 "optimized" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-not "return 1" "optimized" { xfail *-*-* } } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
+
+
diff --git a/gcc/testsuite/gcc.dg/sms-7.c b/gcc/testsuite/gcc.dg/sms-7.c
index 35f04a5ac30..7c4810d8610 100644
--- a/gcc/testsuite/gcc.dg/sms-7.c
+++ b/gcc/testsuite/gcc.dg/sms-7.c
@@ -3,7 +3,7 @@
extern void abort (void);
-void foo (int *a, short * __restrict__ b, short * __restrict__ c)
+void foo (int * __restrict__ a, int * __restrict__ b, short * c)
{
int i;
for(i = 0; i < 100; i+=4)
@@ -15,8 +15,8 @@ void foo (int *a, short * __restrict__ b, short * __restrict__ c)
}
}
-int a[100];
-short b[100], c[100];
+int a[100], b[100];
+short c[100];
int main()
{
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr38997.c b/gcc/testsuite/gcc.dg/tree-ssa/pr38997.c
new file mode 100644
index 00000000000..211203c9c35
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr38997.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-loop-distribution" } */
+
+int a[8][8];
+int b[8];
+
+void
+foo (int i)
+{
+ int j;
+ for (j = 0; j < 8; j++)
+ {
+ a[i][j] = 0;
+ b[j] = j;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr39007.c b/gcc/testsuite/gcc.dg/tree-ssa/pr39007.c
new file mode 100644
index 00000000000..94b24436d69
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr39007.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-loop-distribution" } */
+
+void
+foo (int *__restrict__ p, int *__restrict__ q, int count)
+{
+ int i;
+ for (i = 0; i < count; i++)
+ {
+ *p++ = 0;
+ *q++ = 0;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-store-ccp-3.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-store-ccp-3.c
index 1db56a17537..fd6b7c8126e 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-store-ccp-3.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-store-ccp-3.c
@@ -1,5 +1,7 @@
/* { dg-do compile } */
-/* { dg-skip-if "" { *-*-darwin* hppa*64*-*-* } { "*" } { "" } } */
+/* Skipped on MIPS GNU/Linux and IRIX target because __PIC__ can be
+ defined for executables as well as shared libraries. */
+/* { dg-skip-if "" { *-*-darwin* hppa*64*-*-* mips*-*-linux* mips*-*-irix* } { "*" } { "" } } */
/* { dg-options "-O2 -fno-common -fdump-tree-optimized" } */
const int conststaticvariable;
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp47.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp47.c
index a1ee927816c..2e9d08b59fa 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp47.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp47.c
@@ -1,4 +1,6 @@
-/* { dg-do compile } */
+/* Skip on MIPS, where LOGICAL_OP_NON_SHORT_CIRCUIT inhibits the setcc
+ optimizations that expose the VRP opportunity. */
+/* { dg-do compile { target { ! mips*-*-* } } } */
/* { dg-options "-O2 -fdump-tree-vrp -fdump-tree-dom" } */
int h(int x, int y)
diff --git a/gcc/testsuite/gcc.target/arm/neon-cond-1.c b/gcc/testsuite/gcc.target/arm/neon-cond-1.c
new file mode 100644
index 00000000000..7d87b6e126b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/neon-cond-1.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_hw } */
+/* { dg-options "-O2 -mfpu=neon -mfloat-abi=softfp" } */
+/* Check that the arm_final_prescan_insn ccfsm code does not try to
+ * conditionally execute NEON instructions. */
+#include <arm_neon.h>
+#include <stdlib.h>
+
+int __attribute__((noinline))
+foo(uint32x2_t a, uint32_t *p, uint32_t *q)
+{
+ if (p != q)
+ /* This vst1 instruction could be conditional, except that NEON
+ instructions are never conditional in ARM mode. */
+ vst1_u32(p, a);
+ return 0;
+}
+
+int
+main()
+{
+ uint32x2_t v;
+ uint32_t a[2] = {1, 42};
+ v = vld1_u32(a);
+ v = vadd_u32(v, v);
+ foo(v, a, a);
+ if (a[0] != 1 || a[1] != 42)
+ abort();
+ exit(0);
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr38931.c b/gcc/testsuite/gcc.target/i386/pr38931.c
new file mode 100644
index 00000000000..dd35dec7520
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr38931.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse" } */
+
+typedef int __m64 __attribute__ ((__vector_size__ (8)));
+
+extern __m64 foo () ;
+
+void bar (const int input_bpl, const unsigned char *input,
+ unsigned char *output, unsigned long x1)
+{
+ unsigned char *pix_end_ptr = output + x1 * 4;
+ __m64 m_original = { 0, 0 };
+ __m64 m_base_addr = __builtin_ia32_vec_init_v2si (0, input_bpl);
+ __m64 m_addr = __builtin_ia32_paddd (m_original, m_base_addr);
+ __m64 *a0 = (__m64 *) input;
+
+ for (; output < pix_end_ptr; output += 4)
+ {
+ a0 = (__m64 *) (input + __builtin_ia32_vec_ext_v2si (m_addr, 0));
+ m_addr = foo ();
+ __builtin_prefetch (a0, 0);
+ }
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr38988.c b/gcc/testsuite/gcc.target/i386/pr38988.c
new file mode 100644
index 00000000000..8e2c8eaa67b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr38988.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target lp64 } */
+/* { dg-require-effective-target fpic } */
+/* { dg-options "-O2 -fpic -mcmodel=large" } */
+
+typedef long unsigned int size_t;
+typedef void (*func_ptr) (void);
+
+static func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) };
+
+void
+__do_global_dtors_aux (void)
+{
+ extern func_ptr __DTOR_END__[];
+ size_t dtor_idx = 0;
+ const size_t max_idx = __DTOR_END__ - __DTOR_LIST__ - 1;
+ func_ptr f;
+
+ while (dtor_idx < max_idx)
+ {
+ f = __DTOR_LIST__[++dtor_idx];
+ f ();
+ }
+}
diff --git a/gcc/testsuite/gcc.target/mips/dpaq_sa_l_w.c b/gcc/testsuite/gcc.target/mips/dpaq_sa_l_w.c
index d1812c16520..87d1da98cb5 100644
--- a/gcc/testsuite/gcc.target/mips/dpaq_sa_l_w.c
+++ b/gcc/testsuite/gcc.target/mips/dpaq_sa_l_w.c
@@ -1,4 +1,4 @@
-/* { dg-do compile } */
+/* { dg-do compile { target { fixed_point } } } */
/* { dg-options "-O2 -mgp32 -mdsp" } */
/* { dg-final { scan-assembler-times "\tdpaq_sa.l.w\t\\\$ac" 3 } } */
diff --git a/gcc/testsuite/gcc.target/mips/dpsq_sa_l_w.c b/gcc/testsuite/gcc.target/mips/dpsq_sa_l_w.c
index 849bd923261..9aeb5667acf 100644
--- a/gcc/testsuite/gcc.target/mips/dpsq_sa_l_w.c
+++ b/gcc/testsuite/gcc.target/mips/dpsq_sa_l_w.c
@@ -1,4 +1,4 @@
-/* { dg-do compile } */
+/* { dg-do compile { target { fixed_point } } } */
/* { dg-options "-O2 -mgp32 -mdsp" } */
/* { dg-final { scan-assembler-times "\tdpsq_sa.l.w\t\\\$ac" 2 } } */
diff --git a/gcc/testsuite/gcc.target/mips/fixed-scalar-type.c b/gcc/testsuite/gcc.target/mips/fixed-scalar-type.c
index a2e2fbfefe3..b4734f4d522 100644
--- a/gcc/testsuite/gcc.target/mips/fixed-scalar-type.c
+++ b/gcc/testsuite/gcc.target/mips/fixed-scalar-type.c
@@ -1,5 +1,5 @@
/* Test scalar fixed-point instructions */
-/* { dg-do compile } */
+/* { dg-do compile { target { fixed_point } } } */
/* { dg-options "-mdspr2 -O2" } */
/* { dg-final { scan-assembler-times "\taddu\t" 10 } } */
/* { dg-final { scan-assembler-times "\tsubu\t" 10 } } */
diff --git a/gcc/testsuite/gcc.target/mips/fixed-vector-type.c b/gcc/testsuite/gcc.target/mips/fixed-vector-type.c
index 2fb16aa1d07..9b67704794f 100644
--- a/gcc/testsuite/gcc.target/mips/fixed-vector-type.c
+++ b/gcc/testsuite/gcc.target/mips/fixed-vector-type.c
@@ -1,5 +1,5 @@
/* Test vector fixed-point instructions */
-/* { dg-do compile } */
+/* { dg-do compile { target { fixed_point } } } */
/* { dg-options "-mdspr2 -O2" } */
/* { dg-final { scan-assembler-times "\taddq_s.ph\t" 2 } } */
/* { dg-final { scan-assembler-times "\tsubq_s.ph\t" 2 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/avoid-indexed-addresses.c b/gcc/testsuite/gcc.target/powerpc/avoid-indexed-addresses.c
new file mode 100644
index 00000000000..b1b067283f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/avoid-indexed-addresses.c
@@ -0,0 +1,14 @@
+/* { dg-do compile { target { powerpc*-*-* } } } */
+/* { dg-options "-O2 -mavoid-indexed-addresses" } */
+
+/* { dg-final { scan-assembler-not "lbzx" } } */
+
+/* Ensure that an indexed load is not generated with
+ -mavoid-indexed-addresses. */
+
+char
+do_one (char *base, unsigned long offset)
+{
+ return base[offset];
+}
+
diff --git a/gcc/testsuite/gfortran.dg/array_constructor_24.f b/gcc/testsuite/gfortran.dg/array_constructor_24.f
index dad613b136e..ee7b55694a4 100644
--- a/gcc/testsuite/gfortran.dg/array_constructor_24.f
+++ b/gcc/testsuite/gfortran.dg/array_constructor_24.f
@@ -19,7 +19,7 @@
DDA1 = ATAN2 ((/(REAL(J1,KV),J1=1,10)/),
$ REAL((/(J1,J1=nf10,nf1,mf1)/), KV)) !fails
DDA2 = ATAN2 (DDA, DDA(10:1:-1))
- if (any (DDA1 .ne. DDA2)) call abort ()
+ if (any (abs(DDA1-DDA2) .gt. 1.0e-6)) call abort ()
END
subroutine FA6077 (nf10,nf1,mf1, ida)
diff --git a/gcc/testsuite/gfortran.dg/bound_6.f90 b/gcc/testsuite/gfortran.dg/bound_6.f90
new file mode 100644
index 00000000000..5e0e3f7dc55
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/bound_6.f90
@@ -0,0 +1,71 @@
+! { dg-do run }
+! Test the fix for PR38852 and PR39006 in which LBOUND did not work
+! for some arrays with negative strides.
+!
+! Contributed by Dick Hendrickson <dick.hendrickson@gmail.com>
+! Clive Page <clivegpage@googlemail.com>
+! and Mikael Morin <mikael.morin@tele2.fr>
+!
+program try_je0031
+ integer ida(4)
+ real dda(5,5,5,5,5)
+ integer, parameter :: nx = 4, ny = 3
+ interface
+ SUBROUTINE PR38852(IDA,DDA,nf2,nf5,mf2)
+ INTEGER IDA(4)
+ REAL DDA(5,5,5,5,5)
+ TARGET DDA
+ END SUBROUTINE
+ end interface
+ integer :: array1(nx,ny), array2(nx,ny)
+ data array2 / 1,2,3,4, 10,20,30,40, 100,200,300,400 /
+ array1 = array2
+ call PR38852(IDA,DDA,2,5,-2)
+ call PR39006(array1, array2(:,ny:1:-1))
+ call mikael ! http://gcc.gnu.org/ml/fortran/2009-01/msg00342.html
+contains
+ subroutine PR39006(array1, array2)
+ integer, intent(in) :: array1(:,:), array2(:,:)
+ integer :: j
+ do j = 1, ubound(array2,2)
+ if (any (array1(:,j) .ne. array2(:,4-j))) call abort
+ end do
+ end subroutine
+end
+
+SUBROUTINE PR38852(IDA,DDA,nf2,nf5,mf2)
+ INTEGER IDA(4)
+ REAL DLA(:,:,:,:)
+ REAL DDA(5,5,5,5,5)
+ POINTER DLA
+ TARGET DDA
+ DLA => DDA(2:3, 1:3:2, 5:4:-1, NF2, NF5:NF2:MF2)
+ IDA = UBOUND(DLA)
+ if (any(ida /= 2)) call abort
+ DLA => DDA(2:3, 1:3:2, 5:4:-1, 2, 5:2:-2)
+ IDA = UBOUND(DLA)
+ if (any(ida /= 2)) call abort
+!
+! These worked.
+!
+ DLA => DDA(2:3, 1:3:2, 5:4:-1, 2, 5:2:-2)
+ IDA = shape(DLA)
+ if (any(ida /= 2)) call abort
+ DLA => DDA(2:3, 1:3:2, 5:4:-1, 2, 5:2:-2)
+ IDA = LBOUND(DLA)
+ if (any(ida /= 1)) call abort
+END SUBROUTINE
+
+subroutine mikael
+ implicit none
+ call test (1, 3, 3)
+ call test (2, 3, 3)
+ call test (2, -1, 0)
+ call test (1, -1, 0)
+contains
+ subroutine test (a, b, expect)
+ integer :: a, b, expect
+ integer :: c(a:b)
+ if (ubound (c, 1) .ne. expect) call abort
+ end subroutine test
+end subroutine
diff --git a/gcc/testsuite/gfortran.dg/graphite/pr38953.f90 b/gcc/testsuite/gfortran.dg/graphite/pr38953.f90
new file mode 100644
index 00000000000..245db0dfe28
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/graphite/pr38953.f90
@@ -0,0 +1,115 @@
+! { dg-options "-O3 -floop-block -fgraphite-identity" }
+
+ MODULE MAIN1
+ INTEGER , PARAMETER :: IFMAX = 40 , IKN = 85 , ISTRG = 132 , &
+ & IERRN = 170 , ILEN_FLD = 80
+ CHARACTER PATH*2 , PPATH*2 , KEYWRD*8 , PKEYWD*8 , KEYWD*8 , &
+ & KTYPE*5 , RUNST*1
+ DIMENSION FIELD(IFMAX) , KEYWD(IKN) , RUNST(ISTRG)
+ LOGICAL :: DFAULT , CONC , DEPOS , DDEP , WDEP , RURAL , URBAN , &
+ & GRDRIS , NOSTD , NOBID , CLMPRO , MSGPRO , PERIOD , &
+ & OLM=.FALSE.
+ INTEGER :: NSRC , NREC , NGRP , NQF, &
+ & NARC , NOLM
+ CHARACTER NETID*8 , NETIDT*8 , PNETID*8 , NTID*8 , NTTYP*8 , &
+ & RECTYP*2 , PXSOID*8 , PESOID*8 , ARCID*8
+ ALLOCATABLE ::NETID(:) , RECTYP(:) , NTID(:) , NTTYP(:) , ARCID(:)
+ DATA (KEYWD(I),I=1,IKN)/'STARTING' , 'FINISHED' , 'TITLEONE' , &
+ & 'TITLETWO' , 'MODELOPT' , 'AVERTIME' , 'POLLUTID' , &
+ & 'HALFLIFE' , 'DCAYCOEF' , 'DEBUGOPT' , 'ELEVUNIT' , &
+ & 'FLAGPOLE' , 'RUNORNOT' , 'EVENTFIL' , 'SAVEFILE' , &
+ & 'INITFILE' , 'MULTYEAR' , 'ERRORFIL' , 'GASDEPDF' , &
+ & 'GDSEASON' , 'GASDEPVD' , 'GDLANUSE' , 'EVENTFIL' , &
+ & 'URBANOPT' , 'METHOD_2' , 'LOCATION' , 'SRCPARAM' , &
+ & 'BUILDHGT' , 'BUILDWID' , 'BUILDLEN' , 'XBADJ ' , &
+ & 'YBADJ ' , 'EMISFACT' , 'EMISUNIT' , 'PARTDIAM' , &
+ & 'MASSFRAX' , 'PARTDENS' , ' ' , ' ' , &
+ & ' ' , 'CONCUNIT' , 'DEPOUNIT' , 'HOUREMIS' , &
+ & 'GASDEPOS' , 'URBANSRC' , 'EVENTPER' , 'EVENTLOC' , &
+ & 'SRCGROUP' , 'GRIDCART' , 'GRIDPOLR' , 'DISCCART' , &
+ & 'DISCPOLR' , 'SURFFILE' , 'PROFFILE' , 'PROFBASE' , &
+ & ' ' , 'SURFDATA' , 'UAIRDATA' , 'SITEDATA' , &
+ & 'STARTEND' , 'DAYRANGE' , 'WDROTATE' , 'DTHETADZ' , &
+ & 'WINDCATS' , 'RECTABLE' , 'MAXTABLE' , 'DAYTABLE' , &
+ & 'MAXIFILE' , 'POSTFILE' , 'PLOTFILE' , 'TOXXFILE' , &
+ & 'EVENTOUT' , 'INCLUDED' , 'SCIMBYHR' , 'SEASONHR' , &
+ & 'AREAVERT' , 'PARTSIZE' , 'RANKFILE' , 'EVALCART' , &
+ & 'EVALFILE' , 'NO2EQUIL' , 'OZONEVAL' , 'OZONEFIL' , &
+ & 'NO2RATIO' , 'OLMGROUP'/
+ DIMENSION RESTAB(9,6,5) , STAB(9)
+ DATA (((RESTAB(I,J,K),I=1,9),J=1,6),K=1,5)/1.E07 , 60. , 120. , &
+ & 100. , 200. , 150. , 1.E07 , 1.E07 , 80. , 1.E07 , 2000. , &
+ & 2000. , 2000. , 2000. , 2000. , 1.E07 , 1.E07 , 2500. , &
+ & 1.E07 , 1000. , 1000. , 1000. , 2000. , 2000. , 1.E07 , &
+ & 1.E07 , 1000. , 100. , 200. , 100. , 2000. , 100. , 1500. , &
+ & 0. , 0. , 300. , 400. , 150. , 350. , 300. , 500. , 450. , &
+ & 0. , 1000. , 0. , 300. , 150. , 200. , 200. , 300. , 300. , &
+ & 2000. , 400. , 1000. , 1.E07 , 1.E07 , 1.E07 , 350. , &
+ & 1.E07 , 700. , 1.E07 , 1.E07 , 1.E07 , 1.E07 , 6500. , &
+ & 6500. , 3000. , 2000. , 2000. , 1.E07 , 1.E07 , 6500. , &
+ & 1.E07 , 400. , 300. , 500. , 600. , 1000. , 1.E07 , 1.E07 , &
+ & 300. , 100. , 150. , 100. , 1700. , 100. , 1200. , 0. , 0. ,&
+ & 200. , 400. , 200. , 350. , 300. , 500. , 450. , 0. , &
+ & 1000. , 0. , 300. , 150. , 200. , 200. , 300. , 300. , &
+ & 2000. , 400. , 800. , 1.E07 , 1.E07 , 1.E07 , 500. , 1.E07 ,&
+ & 1000. , 1.E07 , 1.E07 , 1.E07 , 1.E07 , 1.E07 , 9000. , &
+ & 6000. , 2000. , 2000. , 1.E07 , 1.E07 , 9000. , 1.E07 , &
+ & 1.E07 , 400. , 600. , 800. , 1600. , 1.E07 , 1.E07 , 800. , &
+ & 100. , 0. , 100. , 1500. , 100. , 1000. , 0. , 0. , 100. , &
+ & 400. , 150. , 350. , 300. , 500. , 450. , 0. , 0. , 1000. , &
+ & 300. , 150. , 200. , 200. , 300. , 300. , 2000. , 400. , &
+ & 1000. , 1.E07 , 1.E07 , 1.E07 , 800. , 1.E07 , 1600. , &
+ & 1.E07 , 1.E07 , 1.E07 , 1.E07 , 1.E07 , 1.E07 , 400. , &
+ & 1.E07 , 800. , 1.E07 , 1.E07 , 9000. , 1.E07 , 2000. , &
+ & 1000. , 600. , 2000. , 1200. , 1.E07 , 1.E07 , 800. , 100. ,&
+ & 0. , 10. , 1500. , 100. , 1000. , 0. , 0. , 50. , 100. , &
+ & 100. , 100. , 100. , 200. , 200. , 0. , 1000. , 100. , &
+ & 600. , 3500. , 3500. , 3500. , 500. , 500. , 2000. , 400. , &
+ & 3500. , 1.E07 , 100. , 120. , 100. , 200. , 150. , 1.E07 , &
+ & 1.E07 , 80. , 1.E07 , 2000. , 2000. , 1500. , 2000. , &
+ & 2000. , 1.E07 , 1.E07 , 2000. , 1.E07 , 1000. , 250. , &
+ & 350. , 500. , 700. , 1.E07 , 1.E07 , 300. , 100. , 50. , &
+ & 80. , 1500. , 100. , 1000. , 0. , 0. , 200. , 500. , 150. , &
+ & 350. , 300. , 500. , 450. , 0. , 1000. , 0. , 300. , 150. , &
+ & 200. , 200. , 300. , 300. , 2000. , 400. , 1000./
+ END
+ SUBROUTINE SHAVE
+ USE MAIN1
+ IF ( PERIOD ) THEN
+ 9020 FORMAT ('(''*'',8X,''X'',13X,''Y'',4X,',I1, &
+ &'(2X,3A4),4X,''ZELEV'', 4X,''ZHILL'',4X,''ZFLAG'',4X,''AVE'',5X,&
+ &_______ ________ ________'')')
+ ENDIF
+ DO IGRP = 1 , NUMGRP
+ IF ( IANPST(IGRP).EQ.1 ) THEN
+ IF ( IANFRM(IGRP).EQ.0 ) THEN
+ DO IREC = 1 , NUMREC
+ ENDDO
+ ENDIF
+ DO IREC = 1 , NUMREC
+ IF ( RECTYP(IREC).EQ.'DC' ) THEN
+ WRITE (IOUNIT,9082) SRCID(ISRF) , SRCTYP(ISRF) , &
+ & AXS(ISRF) , AYS(ISRF) , AZS(ISRF) &
+ & , (J,AXR(IREC+J-1),AYR(IREC+J-1), &
+ & HCLMSG(IREC+J-1,IHNUM,IGRP,IAVE, &
+ & ITYP),J=1,36)
+ 9082 FORMAT (' BOUNDARY RECEPTOR NETWORK OF SOURCE ID: ', &
+ & 18(2(1X,I4,3X,F10.2,', ',F10.2,',',F13.5,A1, &
+ & '(',I8.8,')',7X),/),/)
+ ENDIF
+ ENDDO
+ ENDIF
+ ENDDO
+ END
+ USE MAIN1
+ IF ( ICOUNT.NE.0 .AND. JCOUNT.NE.0 ) THEN
+ DO J = 1 , JCOUNT
+ DO I = 1 , ICOUNT
+ IF ( ISET.GT.NREC ) THEN
+ GOTO 999
+ ENDIF
+ ENDDO
+ ENDDO
+ ENDIF
+ 999 CONTINUE
+ END
diff --git a/gcc/testsuite/gfortran.dg/mvbits_6.f90 b/gcc/testsuite/gfortran.dg/mvbits_6.f90
new file mode 100644
index 00000000000..c8986df21ca
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/mvbits_6.f90
@@ -0,0 +1,33 @@
+! { dg-do compile }
+
+! PR fortran/38883
+! This ICE'd because the temporary-creation in the MVBITS call was wrong.
+! This is the original test from the PR, the complicated version.
+
+! Contributed by Dick Hendrickson <dick.hendrickson@gmail.com>
+
+ module yg0009_stuff
+
+ type unseq
+ integer I
+ end type
+
+ contains
+
+ SUBROUTINE YG0009(TDA2L,NF4,NF3,NF1,MF1,MF4,MF3)
+ TYPE(UNSEQ) TDA2L(NF4,NF3)
+
+ CALL MVBITS (TDA2L(NF4:NF1:MF1,NF1:NF3)%I,2, &
+ 4, TDA2L(-MF4:-MF1:-NF1,-MF1:-MF3)%I, 3)
+
+ END SUBROUTINE
+
+ end module yg0009_stuff
+
+ program try_yg0009
+ use yg0009_stuff
+ type(unseq) tda2l(4,3)
+
+ call yg0009(tda2l,4,3,1,-1,-4,-3)
+
+ end
diff --git a/gcc/testsuite/gfortran.dg/mvbits_7.f90 b/gcc/testsuite/gfortran.dg/mvbits_7.f90
new file mode 100644
index 00000000000..2c7cab8ac24
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/mvbits_7.f90
@@ -0,0 +1,30 @@
+! { dg-do run }
+
+! PR fortran/38883
+! This ICE'd because the temporary-creation in the MVBITS call was wrong.
+
+! Contributed by Paul Richard Thomas <paul.richard.thomas@gmail.com>
+
+ type t
+ integer :: I
+ character(9) :: chr
+ end type
+ type(t) :: x(4,3)
+ type(t) :: y(4,3)
+ x = reshape ([((t (i*j, "a"),i = 1,4), j=1,3)], [4,3])
+ call foo (x)
+ y = reshape ([((t (i*j*2, "a"),i = 1,4), j=1,3)], [4,3])
+ call bar(y, 4, 3, 1, -1, -4, -3)
+ if (any (x%i .ne. y%i)) call abort
+contains
+ SUBROUTINE foo (x)
+ TYPE(t) x(4, 3) ! No dependency at all
+ CALL MVBITS (x%i, 0, 6, x%i, 8)
+ x%i = x%i * 2
+ END SUBROUTINE
+ SUBROUTINE bar (x, NF4, NF3, NF1, MF1, MF4, MF3)
+ TYPE(t) x(NF4, NF3) ! Dependency through variable indices
+ CALL MVBITS (x(NF4:NF1:MF1, NF1:NF3)%i, 1, &
+ 6, x(-MF4:-MF1:-NF1, -MF1:-MF3)%i, 9)
+ END SUBROUTINE
+end
diff --git a/gcc/testsuite/gfortran.dg/mvbits_8.f90 b/gcc/testsuite/gfortran.dg/mvbits_8.f90
new file mode 100644
index 00000000000..f69d1e84f9a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/mvbits_8.f90
@@ -0,0 +1,36 @@
+! { dg-do run }
+
+! PR fortran/38883
+! This ICE'd because the temporary-creation in the MVBITS call was wrong.
+
+PROGRAM main
+ IMPLICIT NONE
+
+ TYPE inner
+ INTEGER :: i
+ INTEGER :: j
+ END TYPE inner
+
+ TYPE outer
+ TYPE(inner) :: comp(2)
+ END TYPE outer
+
+ TYPE(outer) :: var
+
+ var%comp%i = (/ 1, 2 /)
+ var%comp%j = (/ 3, 4 /)
+
+ CALL foobar (var, 1, 2)
+
+ IF (ANY (var%comp%i /= (/ 1, 2 /))) CALL abort ()
+ IF (ANY (var%comp%j /= (/ 3, 4 /))) CALL abort ()
+
+CONTAINS
+
+ SUBROUTINE foobar (x, lower, upper)
+ TYPE(outer), INTENT(INOUT) :: x
+ INTEGER, INTENT(IN) :: lower, upper
+ CALL MVBITS (x%comp%i, 1, 2, x%comp(lower:upper)%i, 1)
+ END SUBROUTINE foobar
+
+END PROGRAM main
diff --git a/gcc/toplev.h b/gcc/toplev.h
index 552ed3367d2..0cd62ef22be 100644
--- a/gcc/toplev.h
+++ b/gcc/toplev.h
@@ -140,7 +140,6 @@ extern int flag_unroll_all_loops;
extern int flag_unswitch_loops;
extern int flag_cprop_registers;
extern int time_report;
-extern int flag_ira;
extern int flag_ira_coalesce;
extern int flag_ira_move_spills;
extern int flag_ira_share_save_slots;
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index 745957fcea2..8eca7c02166 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -256,10 +256,15 @@ generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
/* Test for a positive stride, iterating over every element. */
if (integer_zerop (fold_build2 (MINUS_EXPR, integer_type_node, DR_STEP (dr),
TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
- addr_base = fold_build2 (PLUS_EXPR, TREE_TYPE (DR_BASE_ADDRESS (dr)),
- DR_BASE_ADDRESS (dr),
- size_binop (PLUS_EXPR,
- DR_OFFSET (dr), DR_INIT (dr)));
+ {
+ tree offset = fold_convert (sizetype,
+ size_binop (PLUS_EXPR,
+ DR_OFFSET (dr),
+ DR_INIT (dr)));
+ addr_base = fold_build2 (POINTER_PLUS_EXPR,
+ TREE_TYPE (DR_BASE_ADDRESS (dr)),
+ DR_BASE_ADDRESS (dr), offset);
+ }
/* Test for a negative stride, iterating over every element. */
else if (integer_zerop (fold_build2 (PLUS_EXPR, integer_type_node,
@@ -434,11 +439,13 @@ generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
basic_block dest = single_exit (loop)->dest;
prop_phis (dest);
make_edge (src, dest, EDGE_FALLTHRU);
- set_immediate_dominator (CDI_DOMINATORS, dest, src);
cancel_loop_tree (loop);
for (i = 0; i < nbbs; i++)
delete_basic_block (bbs[i]);
+
+ set_immediate_dominator (CDI_DOMINATORS, dest,
+ recompute_dominator (CDI_DOMINATORS, dest));
}
end:
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 589428bd507..9b2add80e74 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -472,7 +472,7 @@ extern struct rtl_opt_pass pass_web;
extern struct rtl_opt_pass pass_cse2;
extern struct rtl_opt_pass pass_df_initialize_opt;
extern struct rtl_opt_pass pass_df_initialize_no_opt;
-extern struct rtl_opt_pass pass_regclass_init;
+extern struct rtl_opt_pass pass_reginfo_init;
extern struct rtl_opt_pass pass_subregs_of_mode_init;
extern struct rtl_opt_pass pass_subregs_of_mode_finish;
extern struct rtl_opt_pass pass_inc_dec;
@@ -490,8 +490,6 @@ extern struct rtl_opt_pass pass_mode_switching;
extern struct rtl_opt_pass pass_see;
extern struct rtl_opt_pass pass_sms;
extern struct rtl_opt_pass pass_sched;
-extern struct rtl_opt_pass pass_local_alloc;
-extern struct rtl_opt_pass pass_global_alloc;
extern struct rtl_opt_pass pass_ira;
extern struct rtl_opt_pass pass_postreload;
extern struct rtl_opt_pass pass_clean_state;
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 4e9d28bf9eb..3c76fcd2db4 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -2483,6 +2483,24 @@ update_alias_info_1 (gimple stmt, struct alias_info *ai)
if (addr_taken)
bitmap_ior_into (gimple_addressable_vars (cfun), addr_taken);
+ /* If we have a call or an assignment, see if the lhs contains
+ a local decl that requires not to be a gimple register. */
+ if (gimple_code (stmt) == GIMPLE_ASSIGN
+ || gimple_code (stmt) == GIMPLE_CALL)
+ {
+ tree lhs = gimple_get_lhs (stmt);
+ /* A plain decl does not need it set. */
+ if (lhs && handled_component_p (lhs))
+ {
+ tree var = get_base_address (lhs);
+ if (DECL_P (var)
+ /* We are not going to mess with RESULT_DECL anyway. */
+ && TREE_CODE (var) != RESULT_DECL
+ && is_gimple_reg_type (TREE_TYPE (var)))
+ bitmap_set_bit (gimple_addressable_vars (cfun), DECL_UID (var));
+ }
+ }
+
/* Process each operand use. For pointers, determine whether they
are dereferenced by the statement, or whether their value
escapes, etc. */
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index d0fcf3937a7..ef6890c65c3 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -966,7 +966,6 @@ ccp_fold (gimple stmt)
so this should almost always return a simplified RHS. */
tree lhs = gimple_assign_lhs (stmt);
tree op0 = gimple_assign_rhs1 (stmt);
- tree res;
/* Simplify the operand down to a constant. */
if (TREE_CODE (op0) == SSA_NAME)
@@ -1002,20 +1001,8 @@ ccp_fold (gimple stmt)
return op0;
}
- res = fold_unary (subcode, gimple_expr_type (stmt), op0);
-
- /* If the operation was a conversion do _not_ mark a
- resulting constant with TREE_OVERFLOW if the original
- constant was not. These conversions have implementation
- defined behavior and retaining the TREE_OVERFLOW flag
- here would confuse later passes such as VRP. */
- if (res
- && TREE_CODE (res) == INTEGER_CST
- && TREE_CODE (op0) == INTEGER_CST
- && CONVERT_EXPR_CODE_P (subcode))
- TREE_OVERFLOW (res) = TREE_OVERFLOW (op0);
-
- return res;
+ return fold_unary_ignore_overflow (subcode,
+ gimple_expr_type (stmt), op0);
}
case GIMPLE_BINARY_RHS:
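The block deleted above moved into the new fold_unary_ignore_overflow helper: when folding a conversion of an INTEGER_CST, the result must not inherit a TREE_OVERFLOW mark, because such narrowing has implementation-defined (not undefined) behaviour and a stale overflow flag would confuse later passes such as VRP. A user-level sketch of the kind of conversion involved, assuming GCC's documented modulo-2^N wrapping for signed narrowing and a 16-bit short:

#include <cstdio>

int main ()
{
  long big = 100000L;
  /* 100000 does not fit in 16 bits; GCC reduces it modulo 2^16 and
     reinterprets the result as signed, giving -31072.  The constant
     folder performs this at compile time without marking the result
     as overflowed.  */
  short narrow = static_cast<short> (big);
  std::printf ("%d\n", narrow);
  return 0;
}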
diff --git a/gcc/tree-ssa-dse.c b/gcc/tree-ssa-dse.c
index b4be5142e01..8a724953419 100644
--- a/gcc/tree-ssa-dse.c
+++ b/gcc/tree-ssa-dse.c
@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-dump.h"
#include "domwalk.h"
#include "flags.h"
+#include "langhooks.h"
/* This file implements dead store elimination.
@@ -660,20 +661,35 @@ execute_simple_dse (void)
tree op;
bool removed = false;
ssa_op_iter iter;
+ tree size;
- if (gimple_stored_syms (stmt)
- && !bitmap_empty_p (gimple_stored_syms (stmt))
- && (is_gimple_assign (stmt)
- || (is_gimple_call (stmt)
- && gimple_call_lhs (stmt)))
- && !bitmap_intersect_p (gimple_stored_syms (stmt), variables_loaded))
+ if (is_gimple_assign (stmt)
+ && AGGREGATE_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
+ && (size = lang_hooks.expr_size (gimple_assign_lhs (stmt)))
+ && integer_zerop (size))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, " Deleted zero-sized store '");
+ print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+ fprintf (dump_file, "'\n");
+ }
+ removed = true;
+ gsi_remove (&gsi, true);
+ todo |= TODO_cleanup_cfg;
+ }
+ else if (gimple_stored_syms (stmt)
+ && !bitmap_empty_p (gimple_stored_syms (stmt))
+ && (is_gimple_assign (stmt)
+ || (is_gimple_call (stmt)
+ && gimple_call_lhs (stmt)))
+ && !bitmap_intersect_p (gimple_stored_syms (stmt),
+ variables_loaded))
{
unsigned int i;
bitmap_iterator bi;
bool dead = true;
-
-
/* See if STMT only stores to write-only variables and
verify that there are no volatile operands. tree-ssa-operands
sets has_volatile_ops flag for all statements involving
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 0717a366bf3..a3f91f07d58 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -434,6 +434,7 @@ static tree create_expression_by_pieces (basic_block, pre_expr, gimple_seq *,
gimple, tree);
static tree find_or_generate_expression (basic_block, pre_expr, gimple_seq *,
gimple);
+static unsigned int get_expr_value_id (pre_expr);
/* We can add and remove elements and entries to and from sets
and hash tables, so we use alloc pools for them. */
@@ -559,6 +560,8 @@ add_to_value (unsigned int v, pre_expr e)
{
bitmap_set_t set;
+ gcc_assert (get_expr_value_id (e) == v);
+
if (v >= VEC_length (bitmap_set_t, value_expressions))
{
VEC_safe_grow_cleared (bitmap_set_t, heap, value_expressions,
@@ -2975,7 +2978,7 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
pre_expr eprime;
edge_iterator ei;
tree type = get_expr_type (expr);
- tree temp, res;
+ tree temp;
gimple phi;
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -3131,8 +3134,12 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
if (TREE_CODE (type) == COMPLEX_TYPE
|| TREE_CODE (type) == VECTOR_TYPE)
DECL_GIMPLE_REG_P (temp) = 1;
-
phi = create_phi_node (temp, block);
+
+ gimple_set_plf (phi, NECESSARY, false);
+ VN_INFO_GET (gimple_phi_result (phi))->valnum = gimple_phi_result (phi);
+ VN_INFO (gimple_phi_result (phi))->value_id = val;
+ VEC_safe_push (gimple, heap, inserted_exprs, phi);
FOR_EACH_EDGE (pred, ei, block->preds)
{
pre_expr ae = avail[pred->src->index];
@@ -3143,20 +3150,6 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
else
add_phi_arg (phi, PRE_EXPR_NAME (avail[pred->src->index]), pred);
}
- /* If the PHI node is already available, use it. */
- if ((res = vn_phi_lookup (phi)) != NULL_TREE)
- {
- gimple_stmt_iterator gsi = gsi_for_stmt (phi);
- remove_phi_node (&gsi, true);
- release_defs (phi);
- add_to_value (val, get_or_alloc_expr_for_name (res));
- return false;
- }
-
- gimple_set_plf (phi, NECESSARY, false);
- VN_INFO_GET (gimple_phi_result (phi))->valnum = gimple_phi_result (phi);
- VN_INFO (gimple_phi_result (phi))->value_id = val;
- VEC_safe_push (gimple, heap, inserted_exprs, phi);
newphi = get_or_alloc_expr_for_name (gimple_phi_result (phi));
add_to_value (val, newphi);
@@ -3330,7 +3323,7 @@ do_regular_insertion (basic_block block, basic_block dom)
pre_stats.constified++;
}
else
- info->valnum = PRE_EXPR_NAME (edoubleprime);
+ info->valnum = VN_INFO (PRE_EXPR_NAME (edoubleprime))->valnum;
info->value_id = new_val;
}
}
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 78af47ed5eb..87ddcb6872c 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -1481,7 +1481,7 @@ static VEC(tree, heap) *shared_lookup_phiargs;
value number if it exists in the hash table. Return NULL_TREE if
it does not exist in the hash table. */
-tree
+static tree
vn_phi_lookup (gimple phi)
{
void **slot;
@@ -1761,7 +1761,8 @@ visit_reference_op_load (tree lhs, tree op, gimple stmt)
tree tem = valueize_expr (vn_get_expr_for (TREE_OPERAND (val, 0)));
if ((CONVERT_EXPR_P (tem)
|| TREE_CODE (tem) == VIEW_CONVERT_EXPR)
- && (tem = fold_unary (TREE_CODE (val), TREE_TYPE (val), tem)))
+ && (tem = fold_unary_ignore_overflow (TREE_CODE (val),
+ TREE_TYPE (val), tem)))
val = tem;
}
result = val;
@@ -2123,7 +2124,7 @@ simplify_binary_expression (gimple stmt)
fold_defer_overflow_warnings ();
result = fold_binary (gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_get_lhs (stmt)), op0, op1);
+ TREE_TYPE (gimple_get_lhs (stmt)), op0, op1);
if (result)
STRIP_USELESS_TYPE_CONVERSION (result);
@@ -2182,8 +2183,8 @@ simplify_unary_expression (gimple stmt)
if (op0 == orig_op0)
return NULL_TREE;
- result = fold_unary (gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt), op0);
+ result = fold_unary_ignore_overflow (gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt), op0);
if (result)
{
STRIP_USELESS_TYPE_CONVERSION (result);
diff --git a/gcc/tree-ssa-sccvn.h b/gcc/tree-ssa-sccvn.h
index 74f43c3df2e..df0f3dfe6e7 100644
--- a/gcc/tree-ssa-sccvn.h
+++ b/gcc/tree-ssa-sccvn.h
@@ -184,7 +184,6 @@ vn_reference_t vn_reference_insert (tree, tree, VEC (tree, gc) *);
vn_reference_t vn_reference_insert_pieces (VEC (tree, gc) *,
VEC (vn_reference_op_s, heap) *,
tree, unsigned int);
-tree vn_phi_lookup (gimple);
hashval_t vn_nary_op_compute_hash (const vn_nary_op_t);
int vn_nary_op_eq (const void *, const void *);
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 8b49556b3c0..21566bbe5d8 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -3044,8 +3044,14 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p)
happens below, since it will fall into the default case. The only
case we know something about an integer treated like a pointer is
when it is the NULL pointer, and then we just say it points to
- NULL. */
- if (TREE_CODE (t) == INTEGER_CST
+ NULL.
+
+ Do not do that if -fno-delete-null-pointer-checks though, because
+ in that case *NULL does not fail, so it _should_ alias *anything.
+ It is not worth adding a new option or renaming the existing one,
+ since this case is relatively obscure. */
+ if (flag_delete_null_pointer_checks
+ && TREE_CODE (t) == INTEGER_CST
&& integer_zerop (t))
{
temp.var = nothing_id;
@@ -4703,7 +4709,8 @@ set_uids_in_ptset (tree ptr, bitmap into, bitmap from, bool is_derefed,
type-based pruning disabled. */
if (vi->is_artificial_var
|| !is_derefed
- || no_tbaa_pruning)
+ || no_tbaa_pruning
+ || vi->no_tbaa_pruning)
bitmap_set_bit (into, DECL_UID (vi->decl));
else
{
@@ -5496,19 +5503,8 @@ compute_points_to_sets (void)
find_func_aliases (phi);
}
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
- {
- gimple stmt = gsi_stmt (gsi);
-
- find_func_aliases (stmt);
-
- /* The information in GIMPLE_CHANGE_DYNAMIC_TYPE statements
- has now been captured, and we can remove them. */
- if (gimple_code (stmt) == GIMPLE_CHANGE_DYNAMIC_TYPE)
- gsi_remove (&gsi, true);
- else
- gsi_next (&gsi);
- }
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ find_func_aliases (gsi_stmt (gsi));
}
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index 935cad676c1..75e4685149f 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -1496,9 +1496,13 @@ warn_uninitialized_var (tree *tp, int *walk_subtrees, void *data_)
|| !gimple_aliases_computed_p (cfun))
return NULL_TREE;
+ /* If the load happens as part of a call do not warn about it. */
+ if (is_gimple_call (data->stmt))
+ return NULL_TREE;
+
vuse = SINGLE_SSA_USE_OPERAND (data->stmt, SSA_OP_VUSE);
if (vuse == NULL_USE_OPERAND_P)
- return NULL_TREE;
+ return NULL_TREE;
op = USE_FROM_PTR (vuse);
if (t != SSA_NAME_VAR (op)
@@ -1731,7 +1735,12 @@ execute_update_addresses_taken (void)
|| bitmap_bit_p (addresses_taken, DECL_UID (var)))
continue;
- if (TREE_ADDRESSABLE (var))
+ if (TREE_ADDRESSABLE (var)
+ /* Do not change TREE_ADDRESSABLE if we need to preserve var as
+ a non-register. Otherwise we are confused and forget to
+ add virtual operands for it. */
+ && (!is_gimple_reg_type (TREE_TYPE (var))
+ || !bitmap_bit_p (not_reg_needs, DECL_UID (var))))
{
TREE_ADDRESSABLE (var) = 0;
if (is_gimple_reg (var))
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 4b6caca1c37..8ca65b33850 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -1620,7 +1620,7 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
all should be optimized away above us. */
if ((cond_code == LT_EXPR
&& compare_values (max, min) == 0)
- || is_overflow_infinity (max))
+ || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
set_value_range_to_varying (vr_p);
else
{
@@ -1655,7 +1655,7 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
all should be optimized away above us. */
if ((cond_code == GT_EXPR
&& compare_values (min, max) == 0)
- || is_overflow_infinity (min))
+ || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
set_value_range_to_varying (vr_p);
else
{
diff --git a/gcc/tree.h b/gcc/tree.h
index f7ff2e6d92b..b7300fa8b87 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -3412,9 +3412,6 @@ struct tree_target_option GTY(())
/* Return a tree node that encapsulates the current target options. */
extern tree build_target_option_node (void);
-extern void set_underlying_type (tree x);
-
-extern bool is_typedef_decl (tree x);
/* Define the overall contents of a tree node.
It may be any of the structures declared above
@@ -4736,6 +4733,7 @@ extern tree native_interpret_expr (tree, const unsigned char *, int);
extern tree fold (tree);
extern tree fold_unary (enum tree_code, tree, tree);
+extern tree fold_unary_ignore_overflow (enum tree_code, tree, tree);
extern tree fold_binary (enum tree_code, tree, tree, tree);
extern tree fold_ternary (enum tree_code, tree, tree, tree, tree);
extern tree fold_build1_stat (enum tree_code, tree, tree MEM_STAT_DECL);
diff --git a/libjava/ChangeLog b/libjava/ChangeLog
index ebef09f23ef..d28dcc86ec8 100644
--- a/libjava/ChangeLog
+++ b/libjava/ChangeLog
@@ -1,3 +1,9 @@
+2009-01-26 Jakub Jelinek <jakub@redhat.com>
+
+ PR libgcj/38872
+ * gcj/javaprims.h (_Jv_CreateJavaVM): Change to extern "C" symbol.
+	* libgcj_bc.c (_Jv_GetStringChars): Remove.
+
2009-01-12 Andrew Haley <aph@redhat.com>
* sun/misc/natUnsafe.cc (spinlock contructor): Call sched_yield().
diff --git a/libjava/gcj/javaprims.h b/libjava/gcj/javaprims.h
index 313ecc02efc..3a0c964a437 100644
--- a/libjava/gcj/javaprims.h
+++ b/libjava/gcj/javaprims.h
@@ -1,8 +1,8 @@
// javaprims.h - Main external header file for libgcj. -*- c++ -*-
-/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
- Free Software Foundation
+/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
+ 2008, 2009 Free Software Foundation
This file is part of libgcj.
@@ -919,7 +919,7 @@ struct _Jv_VMInitArgs
jboolean ignoreUnrecognized;
};
-extern jint _Jv_CreateJavaVM (struct _Jv_VMInitArgs*);
+extern "C" jint _Jv_CreateJavaVM (struct _Jv_VMInitArgs*);
void
_Jv_ThreadRun (java::lang::Thread* thread);
diff --git a/libjava/libgcj_bc.c b/libjava/libgcj_bc.c
index 7073d7308b2..e8da443f5fb 100644
--- a/libjava/libgcj_bc.c
+++ b/libjava/libgcj_bc.c
@@ -1,6 +1,6 @@
/* libgcj_bc.c */
-/* Copyright (C) 2006 Free Software Foundation
+/* Copyright (C) 2006, 2009 Free Software Foundation
This file is part of libgcj.
@@ -89,7 +89,6 @@ void _Jv_AttachCurrentThreadAsDaemon () {}
void _Jv_CreateJavaVM () {}
void _Jv_DetachCurrentThread () {}
void _Jv_Free () {}
-void _Jv_GetStringChars () {}
void _Jv_GetStringUTFLength () {}
void _Jv_GetStringUTFRegion () {}
void _Jv_Malloc () {}
diff --git a/libmudflap/ChangeLog b/libmudflap/ChangeLog
index 25dcdf9730e..e42a15b5605 100644
--- a/libmudflap/ChangeLog
+++ b/libmudflap/ChangeLog
@@ -1,3 +1,13 @@
+2009-01-23 Jie Zhang <jie.zhang@analog.com>
+
+ * mf-impl.h (__mf_get_state, __mf_set_state): Don't use
+ __thread when TLS support is emulated.
+ * mf-hooks3.c (__mf_get_state, __mf_set_state): Likewise.
+ * mf-runtime.c (__mf_state_1): Likewise.
+ * configure.ac: Use GCC_CHECK_EMUTLS.
+ * configure: Regenerate.
+ * config.h.in: Regenerate.
+
2008-12-18 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
* configure: Regenerate.
diff --git a/libmudflap/config.h.in b/libmudflap/config.h.in
index 94b2b8a2098..0c88f61553f 100644
--- a/libmudflap/config.h.in
+++ b/libmudflap/config.h.in
@@ -277,5 +277,8 @@
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
+/* Define to 1 if the target use emutls for thread-local storage. */
+#undef USE_EMUTLS
+
/* Version number of package */
#undef VERSION
diff --git a/libmudflap/configure b/libmudflap/configure
index ad10a2c238d..758ac8f0a30 100755
--- a/libmudflap/configure
+++ b/libmudflap/configure
@@ -13028,6 +13028,37 @@ _ACEOF
fi
+ echo "$as_me:$LINENO: checking whether the thread-local storage support is from emutls" >&5
+echo $ECHO_N "checking whether the thread-local storage support is from emutls... $ECHO_C" >&6
+if test "${gcc_cv_use_emutls+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+ gcc_cv_use_emutls=no
+ echo '__thread int a; int b; int main() { return a = b; }' > conftest.c
+ if { ac_try='${CC-cc} -Werror -S -o conftest.s conftest.c 1>&5'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ if grep __emutls_get_address conftest.s > /dev/null; then
+ gcc_cv_use_emutls=yes
+ fi
+ fi
+ rm -f conftest.*
+
+fi
+echo "$as_me:$LINENO: result: $gcc_cv_use_emutls" >&5
+echo "${ECHO_T}$gcc_cv_use_emutls" >&6
+ if test "$gcc_cv_use_emutls" = "yes" ; then
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_EMUTLS 1
+_ACEOF
+
+ fi
+
ac_config_files="$ac_config_files Makefile testsuite/Makefile testsuite/mfconfig.exp"
cat >confcache <<\_ACEOF
diff --git a/libmudflap/configure.ac b/libmudflap/configure.ac
index a22be278d32..8ce99a10270 100644
--- a/libmudflap/configure.ac
+++ b/libmudflap/configure.ac
@@ -265,6 +265,7 @@ fi
# See if we support thread-local storage.
GCC_CHECK_TLS
+GCC_CHECK_EMUTLS
AC_CONFIG_FILES([Makefile testsuite/Makefile testsuite/mfconfig.exp])
AC_OUTPUT
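GCC_CHECK_EMUTLS, as the regenerated configure above shows, compiles a tiny __thread declaration with -Werror -S and greps the assembly for __emutls_get_address; if it is found, TLS is only emulated and libmudflap takes the non-__thread __mf_get_state/__mf_set_state path, as the mf-impl.h and mf-hooks3.c hunks below arrange. Roughly the same probe as a standalone translation unit (a sketch; build with 'g++ -S' and inspect the output):

/* On targets that emulate TLS, the generated assembly calls
   __emutls_get_address instead of using native TLS relocations.  */
__thread int counter;

int bump (void)
{
  return ++counter;
}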
diff --git a/libmudflap/mf-hooks3.c b/libmudflap/mf-hooks3.c
index dec4cd63b83..5792a14f5db 100644
--- a/libmudflap/mf-hooks3.c
+++ b/libmudflap/mf-hooks3.c
@@ -78,7 +78,7 @@ DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
/* Multithreading support hooks. */
-#ifndef HAVE_TLS
+#if !defined(HAVE_TLS) || defined(USE_EMUTLS)
/* We don't have TLS. Ordinarily we could use pthread keys, but since we're
commandeering malloc/free that presents a few problems. The first is that
we'll recurse from __mf_get_state to pthread_setspecific to malloc back to
@@ -217,7 +217,7 @@ __mf_pthread_cleanup (void *arg)
if (__mf_opts.heur_std_data)
__mf_unregister (&errno, sizeof (errno), __MF_TYPE_GUESS);
-#ifndef HAVE_TLS
+#if !defined(HAVE_TLS) || defined(USE_EMUTLS)
struct mf_thread_data *data = __mf_find_threadinfo (0);
if (data)
data->used_p = 0;
diff --git a/libmudflap/mf-impl.h b/libmudflap/mf-impl.h
index 728a58bc05d..15d0b9ae53a 100644
--- a/libmudflap/mf-impl.h
+++ b/libmudflap/mf-impl.h
@@ -244,7 +244,7 @@ extern pthread_mutex_t __mf_biglock;
#define UNLOCKTH() do {} while (0)
#endif
-#if defined(LIBMUDFLAPTH) && !defined(HAVE_TLS)
+#if defined(LIBMUDFLAPTH) && (!defined(HAVE_TLS) || defined(USE_EMUTLS))
extern enum __mf_state_enum __mf_get_state (void);
extern void __mf_set_state (enum __mf_state_enum);
#else
diff --git a/libmudflap/mf-runtime.c b/libmudflap/mf-runtime.c
index 79fdb323dbe..93b895a1573 100644
--- a/libmudflap/mf-runtime.c
+++ b/libmudflap/mf-runtime.c
@@ -178,7 +178,7 @@ struct __mf_options __mf_opts;
int __mf_starting_p = 1;
#ifdef LIBMUDFLAPTH
-#ifdef HAVE_TLS
+#if defined(HAVE_TLS) && !defined(USE_EMUTLS)
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#endif
#else
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 27417109735..6a58259edb7 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,54 @@
+2009-01-28 Benjamin Kosnik <bkoz@redhat.com>
+
+ * testsuite/util/testsuite_common_types.h (has_trivial_cons_dtor): New.
+ (standard_layout): Just use !has_virtual_destructor for now.
+ * testsuite/30_threads/mutex/requirements/standard_layout.cc: New.
+ * testsuite/30_threads/timed_mutex/requirements/standard_layout.cc: New.
+ * testsuite/30_threads/recursive_mutex/requirements/
+ standard_layout.cc: New.
+ * testsuite/30_threads/condition_variable/requirements/
+ standard_layout.cc: New.
+ * testsuite/29_atomics/atomic_address/requirements/
+ standard_layout.cc: Adjust.
+ * testsuite/29_atomics/atomic_flag/requirements/
+ standard_layout.cc: Same.
+ * testsuite/29_atomics/atomic_address/requirements/trivial.cc: New.
+ * testsuite/29_atomics/atomic_integral/requirements/trivial.cc: Same.
+ * testsuite/29_atomics/atomic_flag/requirements/trivial.cc: Same.
+ * testsuite/30_threads/condition_variable/requirements/typedefs.cc: New.
+ * testsuite/29_atomics/atomic_integral/cons/assign_neg.cc: Adjust
+ line numbers.
+ * testsuite/29_atomics/atomic_integral/cons/copy_neg.cc: Same.
+ * testsuite/29_atomics/atomic/cons/assign_neg.cc: Same.
+ * testsuite/29_atomics/atomic/cons/copy_neg.cc: Same.
+ * testsuite/29_atomics/atomic/requirements/standard_layout.cc:
+ Remove, not required.
+
+2009-01-22 Jonathan Wakely <jwakely.gcc@gmail.com>
+
+ * include/std/thread (__thread_data_base, thread): Rename member
+ functions to match coding style.
+ (thread::thread,thread::operator=): Define move operations.
+ * src/thread.cc (__thread_data_base, thread): Rename member functions.
+ * config/abi/pre/gnu.ver: Adjust.
+
+2009-01-22 Benjamin Kosnik <bkoz@redhat.com>
+
+ PR libstdc++/38384
+ * acinclude.m4 (GLIBCXX_ENABLE_SYMVERS): Disable symbol versioning
+ on HPUX.
+ * configure: Regenerate.
+
+2009-01-22 Dodji Seketeli <dodji@redhat.com>
+
+ * include/ext/bitmap_allocator.h: Reverting changes related to PR
+ c++/26693.
+
+2009-01-21 Benjamin Kosnik <bkoz@redhat.com>
+
+ * testsuite/29_atomics/headers/stdatomic.h/functions.c: Remove
+ atomic_flag_fence.
+
2009-01-21 Dodji Seketeli <dodji@redhat.com>
* include/ext/bitmap_allocator.h: the typedefs should be made public
diff --git a/libstdc++-v3/acinclude.m4 b/libstdc++-v3/acinclude.m4
index 11f58f66bf6..22e6bc49c5a 100644
--- a/libstdc++-v3/acinclude.m4
+++ b/libstdc++-v3/acinclude.m4
@@ -2728,7 +2728,7 @@ if test x$enable_symvers = xyes ; then
else
if test $with_gnu_ld = yes ; then
case ${target_os} in
- cygwin* | pe | mingw32*)
+ cygwin* | pe | mingw32* | hpux*)
enable_symvers=no ;;
*)
enable_symvers=gnu ;;
diff --git a/libstdc++-v3/config/abi/pre/gnu.ver b/libstdc++-v3/config/abi/pre/gnu.ver
index 40237a6a854..7a034db1ec1 100644
--- a/libstdc++-v3/config/abi/pre/gnu.ver
+++ b/libstdc++-v3/config/abi/pre/gnu.ver
@@ -900,7 +900,7 @@ GLIBCXX_3.4.11 {
_ZNSt10shared_ptrISt18__thread_data_baseED1Ev;
_ZNSt12bad_weak_ptrD0Ev;
_ZNSt12bad_weak_ptrD1Ev;
- _ZNSt6thread14__start_threadEv;
+ _ZNSt6thread15_M_start_threadEv;
_ZNSt6thread4joinEv;
_ZNSt6thread6detachEv;
diff --git a/libstdc++-v3/configure b/libstdc++-v3/configure
index 81a81e7308e..d11accaf082 100755
--- a/libstdc++-v3/configure
+++ b/libstdc++-v3/configure
@@ -116669,7 +116669,7 @@ if test x$enable_symvers = xyes ; then
else
if test $with_gnu_ld = yes ; then
case ${target_os} in
- cygwin* | pe | mingw32*)
+ cygwin* | pe | mingw32* | hpux*)
enable_symvers=no ;;
*)
enable_symvers=gnu ;;
diff --git a/libstdc++-v3/include/ext/bitmap_allocator.h b/libstdc++-v3/include/ext/bitmap_allocator.h
index 7768bd2396d..7f5466afe18 100644
--- a/libstdc++-v3/include/ext/bitmap_allocator.h
+++ b/libstdc++-v3/include/ext/bitmap_allocator.h
@@ -549,13 +549,11 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
*/
class free_list
{
- public:
typedef size_t* value_type;
typedef __detail::__mini_vector<value_type> vector_type;
typedef vector_type::iterator iterator;
typedef __mutex __mutex_type;
- private:
struct _LT_pointer_compare
{
bool
diff --git a/libstdc++-v3/include/std/thread b/libstdc++-v3/include/std/thread
index 00fb018989f..e6ce0f71876 100644
--- a/libstdc++-v3/include/std/thread
+++ b/libstdc++-v3/include/std/thread
@@ -65,7 +65,7 @@ namespace std
__thread_data_base() = default;
virtual ~__thread_data_base() = default;
- virtual void __run() = 0;
+ virtual void _M_run() = 0;
__gthread_t _M_thread_handle;
__thread_data_ptr _M_this_ptr;
@@ -80,7 +80,7 @@ namespace std
: _M_func(std::forward<_Callable>(__f))
{ }
- void __run()
+ void _M_run()
{ _M_func(); }
private:
@@ -100,21 +100,29 @@ namespace std
template<typename _Callable>
explicit thread(_Callable __f)
- : _M_thread_data(__make_thread_data(__f))
- { __start_thread(); }
+ : _M_thread_data(_M_make_thread_data(__f))
+ { _M_start_thread(); }
template<typename _Callable, typename... _Args>
thread(_Callable&& __f, _Args&&... __args)
- : _M_thread_data(__make_thread_data(std::bind(__f, __args...)))
- { __start_thread(); }
+ : _M_thread_data(_M_make_thread_data(std::bind(__f, __args...)))
+ { _M_start_thread(); }
~thread()
{ detach(); }
thread(const thread&) = delete;
- thread(thread&&);
+ thread(thread&& __t)
+ { swap(__t); }
+
thread& operator=(const thread&) = delete;
- thread& operator=(thread&&);
+ thread& operator=(thread&& __t)
+ {
+ if (joinable())
+ detach();
+ swap(__t);
+ return *this;
+ }
// members
void
@@ -150,17 +158,17 @@ namespace std
private:
template<typename _Callable>
__thread_data_ptr
- __make_thread_data(_Callable&& __f)
+ _M_make_thread_data(_Callable&& __f)
{
return __thread_data_ptr(
new __thread_data<_Callable>(std::forward<_Callable>(__f)));
}
__thread_data_ptr
- __make_thread_data(void(*__f)())
+ _M_make_thread_data(void(*__f)())
{ return __thread_data_ptr(new __thread_data<void(*)()>(__f)); }
- void __start_thread();
+ void _M_start_thread();
__thread_data_ptr _M_thread_data;
mutable mutex _M_thread_data_mutex;
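Besides the __* to _M_* renames, the <thread> hunk above defines the move constructor and move assignment inline instead of leaving them declared-only; assignment first detaches any thread the left-hand object still owns, then swaps. A usage sketch of the resulting behaviour (not from the patch; built with -std=gnu++0x against this library):

  #include <thread>

  static void work() { /* ... */ }

  int main()
  {
    std::thread t1(work);            // launched through _M_start_thread()
    std::thread t2(std::move(t1));   // move-construct: t2 now owns the thread, t1 is empty
    t1 = std::move(t2);              // move-assign: t1 is not joinable, so no detach, just swap
    t1.join();
    return 0;
  }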
diff --git a/libstdc++-v3/src/thread.cc b/libstdc++-v3/src/thread.cc
index b7a4f83a8e5..ca934dd9059 100644
--- a/libstdc++-v3/src/thread.cc
+++ b/libstdc++-v3/src/thread.cc
@@ -46,7 +46,7 @@ namespace std
try
{
- __local_thread_data->__run();
+ __local_thread_data->_M_run();
}
catch(...)
{
@@ -88,7 +88,7 @@ namespace std
}
void
- thread::__start_thread()
+ thread::_M_start_thread()
{
_M_thread_data->_M_this_ptr = _M_thread_data;
int __e = __gthread_create(&_M_thread_data->_M_thread_handle,
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic/cons/assign_neg.cc b/libstdc++-v3/testsuite/29_atomics/atomic/cons/assign_neg.cc
index 021d1fedd4b..820d5aa21b0 100644
--- a/libstdc++-v3/testsuite/29_atomics/atomic/cons/assign_neg.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic/cons/assign_neg.cc
@@ -38,7 +38,7 @@ int main()
return 0;
}
-// { dg-error "used here" "" { target *-*-* } 510 }
+// { dg-error "used here" "" { target *-*-* } 530 }
// { dg-error "deleted function" "" { target *-*-* } 239 }
// { dg-error "deleted function" "" { target *-*-* } 257 }
// { dg-error "deleted function" "" { target *-*-* } 275 }
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic/cons/copy_neg.cc b/libstdc++-v3/testsuite/29_atomics/atomic/cons/copy_neg.cc
index 94022289152..b723066d75b 100644
--- a/libstdc++-v3/testsuite/29_atomics/atomic/cons/copy_neg.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic/cons/copy_neg.cc
@@ -38,7 +38,7 @@ int main()
return 0;
}
-// { dg-error "used here" "" { target *-*-* } 549 }
+// { dg-error "used here" "" { target *-*-* } 569 }
// { dg-error "deleted function" "" { target *-*-* } 238 }
// { dg-error "deleted function" "" { target *-*-* } 256 }
// { dg-error "deleted function" "" { target *-*-* } 274 }
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic/requirements/standard_layout.cc b/libstdc++-v3/testsuite/29_atomics/atomic_address/requirements/standard_layout.cc
index e51e8e4454f..3d5187d2137 100644
--- a/libstdc++-v3/testsuite/29_atomics/atomic/requirements/standard_layout.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_address/requirements/standard_layout.cc
@@ -1,8 +1,7 @@
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
-// 2008-10-22 Benjamin Kosnik <bkoz@redhat.com>
-// Copyright (C) 2008 Free Software Foundation, Inc.
+// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@@ -20,12 +19,20 @@
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
#include <cstdatomic>
#include <testsuite_common_types.h>
void test01()
{
- // Check for standard layout requirements
__gnu_test::standard_layout test;
- __gnu_cxx::typelist::apply_generator(test, __gnu_test::atomics_tl());
+ test.operator()<std::atomic_address>();
}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_address/requirements/trivial.cc b/libstdc++-v3/testsuite/29_atomics/atomic_address/requirements/trivial.cc
new file mode 100644
index 00000000000..de722ad232b
--- /dev/null
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_address/requirements/trivial.cc
@@ -0,0 +1,38 @@
+// { dg-options "-std=gnu++0x" }
+// { dg-do compile }
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <cstdatomic>
+#include <testsuite_common_types.h>
+
+void test01()
+{
+ __gnu_test::has_trivial_cons_dtor test;
+ test.operator()<std::atomic_address>();
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/standard_layout.cc b/libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/standard_layout.cc
index c7489014334..3542f0e2a75 100644
--- a/libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/standard_layout.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/standard_layout.cc
@@ -1,6 +1,7 @@
// { dg-options "-std=gnu++0x" }
+// { dg-do compile }
-// Copyright (C) 2008 Free Software Foundation, Inc.
+// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@@ -28,20 +29,10 @@
// the GNU General Public License.
#include <cstdatomic>
-#include <type_traits>
-#include <testsuite_hooks.h>
+#include <testsuite_common_types.h>
-int main()
+void test01()
{
- bool test __attribute__((unused)) = true;
-
- typedef std::atomic_flag test_type;
-
- // libstdc++/37907
- // VERIFY( std::is_standard_layout<test_type>::value );
-
- VERIFY( std::has_trivial_default_constructor<test_type>::value );
- VERIFY( std::has_trivial_destructor<test_type>::value );
-
- return 0;
+ __gnu_test::standard_layout test;
+ test.operator()<std::atomic_flag>();
}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/trivial.cc b/libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/trivial.cc
new file mode 100644
index 00000000000..ae8e9a48881
--- /dev/null
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_flag/requirements/trivial.cc
@@ -0,0 +1,38 @@
+// { dg-options "-std=gnu++0x" }
+// { dg-do compile }
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <cstdatomic>
+#include <testsuite_common_types.h>
+
+void test01()
+{
+ __gnu_test::has_trivial_cons_dtor test;
+ test.operator()<std::atomic_flag>();
+}
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/assign_neg.cc b/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/assign_neg.cc
index 724714ed819..85c737eb24a 100644
--- a/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/assign_neg.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/assign_neg.cc
@@ -39,11 +39,11 @@ int main()
return 0;
}
-// { dg-error "used here" "" { target *-*-* } 510 }
+// { dg-error "used here" "" { target *-*-* } 530 }
// { dg-excess-errors "deleted function" }
// { dg-excess-errors "deleted function" }
// { dg-error "instantiated from" "" { target *-*-* } 38 }
-// { dg-error "instantiated from" "" { target *-*-* } 517 }
+// { dg-error "instantiated from" "" { target *-*-* } 537 }
// { dg-error "instantiated from" "" { target *-*-* } 173 }
// { dg-error "instantiated from" "" { target *-*-* } 404 }
// { dg-error "instantiated from" "" { target *-*-* } 175 }
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/copy_neg.cc b/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/copy_neg.cc
index f03ceced11b..69e45d95951 100644
--- a/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/copy_neg.cc
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_integral/cons/copy_neg.cc
@@ -39,11 +39,11 @@ int main()
return 0;
}
-// { dg-error "used here" "" { target *-*-* } 549 }
+// { dg-error "used here" "" { target *-*-* } 569 }
// { dg-excess-errors "deleted function" }
// { dg-excess-errors "deleted function" }
// { dg-error "instantiated from" "" { target *-*-* } 38 }
-// { dg-error "instantiated from" "" { target *-*-* } 555 }
+// { dg-error "instantiated from" "" { target *-*-* } 575 }
// { dg-error "instantiated from" "" { target *-*-* } 173 }
// { dg-error "instantiated from" "" { target *-*-* } 404 }
// { dg-error "instantiated from" "" { target *-*-* } 175 }
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_integral/requirements/trivial.cc b/libstdc++-v3/testsuite/29_atomics/atomic_integral/requirements/trivial.cc
new file mode 100644
index 00000000000..8866ba8cab9
--- /dev/null
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_integral/requirements/trivial.cc
@@ -0,0 +1,39 @@
+// { dg-options "-std=gnu++0x" }
+// { dg-do compile }
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <cstdatomic>
+#include <testsuite_common_types.h>
+
+void test01()
+{
+ __gnu_test::has_trivial_cons_dtor test;
+ __gnu_cxx::typelist::apply_generator(test,
+ __gnu_test::atomic_integrals::type());
+}
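Unlike the single-type tests added above, this one drives the generator over the whole atomic-integrals typelist via __gnu_cxx::typelist::apply_generator, which invokes the generator's templated call operator once per type in the list. A reduced, hypothetical sketch of that pattern using plain variadic recursion (not the __gnu_cxx implementation):

  struct record_size
  {
    template<typename T>
      void operator()() { /* e.g. inspect sizeof(T) */ }
  };

  template<typename Gen>
    void apply_all(Gen&) { }                  // base case: list exhausted

  template<typename Gen, typename T, typename... Rest>
    void apply_all(Gen& g)
    {
      g.template operator()<T>();             // instantiate the check for T
      apply_all<Gen, Rest...>(g);             // recurse over the remaining types
    }

  int main()
  {
    record_size gen;
    apply_all<record_size, char, int, long>(gen);
    return 0;
  }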
diff --git a/libstdc++-v3/testsuite/29_atomics/headers/stdatomic.h/functions.c b/libstdc++-v3/testsuite/29_atomics/headers/stdatomic.h/functions.c
index 1282c0df906..ac0eeda3c27 100644
--- a/libstdc++-v3/testsuite/29_atomics/headers/stdatomic.h/functions.c
+++ b/libstdc++-v3/testsuite/29_atomics/headers/stdatomic.h/functions.c
@@ -1,7 +1,7 @@
// { dg-options "-x c" }
// { dg-do compile }
-// Copyright (C) 2008 Free Software Foundation, Inc.
+// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@@ -32,7 +32,6 @@ int main()
atomic_flag_test_and_set_explicit(p, m);
atomic_flag_clear(p);
atomic_flag_clear_explicit(p, m);
- atomic_flag_fence(p, m);
return 0;
}
diff --git a/libstdc++-v3/testsuite/30_threads/condition_variable/requirements/standard_layout.cc b/libstdc++-v3/testsuite/30_threads/condition_variable/requirements/standard_layout.cc
new file mode 100644
index 00000000000..f50e5c82714
--- /dev/null
+++ b/libstdc++-v3/testsuite/30_threads/condition_variable/requirements/standard_layout.cc
@@ -0,0 +1,40 @@
+// { dg-do compile }
+// { dg-options "-std=gnu++0x" }
+// { dg-require-cstdint "" }
+// { dg-require-gthreads "" }
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <condition_variable>
+#include <testsuite_common_types.h>
+
+void test01()
+{
+ __gnu_test::standard_layout test;
+ test.operator()<std::condition_variable>();
+}
diff --git a/libstdc++-v3/testsuite/30_threads/condition_variable/requirements/typedefs.cc b/libstdc++-v3/testsuite/30_threads/condition_variable/requirements/typedefs.cc
new file mode 100644
index 00000000000..aec35e249a2
--- /dev/null
+++ b/libstdc++-v3/testsuite/30_threads/condition_variable/requirements/typedefs.cc
@@ -0,0 +1,41 @@
+// { dg-do compile }
+// { dg-options "-std=gnu++0x" }
+// { dg-require-cstdint "" }
+// { dg-require-gthreads "" }
+// 2009-01-28 Benjamin Kosnik <bkoz@redhat.com>
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <condition_variable>
+
+void test01()
+{
+ // Check for required typedefs
+ typedef std::condition_variable test_type;
+ typedef test_type::native_handle_type type;
+}
diff --git a/libstdc++-v3/testsuite/30_threads/mutex/requirements/standard_layout.cc b/libstdc++-v3/testsuite/30_threads/mutex/requirements/standard_layout.cc
new file mode 100644
index 00000000000..cbe76ad0252
--- /dev/null
+++ b/libstdc++-v3/testsuite/30_threads/mutex/requirements/standard_layout.cc
@@ -0,0 +1,40 @@
+// { dg-do compile }
+// { dg-options "-std=gnu++0x" }
+// { dg-require-cstdint "" }
+// { dg-require-gthreads "" }
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <mutex>
+#include <testsuite_common_types.h>
+
+void test01()
+{
+ __gnu_test::standard_layout test;
+ test.operator()<std::mutex>();
+}
diff --git a/libstdc++-v3/testsuite/30_threads/recursive_mutex/requirements/standard_layout.cc b/libstdc++-v3/testsuite/30_threads/recursive_mutex/requirements/standard_layout.cc
new file mode 100644
index 00000000000..e736312400a
--- /dev/null
+++ b/libstdc++-v3/testsuite/30_threads/recursive_mutex/requirements/standard_layout.cc
@@ -0,0 +1,40 @@
+// { dg-do compile }
+// { dg-options "-std=gnu++0x" }
+// { dg-require-cstdint "" }
+// { dg-require-gthreads "" }
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <mutex>
+#include <testsuite_common_types.h>
+
+void test01()
+{
+ __gnu_test::standard_layout test;
+ test.operator()<std::recursive_mutex>();
+}
diff --git a/libstdc++-v3/testsuite/30_threads/timed_mutex/requirements/standard_layout.cc b/libstdc++-v3/testsuite/30_threads/timed_mutex/requirements/standard_layout.cc
new file mode 100644
index 00000000000..57231b396e4
--- /dev/null
+++ b/libstdc++-v3/testsuite/30_threads/timed_mutex/requirements/standard_layout.cc
@@ -0,0 +1,40 @@
+// { dg-do compile }
+// { dg-options "-std=gnu++0x" }
+// { dg-require-cstdint "" }
+// { dg-require-gthreads "" }
+
+// Copyright (C) 2009 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#include <mutex>
+#include <testsuite_common_types.h>
+
+void test01()
+{
+ __gnu_test::standard_layout test;
+ test.operator()<std::timed_mutex>();
+}
diff --git a/libstdc++-v3/testsuite/util/testsuite_common_types.h b/libstdc++-v3/testsuite/util/testsuite_common_types.h
index 6dc0dfa7ebf..1ead6bf7fc8 100644
--- a/libstdc++-v3/testsuite/util/testsuite_common_types.h
+++ b/libstdc++-v3/testsuite/util/testsuite_common_types.h
@@ -1,7 +1,7 @@
// -*- C++ -*-
// typelist for the C++ library testsuite.
//
-// Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@@ -446,6 +446,29 @@ namespace __gnu_test
// Generator to test standard layout
#ifdef __GXX_EXPERIMENTAL_CXX0X__
+ struct has_trivial_cons_dtor
+ {
+ template<typename _Tp>
+ void
+ operator()()
+ {
+ struct _Concept
+ {
+ void __constraint()
+ {
+ typedef std::has_trivial_default_constructor<_Tp> ctor_p;
+ static_assert(ctor_p::value, "default constructor not trivial");
+
+ typedef std::has_trivial_destructor<_Tp> dtor_p;
+ static_assert(dtor_p::value, "destructor not trivial");
+ }
+ };
+
+ void (_Concept::*__x)() __attribute__((unused))
+ = &_Concept::__constraint;
+ }
+ };
+
struct standard_layout
{
template<typename _Tp>
@@ -459,12 +482,9 @@ namespace __gnu_test
// libstdc++/37907
// typedef std::is_standard_layout<_Tp> standard_layout_p;
// static_assert(standard_layout_p::value, "not standard_layout");
-
- typedef std::has_trivial_default_constructor<_Tp> ctor_p;
- static_assert(ctor_p::value, "default ctor not trivial");
- typedef std::has_trivial_destructor<_Tp> dtor_p;
- static_assert(dtor_p::value, "dtor not trivial");
+ typedef std::has_virtual_destructor<_Tp> ctor_p;
+ static_assert(!ctor_p::value, "has virtual destructor");
}
};
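The new has_trivial_cons_dtor generator follows the same idiom as the other checkers in this header: the static_asserts sit inside a local class, and forming a pointer to its member function requires that member's body, and with it the asserts, to be instantiated without any run-time call. A standalone reduction of that idiom (hypothetical names; it uses the same has_trivial_* traits this header uses, which later standards deprecate):

  #include <type_traits>

  template<typename T>
    void check_trivial_cons_dtor()
    {
      struct Concept
      {
        void constraint()
        {
          static_assert(std::has_trivial_default_constructor<T>::value,
                        "default constructor not trivial");
          static_assert(std::has_trivial_destructor<T>::value,
                        "destructor not trivial");
        }
      };

      // Referencing constraint() via a member-function pointer makes the
      // compiler instantiate its body; nothing is executed at run time.
      void (Concept::*p)() __attribute__((unused)) = &Concept::constraint;
    }

  int main()
  {
    check_trivial_cons_dtor<int>();   // int passes both checks
    return 0;
  }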