author     rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-11-20 13:32:32 +0000
committer  rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-11-20 13:32:32 +0000
commit     fe5ad9266cba2cbb611a831aaac450d3f6decd0c (patch)
tree       a1dce161550e71aa81d0af00e118e4f68d907995
parent     6715fbd40b05c43941c4d4e093cceb5345a695e7 (diff)
parent     8c53c46cebf42cb4f4ac125ca6428c5e9b519f66 (diff)
download   gcc-fe5ad9266cba2cbb611a831aaac450d3f6decd0c.tar.gz
Merge from trunk.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@205111 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  ChangeLog | 4
-rw-r--r--  config/ChangeLog | 4
-rw-r--r--  config/bootstrap-lto.mk | 6
-rw-r--r--  config/bootstrap-ubsan.mk | 2
-rw-r--r--  gcc/ChangeLog | 2400
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/Makefile.in | 4
-rw-r--r--  gcc/ada/ChangeLog | 27
-rw-r--r--  gcc/ada/gcc-interface/decl.c | 2
-rw-r--r--  gcc/ada/gcc-interface/misc.c | 2
-rw-r--r--  gcc/ada/gcc-interface/trans.c | 4
-rw-r--r--  gcc/ada/gcc-interface/utils.c | 4
-rw-r--r--  gcc/ada/gcc-interface/utils2.c | 3
-rw-r--r--  gcc/alias.c | 4
-rw-r--r--  gcc/asan.c | 88
-rw-r--r--  gcc/attribs.c | 3
-rw-r--r--  gcc/attribs.h | 40
-rw-r--r--  gcc/basic-block.h | 16
-rw-r--r--  gcc/bb-reorder.c | 58
-rw-r--r--  gcc/bt-load.c | 5
-rw-r--r--  gcc/builtins.c | 86
-rw-r--r--  gcc/c-family/ChangeLog | 65
-rw-r--r--  gcc/c-family/c-common.c | 56
-rw-r--r--  gcc/c-family/c-common.h | 12
-rw-r--r--  gcc/c-family/c-cppbuiltin.c | 2
-rw-r--r--  gcc/c-family/c-format.c | 1
-rw-r--r--  gcc/c-family/c-lex.c | 2
-rw-r--r--  gcc/c-family/c-opts.c | 12
-rw-r--r--  gcc/c-family/c-pragma.c | 4
-rw-r--r--  gcc/c-family/c-pretty-print.c | 2
-rw-r--r--  gcc/c-family/c-ubsan.c | 13
-rw-r--r--  gcc/c-family/c.opt | 4
-rw-r--r--  gcc/c-family/cilk.c | 2
-rw-r--r--  gcc/c/ChangeLog | 18
-rw-r--r--  gcc/c/c-decl.c | 5
-rw-r--r--  gcc/c/c-lang.c | 1
-rw-r--r--  gcc/c/c-parser.c | 5
-rw-r--r--  gcc/c/c-typeck.c | 4
-rw-r--r--  gcc/calls.c | 4
-rw-r--r--  gcc/calls.h | 31
-rw-r--r--  gcc/cfg.c | 56
-rw-r--r--  gcc/cfganal.c | 139
-rw-r--r--  gcc/cfgbuild.c | 23
-rw-r--r--  gcc/cfgcleanup.c | 74
-rw-r--r--  gcc/cfgexpand.c | 85
-rw-r--r--  gcc/cfghooks.c | 32
-rw-r--r--  gcc/cfgloop.c | 58
-rw-r--r--  gcc/cfgloop.h | 62
-rw-r--r--  gcc/cfgloopanal.c | 8
-rw-r--r--  gcc/cfgloopmanip.c | 26
-rw-r--r--  gcc/cfgrtl.c | 191
-rw-r--r--  gcc/cgraph.c | 25
-rw-r--r--  gcc/cgraph.h | 8
-rw-r--r--  gcc/cgraphbuild.c | 6
-rw-r--r--  gcc/cgraphclones.c | 5
-rw-r--r--  gcc/cgraphunit.c | 46
-rw-r--r--  gcc/cilk-common.c | 2
-rw-r--r--  gcc/combine.c | 25
-rw-r--r--  gcc/common.opt | 6
-rw-r--r--  gcc/common/config/i386/i386-common.c | 2
-rw-r--r--  gcc/config/aarch64/aarch64-builtins.c | 1120
-rw-r--r--  gcc/config/aarch64/aarch64.c | 19
-rw-r--r--  gcc/config/aarch64/aarch64.h | 14
-rw-r--r--  gcc/config/aarch64/aarch64.md | 787
-rw-r--r--  gcc/config/alpha/alpha.c | 7
-rw-r--r--  gcc/config/arc/arc.c | 4
-rw-r--r--  gcc/config/arm/aarch-common-protos.h | 1
-rw-r--r--  gcc/config/arm/aarch-common.c | 2
-rw-r--r--  gcc/config/arm/arm.c | 66
-rw-r--r--  gcc/config/arm/arm.h | 4
-rw-r--r--  gcc/config/arm/arm.md | 3
-rw-r--r--  gcc/config/arm/arm.opt | 4
-rw-r--r--  gcc/config/avr/avr-c.c | 1
-rw-r--r--  gcc/config/avr/avr-log.c | 1
-rw-r--r--  gcc/config/avr/avr.c | 4
-rw-r--r--  gcc/config/bfin/bfin.c | 4
-rw-r--r--  gcc/config/c6x/c6x.c | 4
-rw-r--r--  gcc/config/cr16/cr16.c | 2
-rw-r--r--  gcc/config/cris/cris.c | 4
-rw-r--r--  gcc/config/darwin.c | 3
-rw-r--r--  gcc/config/epiphany/epiphany.c | 6
-rw-r--r--  gcc/config/fr30/fr30.c | 2
-rw-r--r--  gcc/config/frv/frv.c | 5
-rw-r--r--  gcc/config/h8300/h8300.c | 4
-rw-r--r--  gcc/config/i386/i386-protos.h | 2
-rw-r--r--  gcc/config/i386/i386.c | 154
-rw-r--r--  gcc/config/i386/i386.md | 11
-rw-r--r--  gcc/config/i386/winnt-cxx.c | 2
-rw-r--r--  gcc/config/i386/winnt.c | 2
-rw-r--r--  gcc/config/ia64/ia64-c.c | 1
-rw-r--r--  gcc/config/ia64/ia64.c | 9
-rw-r--r--  gcc/config/iq2000/iq2000.c | 3
-rw-r--r--  gcc/config/lm32/lm32.c | 1
-rw-r--r--  gcc/config/m32c/m32c.c | 3
-rw-r--r--  gcc/config/m32r/m32r.c | 4
-rw-r--r--  gcc/config/m68k/m68k.c | 5
-rw-r--r--  gcc/config/mcore/mcore.c | 4
-rw-r--r--  gcc/config/mep/mep.c | 4
-rw-r--r--  gcc/config/microblaze/microblaze.c | 3
-rw-r--r--  gcc/config/mips/mips.c | 17
-rw-r--r--  gcc/config/mips/mips.h | 1
-rw-r--r--  gcc/config/mips/mips.md | 17
-rw-r--r--  gcc/config/mips/mips.opt | 4
-rw-r--r--  gcc/config/mmix/mmix.c | 3
-rw-r--r--  gcc/config/mn10300/mn10300.c | 6
-rw-r--r--  gcc/config/moxie/moxie.c | 3
-rw-r--r--  gcc/config/msp430/msp430.c | 2
-rw-r--r--  gcc/config/nds32/nds32.c | 5
-rw-r--r--  gcc/config/pa/pa.c | 4
-rw-r--r--  gcc/config/pdp11/pdp11.c | 3
-rw-r--r--  gcc/config/picochip/picochip.c | 4
-rw-r--r--  gcc/config/rl78/rl78.c | 3
-rw-r--r--  gcc/config/rs6000/linux64.h | 4
-rw-r--r--  gcc/config/rs6000/rs6000-c.c | 2
-rw-r--r--  gcc/config/rs6000/rs6000.c | 16
-rw-r--r--  gcc/config/rs6000/sysv4.h | 7
-rw-r--r--  gcc/config/rs6000/vector.md | 1
-rw-r--r--  gcc/config/rx/rx.c | 3
-rw-r--r--  gcc/config/s390/htmxlintrin.h | 25
-rw-r--r--  gcc/config/s390/s390.c | 51
-rw-r--r--  gcc/config/s390/s390.md | 32
-rw-r--r--  gcc/config/score/score.c | 4
-rw-r--r--  gcc/config/sh/sh-c.c | 2
-rw-r--r--  gcc/config/sh/sh.c | 4
-rw-r--r--  gcc/config/sol2-c.c | 2
-rw-r--r--  gcc/config/sol2-cxx.c | 1
-rw-r--r--  gcc/config/sol2.c | 2
-rw-r--r--  gcc/config/sparc/sparc.c | 4
-rw-r--r--  gcc/config/spu/spu-c.c | 1
-rw-r--r--  gcc/config/spu/spu.c | 8
-rw-r--r--  gcc/config/stormy16/stormy16.c | 4
-rw-r--r--  gcc/config/tilegx/tilegx.c | 4
-rw-r--r--  gcc/config/tilepro/tilepro.c | 4
-rw-r--r--  gcc/config/v850/v850-c.c | 2
-rw-r--r--  gcc/config/v850/v850.c | 4
-rw-r--r--  gcc/config/vax/vax.c | 2
-rw-r--r--  gcc/config/vms/vms.c | 1
-rw-r--r--  gcc/config/vxworks.c | 1
-rw-r--r--  gcc/config/xtensa/xtensa.c | 4
-rw-r--r--  gcc/convert.c | 1
-rw-r--r--  gcc/coretypes.h | 5
-rw-r--r--  gcc/coverage.c | 4
-rw-r--r--  gcc/cp/ChangeLog | 62
-rw-r--r--  gcc/cp/call.c | 3
-rw-r--r--  gcc/cp/class.c | 4
-rw-r--r--  gcc/cp/cp-gimplify.c | 1
-rw-r--r--  gcc/cp/cvt.c | 1
-rw-r--r--  gcc/cp/decl.c | 5
-rw-r--r--  gcc/cp/decl2.c | 6
-rw-r--r--  gcc/cp/error.c | 1
-rw-r--r--  gcc/cp/except.c | 3
-rw-r--r--  gcc/cp/init.c | 2
-rw-r--r--  gcc/cp/lambda.c | 1
-rw-r--r--  gcc/cp/lex.c | 1
-rw-r--r--  gcc/cp/mangle.c | 2
-rw-r--r--  gcc/cp/method.c | 2
-rw-r--r--  gcc/cp/name-lookup.c | 3
-rw-r--r--  gcc/cp/optimize.c | 1
-rw-r--r--  gcc/cp/parser.c | 4
-rw-r--r--  gcc/cp/pt.c | 4
-rw-r--r--  gcc/cp/ptree.c | 1
-rw-r--r--  gcc/cp/repo.c | 1
-rw-r--r--  gcc/cp/rtti.c | 2
-rw-r--r--  gcc/cp/semantics.c | 4
-rw-r--r--  gcc/cp/tree.c | 3
-rw-r--r--  gcc/cp/typeck.c | 3
-rw-r--r--  gcc/cp/typeck2.c | 2
-rw-r--r--  gcc/cp/vtable-class-hierarchy.c | 2
-rw-r--r--  gcc/cprop.c | 29
-rw-r--r--  gcc/cse.c | 6
-rw-r--r--  gcc/dbxout.c | 4
-rw-r--r--  gcc/df-core.c | 8
-rw-r--r--  gcc/df-problems.c | 2
-rw-r--r--  gcc/df-scan.c | 8
-rw-r--r--  gcc/doc/invoke.texi | 72
-rw-r--r--  gcc/doc/md.texi | 2
-rw-r--r--  gcc/dojump.c | 1
-rw-r--r--  gcc/dominance.c | 29
-rw-r--r--  gcc/domwalk.c | 9
-rw-r--r--  gcc/dse.c | 3
-rw-r--r--  gcc/dwarf2asm.c | 2
-rw-r--r--  gcc/dwarf2cfi.c | 1
-rw-r--r--  gcc/dwarf2out.c | 8
-rw-r--r--  gcc/emit-rtl.c | 3
-rw-r--r--  gcc/emit-rtl.h | 3
-rw-r--r--  gcc/except.c | 6
-rw-r--r--  gcc/explow.c | 1
-rw-r--r--  gcc/expmed.c | 1
-rw-r--r--  gcc/expr.c | 61
-rw-r--r--  gcc/expr.h | 22
-rw-r--r--  gcc/final.c | 12
-rw-r--r--  gcc/flag-types.h | 3
-rw-r--r--  gcc/fold-const.c | 21
-rw-r--r--  gcc/fold-const.h | 172
-rw-r--r--  gcc/fortran/ChangeLog | 27
-rw-r--r--  gcc/fortran/decl.c | 1
-rw-r--r--  gcc/fortran/iresolve.c | 1
-rw-r--r--  gcc/fortran/match.c | 1
-rw-r--r--  gcc/fortran/module.c | 1
-rw-r--r--  gcc/fortran/target-memory.c | 1
-rw-r--r--  gcc/fortran/trans-common.c | 3
-rw-r--r--  gcc/fortran/trans-const.c | 1
-rw-r--r--  gcc/fortran/trans-decl.c | 4
-rw-r--r--  gcc/fortran/trans-expr.c | 1
-rw-r--r--  gcc/fortran/trans-intrinsic.c | 3
-rw-r--r--  gcc/fortran/trans-io.c | 2
-rw-r--r--  gcc/fortran/trans-openmp.c | 1
-rw-r--r--  gcc/fortran/trans-stmt.c | 1
-rw-r--r--  gcc/fortran/trans-types.c | 2
-rw-r--r--  gcc/fortran/trans.c | 2
-rw-r--r--  gcc/function.c | 61
-rw-r--r--  gcc/function.h | 21
-rw-r--r--  gcc/fwprop.c | 2
-rw-r--r--  gcc/gcc-symtab.h | 28
-rw-r--r--  gcc/gcse.c | 39
-rw-r--r--  gcc/gdbhooks.py | 111
-rw-r--r--  gcc/gdbinit.in | 2
-rw-r--r--  gcc/genattrtab.c | 3
-rw-r--r--  gcc/genautomata.c | 3
-rw-r--r--  gcc/genemit.c | 3
-rw-r--r--  gcc/gengtype.c | 2
-rw-r--r--  gcc/genopinit.c | 3
-rw-r--r--  gcc/genoutput.c | 3
-rw-r--r--  gcc/genpeep.c | 3
-rw-r--r--  gcc/genpreds.c | 3
-rw-r--r--  gcc/ggc.h | 6
-rw-r--r--  gcc/gimple-builder.c | 1
-rw-r--r--  gcc/gimple-expr.c | 2
-rw-r--r--  gcc/gimple-fold.c | 6
-rw-r--r--  gcc/gimple-iterator.c | 79
-rw-r--r--  gcc/gimple-iterator.h | 8
-rw-r--r--  gcc/gimple-low.c | 2
-rw-r--r--  gcc/gimple-pretty-print.c | 7
-rw-r--r--  gcc/gimple-pretty-print.h | 4
-rw-r--r--  gcc/gimple-ssa-isolate-paths.c | 1
-rw-r--r--  gcc/gimple-ssa-strength-reduction.c | 5
-rw-r--r--  gcc/gimple-ssa.h | 16
-rw-r--r--  gcc/gimple-streamer-in.c | 20
-rw-r--r--  gcc/gimple-streamer-out.c | 2
-rw-r--r--  gcc/gimple-walk.c | 1
-rw-r--r--  gcc/gimple.c | 79
-rw-r--r--  gcc/gimple.h | 1649
-rw-r--r--  gcc/gimplify-me.c | 3
-rw-r--r--  gcc/gimplify.c | 11
-rw-r--r--  gcc/go/ChangeLog | 8
-rw-r--r--  gcc/go/go-backend.c | 1
-rw-r--r--  gcc/go/go-gcc.cc | 3
-rw-r--r--  gcc/go/go-lang.c | 1
-rw-r--r--  gcc/go/gofrontend/expressions.cc | 2
-rw-r--r--  gcc/go/gofrontend/gogo-tree.cc | 3
-rw-r--r--  gcc/graph.c | 14
-rw-r--r--  gcc/graphite-clast-to-gimple.c | 5
-rw-r--r--  gcc/graphite-scop-detection.c | 10
-rw-r--r--  gcc/graphite-sese-to-poly.c | 11
-rw-r--r--  gcc/graphite.c | 3
-rw-r--r--  gcc/haifa-sched.c | 31
-rw-r--r--  gcc/hw-doloop.c | 4
-rw-r--r--  gcc/ifcvt.c | 32
-rw-r--r--  gcc/internal-fn.c | 9
-rw-r--r--  gcc/internal-fn.def | 1
-rw-r--r--  gcc/ipa-devirt.c | 618
-rw-r--r--  gcc/ipa-inline-analysis.c | 20
-rw-r--r--  gcc/ipa-inline.c | 2
-rw-r--r--  gcc/ipa-prop.c | 18
-rw-r--r--  gcc/ipa-pure-const.c | 11
-rw-r--r--  gcc/ipa-reference.c | 1
-rw-r--r--  gcc/ipa-split.c | 47
-rw-r--r--  gcc/ipa-utils.c | 4
-rw-r--r--  gcc/ipa-utils.h | 59
-rw-r--r--  gcc/ipa.c | 2
-rw-r--r--  gcc/ira-build.c | 4
-rw-r--r--  gcc/ira-color.c | 2
-rw-r--r--  gcc/ira-emit.c | 4
-rw-r--r--  gcc/ira-int.h | 5
-rw-r--r--  gcc/ira.c | 47
-rw-r--r--  gcc/java/ChangeLog | 24
-rw-r--r--  gcc/java/builtins.c | 2
-rw-r--r--  gcc/java/class.c | 3
-rw-r--r--  gcc/java/constants.c | 2
-rw-r--r--  gcc/java/decl.c | 3
-rw-r--r--  gcc/java/except.c | 2
-rw-r--r--  gcc/java/expr.c | 2
-rw-r--r--  gcc/java/jcf-parse.c | 1
-rw-r--r--  gcc/java/mangle.c | 1
-rw-r--r--  gcc/java/resource.c | 2
-rw-r--r--  gcc/java/typeck.c | 2
-rw-r--r--  gcc/java/verify-glue.c | 1
-rw-r--r--  gcc/langhooks.c | 2
-rw-r--r--  gcc/lcm.c | 60
-rw-r--r--  gcc/loop-doloop.c | 3
-rw-r--r--  gcc/loop-init.c | 10
-rw-r--r--  gcc/loop-invariant.c | 12
-rw-r--r--  gcc/loop-iv.c | 4
-rw-r--r--  gcc/loop-unroll.c | 9
-rw-r--r--  gcc/loop-unswitch.c | 5
-rw-r--r--  gcc/lra-assigns.c | 2
-rw-r--r--  gcc/lra-constraints.c | 3
-rw-r--r--  gcc/lra-lives.c | 5
-rw-r--r--  gcc/lra.c | 6
-rw-r--r--  gcc/lto-cgraph.c | 1
-rw-r--r--  gcc/lto-opts.c | 25
-rw-r--r--  gcc/lto-streamer-in.c | 7
-rw-r--r--  gcc/lto-streamer-out.c | 102
-rw-r--r--  gcc/lto-streamer.h | 2
-rw-r--r--  gcc/lto-wrapper.c | 13
-rw-r--r--  gcc/lto/ChangeLog | 7
-rw-r--r--  gcc/lto/lto-lang.c | 2
-rw-r--r--  gcc/lto/lto-partition.c | 1
-rw-r--r--  gcc/lto/lto.c | 1
-rw-r--r--  gcc/mcf.c | 29
-rw-r--r--  gcc/mode-switching.c | 6
-rw-r--r--  gcc/modulo-sched.c | 11
-rw-r--r--  gcc/objc/ChangeLog | 13
-rw-r--r--  gcc/objc/objc-act.c | 3
-rw-r--r--  gcc/objc/objc-encoding.c | 2
-rw-r--r--  gcc/objc/objc-gnu-runtime-abi-01.c | 1
-rw-r--r--  gcc/objc/objc-next-runtime-abi-01.c | 1
-rw-r--r--  gcc/objc/objc-next-runtime-abi-02.c | 1
-rw-r--r--  gcc/objc/objc-runtime-shared-support.c | 1
-rw-r--r--  gcc/omp-low.c | 5
-rw-r--r--  gcc/optabs.c | 7
-rw-r--r--  gcc/optabs.h | 1
-rw-r--r--  gcc/opts.c | 35
-rw-r--r--  gcc/pass_manager.h | 2
-rw-r--r--  gcc/passes.c | 34
-rw-r--r--  gcc/passes.def | 9
-rw-r--r--  gcc/plugin.def | 6
-rw-r--r--  gcc/postreload-gcse.c | 6
-rw-r--r--  gcc/predict.c | 63
-rw-r--r--  gcc/print-rtl.c | 1
-rw-r--r--  gcc/print-rtl.h | 27
-rw-r--r--  gcc/print-tree.c | 3
-rw-r--r--  gcc/print-tree.h | 46
-rw-r--r--  gcc/profile.c | 71
-rw-r--r--  gcc/realmpfr.c | 1
-rw-r--r--  gcc/reg-stack.c | 15
-rw-r--r--  gcc/regrename.c | 2
-rw-r--r--  gcc/regs.h | 2
-rw-r--r--  gcc/reload.c | 6
-rw-r--r--  gcc/reload1.c | 6
-rw-r--r--  gcc/reorg.c | 2
-rw-r--r--  gcc/resource.c | 4
-rw-r--r--  gcc/sanitizer.def | 4
-rw-r--r--  gcc/sched-deps.c | 2
-rw-r--r--  gcc/sched-ebb.c | 6
-rw-r--r--  gcc/sched-int.h | 5
-rw-r--r--  gcc/sched-rgn.c | 47
-rw-r--r--  gcc/sdbout.c | 2
-rw-r--r--  gcc/sel-sched-ir.c | 36
-rw-r--r--  gcc/sel-sched-ir.h | 10
-rw-r--r--  gcc/sel-sched.c | 7
-rw-r--r--  gcc/sese.c | 1
-rw-r--r--  gcc/simplify-rtx.c | 3
-rw-r--r--  gcc/stmt.c | 2
-rw-r--r--  gcc/stmt.h | 34
-rw-r--r--  gcc/stor-layout.c | 4
-rw-r--r--  gcc/stor-layout.h | 115
-rw-r--r--  gcc/store-motion.c | 8
-rw-r--r--  gcc/stringpool.h | 43
-rw-r--r--  gcc/symtab.c | 6
-rw-r--r--  gcc/system.h | 2
-rw-r--r--  gcc/targhooks.c | 3
-rw-r--r--  gcc/testsuite/ChangeLog | 131
-rw-r--r--  gcc/testsuite/c-c++-common/Wfloat-conversion.c | 58
-rw-r--r--  gcc/testsuite/c-c++-common/cilk-plus/PS/body.c | 1
-rw-r--r--  gcc/testsuite/c-c++-common/cilk-plus/PS/reduction-3.c | 4
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c | 6
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-1.c | 13
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-10.c | 14
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-11.c | 17
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-2.c | 13
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-3.c | 19
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-4.c | 15
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-5.c | 17
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-6.c | 14
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-7.c | 18
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-8.c | 17
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/null-9.c | 17
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/shift-1.c | 12
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/constexpr-noexcept7.C | 9
-rw-r--r--  gcc/testsuite/g++.dg/ipa/devirt-9.C | 8
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/20101011-1.c | 7
-rw-r--r--  gcc/testsuite/gcc.dg/builtin-object-size-14.c | 28
-rw-r--r--  gcc/testsuite/gcc.dg/builtin-object-size-8.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/c11-complex-1.c | 1
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/selfassign.c | 1
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/start_unit_plugin.c | 1
-rw-r--r--  gcc/testsuite/gcc.dg/strlenopt-14gf.c | 10
-rw-r--r--  gcc/testsuite/gcc.dg/strlenopt-1f.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/strlenopt-4gf.c | 12
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr57517.c | 16
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr58956.c | 30
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr59164.c | 21
-rw-r--r--  gcc/testsuite/gcc.dg/tree-prof/cold_partition_label.c | 36
-rw-r--r--  gcc/testsuite/gcc.dg/ubsan/c99-shift-2.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/i386/memcpy-2.c | 6
-rw-r--r--  gcc/testsuite/gcc.target/i386/memcpy-3.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr59099.c | 76
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/ppc64-abi-2.c | 18
-rw-r--r--  gcc/testsuite/gcc.target/s390/htm-1.c | 73
-rw-r--r--  gcc/testsuite/gcc.target/s390/htm-builtins-1.c | 1073
-rw-r--r--  gcc/testsuite/gcc.target/s390/htm-builtins-2.c | 682
-rw-r--r--  gcc/testsuite/gcc.target/s390/htm-builtins-compile-1.c | 164
-rw-r--r--  gcc/testsuite/gcc.target/s390/htm-builtins-compile-2.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/s390/htm-builtins-compile-3.c (renamed from gcc/testsuite/gcc.target/s390/htm-xl-intrin-1.c) | 0
-rw-r--r--  gcc/testsuite/gcc.target/s390/s390.exp | 13
-rw-r--r--  gcc/testsuite/gfortran.fortran-torture/compile/pr57517.f90 | 13
-rw-r--r--  gcc/timevar.def | 1
-rw-r--r--  gcc/toplev.c | 2
-rw-r--r--  gcc/tracer.c | 4
-rw-r--r--  gcc/trans-mem.c | 23
-rw-r--r--  gcc/trans-mem.h | 11
-rw-r--r--  gcc/tree-affine.c | 1
-rw-r--r--  gcc/tree-browser.c | 1
-rw-r--r--  gcc/tree-call-cdce.c | 2
-rw-r--r--  gcc/tree-cfg.c | 115
-rw-r--r--  gcc/tree-cfgcleanup.c | 10
-rw-r--r--  gcc/tree-complex.c | 5
-rw-r--r--  gcc/tree-data-ref.c | 4
-rw-r--r--  gcc/tree-dfa.c | 9
-rw-r--r--  gcc/tree-eh.c | 3
-rw-r--r--  gcc/tree-emutls.c | 3
-rw-r--r--  gcc/tree-if-conv.c | 7
-rw-r--r--  gcc/tree-inline.c | 73
-rw-r--r--  gcc/tree-inline.h | 9
-rw-r--r--  gcc/tree-into-ssa.c | 21
-rw-r--r--  gcc/tree-iterator.h | 2
-rw-r--r--  gcc/tree-loop-distribution.c | 5
-rw-r--r--  gcc/tree-nested.c | 2
-rw-r--r--  gcc/tree-nrv.c | 1
-rw-r--r--  gcc/tree-object-size.c | 39
-rw-r--r--  gcc/tree-object-size.h | 26
-rw-r--r--  gcc/tree-outof-ssa.c | 5
-rw-r--r--  gcc/tree-parloops.c | 9
-rw-r--r--  gcc/tree-pass.h | 4
-rw-r--r--  gcc/tree-phinodes.c | 40
-rw-r--r--  gcc/tree-predcom.c | 11
-rw-r--r--  gcc/tree-pretty-print.c | 8
-rw-r--r--  gcc/tree-profile.c | 6
-rw-r--r--  gcc/tree-scalar-evolution.c | 12
-rw-r--r--  gcc/tree-scalar-evolution.h | 2
-rw-r--r--  gcc/tree-sra.c | 20
-rw-r--r--  gcc/tree-ssa-address.c | 3
-rw-r--r--  gcc/tree-ssa-alias.c | 9
-rw-r--r--  gcc/tree-ssa-ccp.c | 10
-rw-r--r--  gcc/tree-ssa-coalesce.c | 3
-rw-r--r--  gcc/tree-ssa-copy.c | 1
-rw-r--r--  gcc/tree-ssa-copyrename.c | 2
-rw-r--r--  gcc/tree-ssa-dce.c | 22
-rw-r--r--  gcc/tree-ssa-dom.c | 7
-rw-r--r--  gcc/tree-ssa-dse.c | 2
-rw-r--r--  gcc/tree-ssa-forwprop.c | 3
-rw-r--r--  gcc/tree-ssa-ifcombine.c | 3
-rw-r--r--  gcc/tree-ssa-live.c | 16
-rw-r--r--  gcc/tree-ssa-live.h | 8
-rw-r--r--  gcc/tree-ssa-loop-ch.c | 9
-rw-r--r--  gcc/tree-ssa-loop-im.c | 8
-rw-r--r--  gcc/tree-ssa-loop-ivcanon.c | 4
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c | 72
-rw-r--r--  gcc/tree-ssa-loop-manip.c | 8
-rw-r--r--  gcc/tree-ssa-loop-niter.c | 13
-rw-r--r--  gcc/tree-ssa-loop-prefetch.c | 6
-rw-r--r--  gcc/tree-ssa-loop-unswitch.c | 5
-rw-r--r--  gcc/tree-ssa-math-opts.c | 9
-rw-r--r--  gcc/tree-ssa-operands.c | 3
-rw-r--r--  gcc/tree-ssa-phiopt.c | 5
-rw-r--r--  gcc/tree-ssa-phiprop.c | 3
-rw-r--r--  gcc/tree-ssa-pre.c | 23
-rw-r--r--  gcc/tree-ssa-propagate.c | 10
-rw-r--r--  gcc/tree-ssa-reassoc.c | 15
-rw-r--r--  gcc/tree-ssa-sccvn.c | 24
-rw-r--r--  gcc/tree-ssa-sink.c | 5
-rw-r--r--  gcc/tree-ssa-strlen.c | 3
-rw-r--r--  gcc/tree-ssa-structalias.c | 12
-rw-r--r--  gcc/tree-ssa-tail-merge.c | 8
-rw-r--r--  gcc/tree-ssa-ter.c | 8
-rw-r--r--  gcc/tree-ssa-threadedge.c | 9
-rw-r--r--  gcc/tree-ssa-threadupdate.c | 354
-rw-r--r--  gcc/tree-ssa-threadupdate.h | 2
-rw-r--r--  gcc/tree-ssa-uncprop.c | 3
-rw-r--r--  gcc/tree-ssa-uninit.c | 17
-rw-r--r--  gcc/tree-ssa.c | 2
-rw-r--r--  gcc/tree-ssanames.c | 2
-rw-r--r--  gcc/tree-stdarg.c | 3
-rw-r--r--  gcc/tree-streamer-in.c | 1
-rw-r--r--  gcc/tree-streamer-out.c | 1
-rw-r--r--  gcc/tree-switch-conversion.c | 3
-rw-r--r--  gcc/tree-tailcall.c | 21
-rw-r--r--  gcc/tree-vect-data-refs.c | 8
-rw-r--r--  gcc/tree-vect-generic.c | 2
-rw-r--r--  gcc/tree-vect-loop-manip.c | 3
-rw-r--r--  gcc/tree-vect-loop.c | 19
-rw-r--r--  gcc/tree-vect-patterns.c | 2
-rw-r--r--  gcc/tree-vect-slp.c | 2
-rw-r--r--  gcc/tree-vect-stmts.c | 2
-rw-r--r--  gcc/tree-vectorizer.c | 4
-rw-r--r--  gcc/tree-vrp.c | 6
-rw-r--r--  gcc/tree.c | 12
-rw-r--r--  gcc/tree.h | 635
-rw-r--r--  gcc/tsan.c | 6
-rw-r--r--  gcc/ubsan.c | 343
-rw-r--r--  gcc/ubsan.h | 21
-rw-r--r--  gcc/value-prof.c | 3
-rw-r--r--  gcc/var-tracking.c | 32
-rw-r--r--  gcc/varasm.c | 16
-rw-r--r--  gcc/varasm.h | 69
-rw-r--r--  gcc/varpool.c | 1
-rw-r--r--  gcc/vmsdbgout.c | 1
-rw-r--r--  gcc/vtable-verify.c | 1
-rw-r--r--  gcc/xcoffout.c | 1
-rw-r--r--  libbacktrace/ChangeLog | 38
-rw-r--r--  libbacktrace/Makefile.am | 1
-rw-r--r--  libbacktrace/Makefile.in | 7
-rw-r--r--  libbacktrace/atomic.c | 113
-rw-r--r--  libbacktrace/backtrace.h | 7
-rw-r--r--  libbacktrace/btest.c | 30
-rw-r--r--  libbacktrace/config.h.in | 3
-rwxr-xr-x  libbacktrace/configure | 38
-rw-r--r--  libbacktrace/configure.ac | 18
-rw-r--r--  libbacktrace/dwarf.c | 36
-rw-r--r--  libbacktrace/elf.c | 55
-rw-r--r--  libbacktrace/fileline.c | 40
-rw-r--r--  libbacktrace/internal.h | 43
-rw-r--r--  libcpp/ChangeLog | 4
-rw-r--r--  libcpp/lex.c | 9
-rw-r--r--  libgcc/ChangeLog | 20
-rw-r--r--  libgcc/libgcov-driver.c | 69
-rwxr-xr-x  libgo/configure | 2
-rw-r--r--  libgo/configure.ac | 2
-rw-r--r--  libgo/go/reflect/all_test.go | 40
-rw-r--r--  libgo/runtime/go-caller.c | 2
-rw-r--r--  libgo/runtime/go-reflect-call.c | 5
-rwxr-xr-x  libgo/testsuite/gotest | 2
-rw-r--r--  libstdc++-v3/ChangeLog | 52
-rw-r--r--  libstdc++-v3/config/abi/pre/gnu.ver | 3
-rw-r--r--  libstdc++-v3/doc/xml/manual/status_cxx2014.xml | 83
-rw-r--r--  libstdc++-v3/include/experimental/string_view | 12
-rw-r--r--  libstdc++-v3/testsuite/17_intro/static.cc | 5
-rw-r--r--  libstdc++-v3/testsuite/20_util/addressof/1.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/20_util/allocator_traits/members/destroy.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/20_util/allocator_traits/members/select.cc | 3
-rw-r--r--  libstdc++-v3/testsuite/23_containers/forward_list/allocator/noexcept.cc | 24
-rw-r--r--  libstdc++-v3/testsuite/23_containers/map/allocator/noexcept.cc | 23
-rw-r--r--  libstdc++-v3/testsuite/23_containers/multimap/allocator/noexcept.cc | 23
-rw-r--r--  libstdc++-v3/testsuite/23_containers/multiset/allocator/noexcept.cc | 22
-rw-r--r--  libstdc++-v3/testsuite/23_containers/set/allocator/noexcept.cc | 23
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_map/allocator/noexcept.cc | 24
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/noexcept.cc | 24
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/noexcept.cc | 24
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_set/allocator/noexcept.cc | 24
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/allocator/noexcept.cc | 22
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/allocator/swap.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/28_regex/basic_regex/ctors/extended/cstring.cc | 3
-rw-r--r--  libstdc++-v3/testsuite/28_regex/init-list.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/28_regex/regex_error/regex_error.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/28_regex/sub_match/cast_char.cc | 9
-rw-r--r--  libstdc++-v3/testsuite/28_regex/sub_match/cast_wchar_t.cc | 9
-rw-r--r--  libstdc++-v3/testsuite/28_regex/sub_match/length.cc | 6
-rw-r--r--  libstdc++-v3/testsuite/experimental/string_view/capacity/1.cc | 7
-rw-r--r--  libstdc++-v3/testsuite/experimental/string_view/inserters/pod/10081-out.cc | 7
-rw-r--r--  libstdc++-v3/testsuite/tr1/7_regular_expressions/regex/cons/char/c_string_extended.cc | 42
561 files changed, 12637 insertions, 4954 deletions
diff --git a/ChangeLog b/ChangeLog
index a8b909cb735..5f576400655 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2013-11-18 Jan Hubicka <jh@suse.cz>
+
+ * config/bootstrap-lto.mk: Use -ffat-lto-objects.
+
2013-11-15 David Edelsohn <dje.gcc@gmail.com>
* MAINTAINERS (Global Reviewers, c++): Remove Mark Mitchell.
diff --git a/config/ChangeLog b/config/ChangeLog
index 83273a0816c..313572db18a 100644
--- a/config/ChangeLog
+++ b/config/ChangeLog
@@ -1,3 +1,7 @@
+2013-11-19 Marek Polacek <polacek@redhat.com>
+
+ * bootstrap-ubsan.mk (POSTSTAGE1_LDFLAGS): Add -ldl.
+
2013-11-15 Andreas Schwab <schwab@linux-m68k.org>
* picflag.m4 (m68k-*-*): Use default PIC flag.
diff --git a/config/bootstrap-lto.mk b/config/bootstrap-lto.mk
index bbd3515eaa4..27bad1529be 100644
--- a/config/bootstrap-lto.mk
+++ b/config/bootstrap-lto.mk
@@ -1,5 +1,7 @@
# This option enables LTO for stage2 and stage3.
+# FIXME: Our build system is not yet able to use gcc-ar wrapper, so we need
+# to go with -ffat-lto-objects.
-STAGE2_CFLAGS += -flto=jobserver -frandom-seed=1
-STAGE3_CFLAGS += -flto=jobserver -frandom-seed=1
+STAGE2_CFLAGS += -flto=jobserver -frandom-seed=1 -ffat-lto-objects
+STAGE3_CFLAGS += -flto=jobserver -frandom-seed=1 -ffat-lto-objects
STAGEprofile_CFLAGS += -fno-lto
diff --git a/config/bootstrap-ubsan.mk b/config/bootstrap-ubsan.mk
index 2d21e832e21..0cd8b172b0f 100644
--- a/config/bootstrap-ubsan.mk
+++ b/config/bootstrap-ubsan.mk
@@ -2,6 +2,6 @@
STAGE2_CFLAGS += -fsanitize=undefined
STAGE3_CFLAGS += -fsanitize=undefined
-POSTSTAGE1_LDFLAGS += -fsanitize=undefined -static-libubsan -lpthread \
+POSTSTAGE1_LDFLAGS += -fsanitize=undefined -static-libubsan -lpthread -ldl \
-B$$r/prev-$(TARGET_SUBDIR)/libsanitizer/ubsan/ \
-B$$r/prev-$(TARGET_SUBDIR)/libsanitizer/ubsan/.libs
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7220961ba60..d962593581c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,1666 @@
+2013-11-20 Kenneth Zadeck <zadeck@naturalbridge.com>
+ Mike Stump <mikestump@comcast.net>
+ Richard Sandiford <rdsandiford@googlemail.com>
+
+ * alias.c (ao_ref_from_mem): Use tree_to_shwi and tree_to_uhwi
+ instead of TREE_INT_CST_LOW, in cases where there is a protecting
+ tree_fits_shwi_p or tree_fits_uhwi_p.
+ * builtins.c (fold_builtin_powi): Likewise.
+ * config/epiphany/epiphany.c (epiphany_special_round_type_align):
+ Likewise.
+ * dbxout.c (dbxout_symbol): Likewise.
+ * expr.c (expand_expr_real_1): Likewise.
+ * fold-const.c (fold_single_bit_test, fold_plusminus_mult_expr)
+ (fold_binary_loc): Likewise.
+ * gimple-fold.c (fold_const_aggregate_ref_1): Likewise.
+ * gimple-ssa-strength-reduction.c (stmt_cost): Likewise.
+ * omp-low.c (lower_omp_for_lastprivate): Likewise.
+ * simplify-rtx.c (delegitimize_mem_from_attrs): Likewise.
+ * stor-layout.c (compute_record_mode): Likewise.
+ * tree-cfg.c (verify_expr): Likewise.
+ * tree-dfa.c (get_ref_base_and_extent): Likewise.
+ * tree-pretty-print.c (dump_array_domain): Likewise.
+ * tree-sra.c (build_user_friendly_ref_for_offset): Likewise.
+ * tree-ssa-ccp.c (fold_builtin_alloca_with_align): Likewise.
+ * tree-ssa-loop-ivopts.c (get_loop_invariant_expr_id): Likewise.
+ * tree-ssa-math-opts.c (execute_cse_sincos): Likewise.
+ * tree-ssa-phiopt.c (hoist_adjacent_loads): Likewise.
+ * tree-ssa-reassoc.c (acceptable_pow_call): Likewise.
+ * tree-ssa-sccvn.c (copy_reference_ops_from_ref): Likewise.
+ (ao_ref_init_from_vn_reference, vn_reference_fold_indirect): Likewise.
+ (vn_reference_lookup_3, simplify_binary_expression): Likewise.
+ * tree-ssa-structalias.c (bitpos_of_field): Likewise.
+ (get_constraint_for_1, push_fields_onto_fieldstack): Likewise.
+ (create_variable_info_for_1): Likewise.
+ * tree-vect-data-refs.c (vect_compute_data_ref_alignment): Likewise.
+ (vect_verify_datarefs_alignment): Likewise.
+ (vect_analyze_data_ref_accesses): Likewise.
+ (vect_prune_runtime_alias_test_list): Likewise.
+ * tree-vectorizer.h (NITERS_KNOWN_P): Likewise.
+
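For readers who do not work on GCC internals: the conversion described in the entry above swaps the raw TREE_INT_CST_LOW accessor for the typed tree_to_shwi/tree_to_uhwi helpers wherever a tree_fits_shwi_p/tree_fits_uhwi_p check already guards the access. A minimal sketch of the idiom, assuming GCC's internal tree API (tree.h); the variable name is illustrative only:

    if (tree_fits_uhwi_p (len))
      {
        /* LEN is known to fit an unsigned HOST_WIDE_INT, so use the
           checked accessor instead of the raw TREE_INT_CST_LOW (len).  */
        unsigned HOST_WIDE_INT n = tree_to_uhwi (len);
        /* ... use N ...  The signed variant pairs tree_fits_shwi_p
           with tree_to_shwi.  */
      }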
+2013-11-20 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * tree-ssa-alias.c (ao_ref_init_from_ptr_and_size): Avoid signed
+ overflow. Use tree_to_shwi.
+
+2013-11-20 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * fold-const.c (fold_binary_loc): Use unsigned rather than signed
+ HOST_WIDE_INTs when folding (x >> c) << c.
+
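The reason unsigned HOST_WIDE_INTs matter for this fold can be seen with plain integers: (x >> c) << c clears the low C bits of X, which is naturally expressed with an all-ones mask shifted left, and that mask must be built in unsigned arithmetic because left-shifting a negative signed value is undefined behaviour. A standalone illustration of the arithmetic, not the GCC source:

    #include <stdint.h>

    /* For c < 64:  (x >> c) << c  ==  x & (~(uint64_t) 0 << c).  */
    uint64_t
    fold_shift_pair (uint64_t x, unsigned c)
    {
      uint64_t mask = ~(uint64_t) 0 << c;  /* well defined; the signed form is not */
      return x & mask;
    }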
+2013-11-20 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+ Dominik Vogt <vogt@linux.vnet.ibm.com>
+
+ * config/s390/s390.c (s390_canonicalize_comparison): Don't fold
+ int comparisons with an out of range condition code.
+ (s390_optimize_nonescaping_tx): Skip empty BBs.
+ Generate the new tbegin RTX when removing the FPR clobbers (with
+ two SETs).
+ (s390_expand_tbegin): Fix the retry loop counter. Copy CC to the
+ result before doing the retry calculations.
+ (s390_init_builtins): Make tbegin "returns_twice" and tabort
+ "noreturn".
+ * config/s390/s390.md (UNSPECV_TBEGIN_TDB): New constant used for
+ the TDB setting part of a tbegin.
+ ("tbegin_1", "tbegin_nofloat_1"): Add a set for the TDB.
+ ("tx_assist"): Set unused argument to an immediate zero instead of
+ loading zero into a GPR and pass it as argument.
+ * config/s390/htmxlintrin.h (__TM_simple_begin, __TM_begin):
+ Remove inline and related attributes.
+ (__TM_nesting_depth, __TM_is_user_abort, __TM_is_named_user_abort)
+ (__TM_is_illegal, __TM_is_footprint_exceeded)
+ (__TM_is_nested_too_deep, __TM_is_conflict): Fix format value
+ check.
+
+2013-11-20 Richard Biener <rguenther@suse.de>
+
+ PR lto/59035
+ * lto-opts.c (lto_write_options): Write defaults only if
+ they were not explicitly specified. Also write
+ -ffp-contract default.
+ * lto-wrapper.c (merge_and_complain): Merge -ffp-contract
+ conservatively.
+ (run_gcc): Pass through -ffp-contract.
+
+2013-11-20 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+
+ * config/mips/mips.c (r10k_simplify_address): Eliminate macro usage.
+
+2013-11-20 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * gcc/config/aarch64/aarch64-builtins.c
+ (aarch64_simd_itype): Remove.
+ (aarch64_simd_builtin_datum): Remove itype, add
+ qualifiers pointer.
+ (VAR1): Use qualifiers.
+ (aarch64_build_scalar_type): New.
+ (aarch64_build_vector_type): Likewise.
+ (aarch64_build_type): Likewise.
+ (aarch64_init_simd_builtins): Refactor, remove special cases,
+ consolidate main loop.
+ (aarch64_simd_expand_args): Likewise.
+
+2013-11-19 Joshua J Cogliati <jrincayc@yahoo.com>
+
+ PR c/53001
+ * doc/invoke.texi: Add documentation about -Wfloat-conversion.
+
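The new warning documented above (split out of -Wconversion, see PR c/53001) flags implicit conversions that reduce the precision of a real value. A small illustration of the kind of code it is meant to catch, not part of the patch itself:

    /* Compile with: gcc -Wfloat-conversion demo.c  */
    float
    demo (double d, float f)
    {
      float x = d;   /* double narrowed to float: precision may be lost */
      int   i = f;   /* real value converted to integer: fraction dropped */
      return x + i;
    }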
+2013-11-19 Miro Kropacek <miro.kropacek@gmail.com>
+
+ * config/m68k/m68k.c (m68k_option_override): Fix typo.
+
+2013-11-19 David Malcolm <dmalcolm@redhat.com>
+
+ * gdbhooks.py (VecPrinter): New class, for prettyprinting pointers
+ to "vec<>" instances.
+ (build_pretty_printer): Register the vec<>* prettyprinter.
+
+2013-11-19 David Malcolm <dmalcolm@redhat.com>
+
+ * gdbhooks.py (GdbSubprinter.__init__): Drop str_type_ field.
+ (GdbSubprinter.handles_type): New.
+ (GdbSubprinterTypeList): New subclass of GdbSubprinter.
+ (GdbSubprinterRegex): New subclass of GdbSubprinter.
+ (GdbPrettyPrinters.add_printer): Remove in favor of...
+ (GdbPrettyPrinters.add_printer_for_types): ...this new method and...
+ (GdbPrettyPrinters.add_printer_for_regex): ...this other new method.
+ (GdbPrettyPrinters.__call__): Update search for subprinter
+ to use handles_type method.
+ (build_pretty_printer): Update registration of subprinters to
+ use the new API above, supporting multiple spelling of each type,
+ and allowing for future regex-based subprinters.
+
+2013-11-19 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/rs6000.c (altivec_expand_vec_perm_const): Adjust
+ V16QI vector splat case for little endian.
+
+2013-11-19 Jeff Law <law@redhat.com>
+
+ * tree-ssa-threadedge.c (thread_across_edge): After threading
+ through a joiner, allow threading a normal block requiring duplication.
+
+ * tree-ssa-threadupdate.c (thread_block_1): Improve code to detect
+ jump threading requests that would muck up the loop structures.
+
+ * tree-ssa-threadupdate.c: Fix trailing whitespace.
+ * tree-ssa-threadupdate.h: Likewise.
+
+2013-11-19 Mike Stump <mikestump@comcast.net>
+
+ * gdbinit.in: Add pmz to print out mpz values.
+
+2013-11-20 Jan Hubicka <jh@suse.cz>
+
+ * common.opt (ffat-lto-objects): Disable by default.
+ * doc/invoke.texi (fat-lto-objects): Update documentation.
+ * opts.c: Enable -ffat-lto-objects on setups where the LTO plugin
+ is disabled.
+
+2013-11-19 Martin Jambor <mjambor@suse.cz>
+
+ PR rtl-optimization/59099
+ * ira.c (find_moveable_pseudos): Put back various analyses from ira()
+ here.
+ (ira): Move init_reg_equiv and call to
+ split_live_ranges_for_shrink_wrap up, remove analyses around call
+ to find_moveable_pseudos.
+
+2013-11-20 Alan Modra <amodra@gmail.com>
+
+ * config/rs6000/sysv4.h (CC1_ENDIAN_LITTLE_SPEC): Define as empty.
+ * config/rs6000/rs6000.c (rs6000_option_override_internal): Default
+ to strict alignment on older processors when little-endian.
+ * config/rs6000/linux64.h (PROCESSOR_DEFAULT64): Default to power8
+ for ELFv2.
+
+2013-11-19 Teresa Johnson <tejohnson@google.com>
+
+ * common/config/i386/i386-common.c: Enable
+ -freorder-blocks-and-partition at -O2 and up for x86.
+ * doc/invoke.texi: Update -freorder-blocks-and-partition default.
+ * opts.c (finish_options): Only warn if
+ -freorder-blocks-and-partition was set on command line.
+
+2013-11-19 Sriraman Tallam <tmsriram@google.com>
+
+ * final.c (final_scan_insn): Emit a label for the split
+ cold function part. Label name is formed by suffixing
+ the original function name with "cold".
+
+2013-11-19 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (ENTRY_BLOCK_PTR_FOR_FUNCTION): Rename macro to...
+ (ENTRY_BLOCK_PTR_FOR_FN): ...this.
+ (EXIT_BLOCK_PTR_FOR_FUNCTION): Rename macro to...
+ (EXIT_BLOCK_PTR_FOR_FN): ...this.
+ (ENTRY_BLOCK_PTR): Eliminate macro as work towards making uses of
+ cfun be explicit.
+ (EXIT_BLOCK_PTR): Likewise.
+ (FOR_ALL_BB): Rework for now to eliminate use of "ENTRY_BLOCK_PTR".
+ (FOR_ALL_BB_FN): Update for renaming of
+ "ENTRY_BLOCK_PTR_FOR_FUNCTION" to "ENTRY_BLOCK_PTR_FOR_FN".
+
+ * cfg.c (init_flow): Likewise.
+ (check_bb_profile): Likewise.
+ * cfganal.c (pre_and_rev_post_order_compute_fn): Likewise.
+ * cfgcleanup.c (walk_to_nondebug_insn): Likewise.
+ * cfghooks.c (account_profile_record): Likewise.
+ * cfgloop.c (init_loops_structure): Likewise.
+ * cgraphbuild.c (record_eh_tables): Likewise.
+ (compute_call_stmt_bb_frequency): Likewise.
+ * ipa-inline-analysis.c (compute_bb_predicates): Likewise.
+ * lto-streamer-in.c (input_cfg): Likewise.
+ * predict.c (maybe_hot_frequency_p): Likewise.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+ * tree-inline.c (initialize_cfun): Likewise.
+ (copy_cfg_body): Likewise.
+ (copy_body): Likewise.
+ (tree_function_versioning): Likewise.
+
+ * bb-reorder.c (add_labels_and_missing_jumps): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (duplicate_computed_gotos): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_rarely_executed_basic_blocks_and_crossing_edges): Remove uses of
+ macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (connect_traces): Likewise.
+ (rest_of_handle_reorder_blocks): Remove usage of EXIT_BLOCK_PTR macro.
+ (bb_to_key): Remove usage of ENTRY_BLOCK_PTR macro.
+ (fix_crossing_conditional_branches): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ (find_traces_1_round): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (fix_up_fall_thru_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_traces): Remove usage of ENTRY_BLOCK_PTR macro.
+ (fix_up_crossing_landing_pad): Remove usage of EXIT_BLOCK_PTR macro.
+ (rotate_loop): Likewise.
+ * bt-load.c (migrate_btr_def): Remove usage of ENTRY_BLOCK_PTR macro.
+ * cfg.c (clear_aux_for_edges): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (alloc_aux_for_edges): Likewise.
+ (clear_bb_flags): Remove usage of ENTRY_BLOCK_PTR macro.
+ (cached_make_edge): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compact_blocks): Likewise.
+ (clear_edges): Likewise.
+ * cfganal.c (single_pred_before_succ_order): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (bitmap_union_of_succs): Remove usage of EXIT_BLOCK_PTR macro.
+ (bitmap_union_of_preds): Remove usage of ENTRY_BLOCK_PTR macro.
+ (bitmap_intersection_of_succs): Remove usage of EXIT_BLOCK_PTR macro.
+ (bitmap_intersection_of_preds): Remove usage of ENTRY_BLOCK_PTR macro.
+ (inverted_post_order_compute): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compute_dominance_frontiers_1): Remove usage of ENTRY_BLOCK_PTR macro.
+ (post_order_compute): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (connect_infinite_loops_to_exit): Remove usage of EXIT_BLOCK_PTR macro.
+ (remove_fake_edges): Remove usage of ENTRY_BLOCK_PTR macro.
+ (add_noreturn_fake_exit_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_pdom): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (remove_fake_exit_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (verify_edge_list): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (print_edge_list): Likewise.
+ (create_edge_list): Likewise.
+ (find_unreachable_blocks): Remove usage of ENTRY_BLOCK_PTR macro.
+ (mark_dfs_back_edges): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * cfgbuild.c (find_bb_boundaries): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (find_many_sub_basic_blocks): Remove usage of EXIT_BLOCK_PTR macro.
+ (make_edges): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * cfgcleanup.c (delete_unreachable_blocks): Likewise.
+ (try_optimize_cfg): Likewise.
+ (try_head_merge_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (try_crossjump_to_edge): Remove usage of ENTRY_BLOCK_PTR macro.
+ (try_crossjump_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (merge_blocks_move): Remove usage of ENTRY_BLOCK_PTR macro.
+ (outgoing_edges_match): Remove usage of EXIT_BLOCK_PTR macro.
+ (try_forward_edges): Likewise.
+ (try_simplify_condjump): Likewise.
+ * cfgexpand.c (gimple_expand_cfg): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (construct_exit_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (construct_init_block): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (expand_gimple_basic_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (expand_gimple_tailcall): Likewise.
+ * cfghooks.c (can_duplicate_block_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (tidy_fallthru_edges): Likewise.
+ (verify_flow_info): Likewise.
+ * cfgloop.c (flow_bb_inside_loop_p): Likewise.
+ (num_loop_branches): Remove usage of EXIT_BLOCK_PTR macro.
+ (disambiguate_multiple_latches): Remove usage of ENTRY_BLOCK_PTR macro.
+ (get_loop_exit_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (bb_loop_header_p): Remove usage of ENTRY_BLOCK_PTR macro.
+ (get_loop_body_in_bfs_order): Remove usage of EXIT_BLOCK_PTR macro.
+ (get_loop_body_in_dom_order): Likewise.
+ (get_loop_body): Likewise.
+ * cfgloopanal.c (mark_irreducible_loops): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * cfgloopmanip.c (create_preheader): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (remove_path): Remove usage of EXIT_BLOCK_PTR macro.
+ (fix_bb_placement): Likewise.
+ * cfgrtl.c (rtl_block_empty_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (rtl_can_remove_branch_p): Remove usage of EXIT_BLOCK_PTR macro.
+ (cfg_layout_split_edge): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (rtl_flow_call_edges_add): Remove usage of EXIT_BLOCK_PTR macro.
+ (cfg_layout_can_merge_blocks_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (cfg_layout_redirect_edge_and_branch): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (fixup_fallthru_exit_predecessor): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (fixup_reorder_chain): Likewise.
+ (relink_block_chain): Likewise.
+ (cfg_layout_delete_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (rtl_verify_bb_layout): Remove usage of ENTRY_BLOCK_PTR macro.
+ (cfg_layout_duplicate_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (force_one_exit_fallthru): Likewise.
+ (rtl_verify_fallthru): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (rtl_verify_edges): Likewise.
+ (commit_edge_insertions): Likewise.
+ (commit_one_edge_insertion): Likewise.
+ (rtl_split_edge): Likewise.
+ (force_nonfallthru_and_redirect): Likewise.
+ (outof_cfg_layout_mode): Remove usage of EXIT_BLOCK_PTR macro.
+ (skip_insns_after_block): Likewise.
+ (fixup_partition_crossing): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (purge_dead_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (rtl_can_merge_blocks): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (contains_no_active_insn_p): Likewise.
+ (emit_insn_at_entry): Remove usage of ENTRY_BLOCK_PTR macro.
+ (entry_of_function): Likewise.
+ (last_bb_in_partition): Remove usage of EXIT_BLOCK_PTR macro.
+ (fixup_new_cold_bb): Likewise.
+ (patch_jump_insn): Likewise.
+ (try_redirect_by_replacing_jump): Likewise.
+ (block_label): Likewise.
+ (could_fall_through): Likewise.
+ (can_fallthru): Likewise.
+ * cgraphbuild.c (cgraph_rebuild_references): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (rebuild_cgraph_edges): Likewise.
+ * cgraphunit.c (init_lowered_empty_function): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (expand_thunk): Remove usage of EXIT_BLOCK_PTR macro.
+ * combine.c (get_last_value): Remove usage of ENTRY_BLOCK_PTR macro.
+ (distribute_links): Remove usage of EXIT_BLOCK_PTR macro.
+ (get_last_value_validate): Remove usage of ENTRY_BLOCK_PTR macro.
+ (try_combine): Remove usage of EXIT_BLOCK_PTR macro.
+ (reg_num_sign_bit_copies_for_combine): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (reg_nonzero_bits_for_combine): Likewise.
+ (set_nonzero_bits_and_sign_copies): Likewise.
+ (combine_instructions): Likewise.
+ * cprop.c (one_cprop_pass): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (bypass_conditional_jumps): Likewise.
+ (bypass_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_implicit_sets): Likewise.
+ (cprop_jump): Likewise.
+ * cse.c (cse_cc_succs): Likewise.
+ (cse_find_path): Likewise.
+ * df-problems.c (df_lr_confluence_0): Likewise.
+ * df-scan.c (df_entry_block_defs_collect): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (df_exit_block_uses_collect): Remove usage of EXIT_BLOCK_PTR macro.
+ * dominance.c (iterate_fix_dominators): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (calc_idoms): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (determine_dominators_for_sons): Remove usage of ENTRY_BLOCK_PTR macro.
+ (calc_dfs_tree): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (prune_bbs_to_update_dominators): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (calc_dfs_tree_nonrec): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * domwalk.c (cmp_bb_postorder): Likewise.
+ * dse.c (dse_step1): Remove usage of EXIT_BLOCK_PTR macro.
+ * except.c (finish_eh_generation): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (sjlj_emit_function_enter): Likewise.
+ * final.c (compute_alignments): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns): Remove uses of
+ macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (reposition_prologue_and_epilogue_notes): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ (convert_jumps_to_returns): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (regno_clobbered_at_setjmp): Remove usage of ENTRY_BLOCK_PTR macro.
+ (next_block_for_reg): Remove usage of EXIT_BLOCK_PTR macro.
+ * gcse.c (hoist_code): Remove usage of ENTRY_BLOCK_PTR macro.
+ (update_bb_reg_pressure): Remove usage of EXIT_BLOCK_PTR macro.
+ (compute_code_hoist_vbeinout): Likewise.
+ (should_hoist_expr_to_dom): Remove usage of ENTRY_BLOCK_PTR macro.
+ (pre_expr_reaches_here_p_work): Likewise.
+ * gimple-iterator.c (gsi_commit_edge_inserts): Likewise.
+ (gimple_find_edge_insert_loc): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * gimple-ssa-strength-reduction.c (slsr_process_phi): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * graph.c (draw_cfg_nodes_for_loop): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * graphite-clast-to-gimple.c (translate_clast_user): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * graphite-scop-detection.c (build_scops): Likewise.
+ (create_sese_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (scopdet_basic_block_info): Remove usage of ENTRY_BLOCK_PTR macro.
+ * haifa-sched.c (restore_bb_notes): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ (unlink_bb_notes): Likewise.
+ (create_check_block_twin): Likewise.
+ (init_before_recovery): Likewise.
+ (sched_extend_bb): Likewise.
+ (priority): Likewise.
+ * hw-doloop.c (reorder_loops): Likewise.
+ (discover_loop): Likewise.
+ * ifcvt.c (dead_or_predicable): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (find_if_case_1): Remove usage of EXIT_BLOCK_PTR macro.
+ (block_has_only_trap): Likewise.
+ (cond_exec_find_if_block): Likewise.
+ (merge_if_block): Likewise.
+ * ipa-inline-analysis.c (param_change_prob): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (record_modified): Likewise.
+ * ipa-pure-const.c (execute_warn_function_noreturn): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ (local_pure_const): Likewise.
+ * ipa-split.c (split_function): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (find_split_points): Likewise.
+ (consider_split): Likewise.
+ (find_return_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (verify_non_ssa_vars): Remove usage of ENTRY_BLOCK_PTR macro.
+ * ira-build.c (ira_loop_tree_body_rev_postorder): Likewise.
+ * ira-color.c (print_loop_title): Remove usage of EXIT_BLOCK_PTR macro.
+ * ira-emit.c (entered_from_non_parent_p): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (ira_emit): Remove usage of EXIT_BLOCK_PTR macro.
+ * ira-int.h (ira_assert): Remove usage of ENTRY_BLOCK_PTR macro.
+ * ira.c (split_live_ranges_for_shrink_wrap): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * lcm.c (compute_rev_insert_delete): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (compute_nearerout): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compute_farthest): Likewise.
+ (compute_available): Likewise.
+ (compute_insert_delete): Remove usage of EXIT_BLOCK_PTR macro.
+ (compute_laterin): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compute_earliest): Likewise.
+ (compute_antinout_edge): Likewise.
+ * loop-iv.c (simplify_using_initial_values): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * loop-unswitch.c (unswitch_loop): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * lra-assigns.c (find_hard_regno_for): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * lra-constraints.c (lra_inheritance): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * lra-lives.c (lra_create_live_ranges): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * lra.c (has_nonexceptional_receiver): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * lto-streamer-in.c (input_function): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * lto-streamer-out.c (output_cfg): Likewise.
+ * mcf.c (adjust_cfg_counts): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (create_fixup_graph): Remove usage of ENTRY_BLOCK_PTR macro.
+ * mode-switching.c (optimize_mode_switching): Likewise.
+ (create_pre_exit): Remove usage of EXIT_BLOCK_PTR macro.
+ * modulo-sched.c (rest_of_handle_sms): Likewise.
+ (canon_loop): Likewise.
+ * omp-low.c (build_omp_regions): Remove usage of ENTRY_BLOCK_PTR macro.
+ * postreload-gcse.c (eliminate_partially_redundant_loads): Remove uses
+ of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * predict.c (rebuild_frequencies): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (propagate_freq): Remove usage of EXIT_BLOCK_PTR macro.
+ (estimate_bb_frequencies): Remove usage of ENTRY_BLOCK_PTR macro.
+ (tree_estimate_probability_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (expensive_function_p): Remove usage of ENTRY_BLOCK_PTR macro.
+ (tree_bb_level_predictions): Remove usage of EXIT_BLOCK_PTR macro.
+ (counts_to_freqs): Remove usage of ENTRY_BLOCK_PTR macro.
+ (apply_return_prediction): Remove usage of EXIT_BLOCK_PTR macro.
+ (estimate_loops): Remove usage of ENTRY_BLOCK_PTR macro.
+ (gimple_predict_edge): Likewise.
+ (probably_never_executed): Likewise.
+ * profile.c (find_spanning_tree): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (branch_prob): Likewise.
+ (compute_branch_probabilities): Likewise.
+ (compute_frequency_overlap): Remove usage of ENTRY_BLOCK_PTR macro.
+ (is_inconsistent): Remove usage of EXIT_BLOCK_PTR macro.
+ (read_profile_edge_counts): Remove usage of ENTRY_BLOCK_PTR macro.
+ (set_bb_counts): Likewise.
+ (correct_negative_edge_counts): Likewise.
+ (get_exec_counts): Likewise.
+ (instrument_values): Likewise.
+ (instrument_edges): Likewise.
+ * reg-stack.c (convert_regs): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compensate_edges): Remove usage of ENTRY_BLOCK_PTR macro.
+ (convert_regs_exit): Remove usage of EXIT_BLOCK_PTR macro.
+ (convert_regs_entry): Remove usage of ENTRY_BLOCK_PTR macro.
+ (reg_to_stack): Likewise.
+ * regs.h (REG_N_SETS): Likewise.
+ * reload.c (find_dummy_reload): Likewise.
+ (combine_reloads): Likewise.
+ (push_reload): Likewise.
+ * reload1.c (has_nonexceptional_receiver): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ * resource.c (mark_target_live_regs): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (find_basic_block): Likewise.
+ * sched-ebb.c (ebb_add_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (schedule_ebbs): Likewise.
+ * sched-int.h (sel_sched_p): Likewise.
+ * sched-rgn.c (compute_dom_prob_ps): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (rgn_add_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (haifa_find_rgns): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (propagate_deps): Remove usage of EXIT_BLOCK_PTR macro.
+ (extend_rgns): Likewise.
+ (find_single_block_region): Likewise.
+ * sel-sched-ir.c (sel_remove_loop_preheader): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (setup_nop_and_exit_insns): Remove usage of EXIT_BLOCK_PTR macro.
+ (sel_create_recovery_block): Likewise.
+ (bb_ends_ebb_p): Likewise.
+ (sel_bb_end): Likewise.
+ (sel_bb_head): Likewise.
+ (free_lv_sets): Likewise.
+ (init_lv_sets): Likewise.
+ (tidy_control_flow): Likewise.
+ (maybe_tidy_empty_bb): Likewise.
+ * sel-sched-ir.h (_succ_iter_cond): Likewise.
+ (_succ_iter_start): Likewise.
+ (sel_bb_empty_or_nop_p): Likewise.
+ (get_loop_exit_edges_unique_dests): Likewise.
+ (inner_loop_header_p): Likewise.
+ * sel-sched.c (create_block_for_bookkeeping): Likewise.
+ (find_block_for_bookkeeping): Likewise.
+ * store-motion.c (remove_reachable_equiv_notes): Likewise.
+ (insert_store): Likewise.
+ * trans-mem.c (ipa_tm_transform_clone): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (tm_memopt_compute_available): Remove usage of EXIT_BLOCK_PTR macro.
+ (ipa_tm_scan_irr_function): Remove usage of ENTRY_BLOCK_PTR macro.
+ (gate_tm_init): Likewise.
+ (tm_region_init): Likewise.
+ * tree-cfg.c (execute_fixup_cfg): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (execute_warn_function_return): Remove usage of EXIT_BLOCK_PTR macro.
+ (split_critical_edges): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (print_loops): Remove usage of ENTRY_BLOCK_PTR macro.
+ (move_sese_region_to_fn): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (gimple_redirect_edge_and_branch): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (gimple_verify_flow_info): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (remove_edge_and_dominated_blocks): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ (make_edges): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (gimple_flow_call_edges_add): Remove usage of EXIT_BLOCK_PTR macro.
+ (make_blocks): Remove usage of ENTRY_BLOCK_PTR macro.
+ (build_gimple_cfg): Likewise.
+ (gimple_duplicate_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (gimple_can_merge_blocks_p): Likewise.
+ * tree-cfgcleanup.c (tree_forwarder_block_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * tree-complex.c (update_parameter_components): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * tree-if-conv.c (get_loop_body_in_if_conv_order): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ * tree-inline.c (tree_function_versioning): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (delete_unreachable_blocks_update_callgraph): Likewise.
+ (initialize_cfun): Likewise.
+ (copy_cfg_body): Remove usage of ENTRY_BLOCK_PTR macro.
+ (copy_edges_for_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (remap_ssa_name): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-into-ssa.c (update_ssa): Likewise.
+ (maybe_register_def): Remove usage of EXIT_BLOCK_PTR macro.
+ (insert_updated_phi_nodes_for): Remove usage of ENTRY_BLOCK_PTR macro.
+ (rewrite_into_ssa): Likewise.
+ (rewrite_debug_stmt_uses): Likewise.
+ * tree-outof-ssa.c (expand_phi_nodes): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * tree-profile.c (gimple_gen_ic_func_profiler): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * tree-scalar-evolution.h (block_before_loop): Likewise.
+ * tree-sra.c (sra_ipa_reset_debug_stmts): Likewise.
+ (dump_dereferences_table): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (analyze_caller_dereference_legality): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (propagate_dereference_distances): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (initialize_parameter_reductions): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * tree-ssa-ccp.c (gsi_prev_dom_bb_nondebug): Likewise.
+ (optimize_stack_restore): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-coalesce.c (create_outofssa_var_map): Likewise.
+ * tree-ssa-dce.c (eliminate_unnecessary_stmts): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (remove_dead_stmt): Remove usage of EXIT_BLOCK_PTR macro.
+ (propagate_necessity): Remove usage of ENTRY_BLOCK_PTR macro.
+ (mark_control_dependent_edges_necessary): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * tree-ssa-dom.c (eliminate_degenerate_phis): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (tree_ssa_dominator_optimize): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-live.c (verify_live_on_entry): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (calculate_live_on_exit): Likewise.
+ (set_var_live_on_entry): Remove usage of ENTRY_BLOCK_PTR macro.
+ (loe_visit_block): Likewise.
+ * tree-ssa-live.h (live_on_exit): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (live_on_entry): Likewise.
+ * tree-ssa-loop-ivopts.c (find_interesting_uses): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ * tree-ssa-loop-manip.c (compute_live_loop_exits): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * tree-ssa-loop-niter.c (simplify_using_initial_conditions): Likewise.
+ (bound_difference): Likewise.
+ * tree-ssa-loop-prefetch.c (may_use_storent_in_loop_p): Remove usage
+ of EXIT_BLOCK_PTR macro.
+ * tree-ssa-loop-unswitch.c (simplify_using_entry_checks): Remove usage
+ of ENTRY_BLOCK_PTR macro.
+ * tree-ssa-math-opts.c (register_division_in): Likewise.
+ * tree-ssa-phiprop.c (tree_ssa_phiprop): Likewise.
+ * tree-ssa-pre.c (compute_avail): Likewise.
+ (compute_antic): Remove usage of EXIT_BLOCK_PTR macro.
+ (insert): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-ssa-propagate.c (ssa_prop_init): Likewise.
+ (simulate_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (cfg_blocks_add): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (add_control_edge): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-reassoc.c (do_reassoc): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (build_and_add_sum): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-ssa-sink.c (nearest_common_dominator_of_uses): Likewise.
+ (execute_sink_code): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-uninit.c (find_dom): Remove usage of ENTRY_BLOCK_PTR macro.
+ (compute_control_dep_chain): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_pdom): Likewise.
+ (warn_uninitialized_vars): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-stdarg.c (reachable_at_most_once): Likewise.
+ * tree-tailcall.c (tree_optimize_tail_calls_1): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (eliminate_tail_call): Likewise.
+ * tsan.c (instrument_func_entry): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (instrument_func_exit): Remove usage of EXIT_BLOCK_PTR macro.
+ * var-tracking.c (vt_initialize): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (vt_add_function_parameter): Remove usage of ENTRY_BLOCK_PTR macro.
+ (vt_find_locations): Remove usage of EXIT_BLOCK_PTR macro.
+ (vt_stack_adjustments): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * varasm.c (assemble_start_function): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * config/bfin/bfin.c (hwloop_optimize): Likewise.
+ * config/nds32/nds32.c (nds32_fp_as_gp_check_available): Remove usage
+ of EXIT_BLOCK_PTR macro.
+ * config/arm/arm.c (require_pic_register): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (arm_r3_live_at_start_p): Likewise.
+ (any_sibcall_could_use_r3): Remove usage of EXIT_BLOCK_PTR macro.
+ * config/rs6000/rs6000.c (rs6000_emit_prologue): Likewise.
+ * config/frv/frv.c (frv_optimize_membar_global): Likewise.
+ * config/alpha/alpha.c (alpha_gp_save_rtx): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * config/i386/i386.c (ix86_count_insn): Likewise.
+ (ix86_seh_fixup_eh_fallthru): Remove usage of EXIT_BLOCK_PTR macro.
+ (ix86_pad_short_function): Likewise.
+ (ix86_compute_frame_layout): Remove usage of ENTRY_BLOCK_PTR macro.
+ (ix86_pad_returns): Remove usage of EXIT_BLOCK_PTR macro.
+ (ix86_eax_live_at_start_p): Remove usage of ENTRY_BLOCK_PTR macro.
+ (add_condition_to_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (ix86_expand_epilogue): Likewise.
+ * config/ia64/ia64.c (ia64_asm_unwind_emit): Likewise.
+ (ia64_expand_prologue): Likewise.
+
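The bulk of the entry above is a mechanical rewrite: every bare ENTRY_BLOCK_PTR or EXIT_BLOCK_PTR use is respelled so that the function whose CFG is being inspected is named explicitly. A before/after sketch, assuming GCC's internal basic-block API:

    /* Before this change the macros implicitly read the global cfun:
         basic_block bb = ENTRY_BLOCK_PTR;
       After it, the function is spelled out:  */
    basic_block entry_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
    basic_block exit_bb  = EXIT_BLOCK_PTR_FOR_FN (cfun);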
+2013-11-19 Catherine Moore <clm@codesourcery.com>
+
+ * doc/invoke.texi (mfix-rm7000, mno-fix-rm7000): Document.
+ * config/mips/mips.opt (mfix-rm7000): New option.
+ * config/mips/mips.h (ASM_SPEC): Handle mfix-rm7000.
+ * config/mips/mips.c (mips_reorg_process_insns): Disable
+ noreorder for TARGET_FIX_RM7000.
+
+2013-11-19 Oleg Endo <olegendo@gcc.gnu.org>
+
+ * config/sh/sh-c.c: Fix typo in include of file attribs.h.
+
+2013-11-19 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/arm.c (arm_new_rtx_costs):
+ Handle narrow mode add-shifts properly.
+ * config/arm/aarch-common.c (arm_rtx_shift_left_p): Remove static.
+ * config/arm/aarch-common-protos.h (arm_rtx_shift_left_p):
+ Declare extern.
+
+2013-11-19 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/arm/arm.md (zero_extend<mode>di2): Add type attribute.
+
+2013-11-19 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
+
+ * config/rs6000/vector.md ("mov<mode>"): Do not call
+ rs6000_emit_le_vsx_move to move into or out of GPRs.
+ * config/rs6000/rs6000.c (rs6000_emit_le_vsx_move): Assert
+ source and destination are not GPR hard regs.
+
+2013-11-19 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (n_edges_for_function): Rename macro to...
+ (n_edges_for_fn): ...this.
+ (n_edges): Eliminate macro as work towards making uses of
+ cfun be explicit.
+
+ * cfg.c (init_flow): Update for renaming of "n_edges_for_function"
+ to "n_edges_for_fn".
+
+ * cfg.c (unchecked_make_edge): Remove usage of n_edges macro.
+ (clear_edges): Likewise.
+ (free_edge): Likewise.
+ * cfghooks.c (dump_flow_info): Likewise.
+ * cprop.c (is_too_expensive): Likewise.
+ * df-core.c (df_worklist_dataflow_doublequeue): Likewise.
+ * gcse.c (is_too_expensive): Likewise.
+ (prune_insertions_deletions): Likewise.
+ * mcf.c (create_fixup_graph): Likewise.
+ * sched-rgn.c (haifa_find_rgns): Likewise.
+ * tree-cfg.c (gimple_dump_cfg): Likewise.
+ * var-tracking.c (variable_tracking_main_1): Likewise.
+
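The edge counter follows the same pattern as the block pointers above; a one-line sketch under the same assumptions:

    /* Was:  n_edges++;  */
    n_edges_for_fn (cfun)++;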
+2013-11-19 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_save_or_restore_fprs): Fix over
+ length lines.
+
+2013-11-19 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64.md
+ (aarch64_movdi_<mode>low, *add_<shift>_si_uxtw): Adjust whitespace.
+
+2013-11-19 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64.h (PROFILE_HOOK): Fix whitespace.
+
+2013-11-19 Joseph Myers <joseph@codesourcery.com>
+
+ * varasm.c (align_variable): Give error instead of warning for
+ unsupported alignment.
+ (assemble_noswitch_variable): Likewise.
+
+2013-11-19 Basile Starynkevitch <basile@starynkevitch.net>
+
+ * plugin.def (PLUGIN_INCLUDE_FILE): New event, invoked in
+ cb_file_change.
+
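A minimal plugin sketch of how the new event could be consumed; the callback body and the assumption that gcc_data carries the name of the included file are the editor's, not something the entry states:

    #include "gcc-plugin.h"

    int plugin_is_GPL_compatible;

    /* Called on PLUGIN_INCLUDE_FILE; GCC_DATA is assumed here to be the
       path of the file being entered (an assumption, see above).  */
    static void
    note_include (void *gcc_data, void *user_data ATTRIBUTE_UNUSED)
    {
      fprintf (stderr, "include: %s\n", (const char *) gcc_data);
    }

    int
    plugin_init (struct plugin_name_args *plugin_info,
                 struct plugin_gcc_version *version ATTRIBUTE_UNUSED)
    {
      register_callback (plugin_info->base_name, PLUGIN_INCLUDE_FILE,
                         note_include, NULL);
      return 0;
    }
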
+2013-11-19 Peter Bergner <bergner@vnet.ibm.com>
+
+ * loop-doloop.c (doloop_optimize_loops): Remove unused
+ loop iterator argument from FOR_EACH_LOOP.
+
+2013-11-19 David Malcolm <dmalcolm@redhat.com>
+
+ Convert gimple types from a union to C++ inheritance.
+ * Makefile.in (GIMPLE_H): Add dep on is-a.h.
+ * coretypes.h (union gimple_statement_d): Remove declaration.
+ (gimple): Convert from being a "union gimple_statement_d *"
+ to a "struct gimple_statement_base *".
+ (const_gimple): Likewise (with "const").
+ * ggc.h (ggc_alloc_cleared_gimple_statement_d_stat): Replace with...
+ (ggc_alloc_cleared_gimple_statement_stat): ...this.
+ * gimple-pretty-print.c (debug): Change parameter from a
+ "gimple_statement_d &" to a "gimple_statement_base &".
+ (debug): Change parameter from a "gimple_statement_d *" to
+ a "gimple_statement_base *".
+ * gimple-pretty-print.h (debug): Update declarations as above.
+ * gimple.c (gimple_alloc_stat): Update for renaming of
+ ggc_alloc_cleared_gimple_statement_d_stat to
+ ggc_alloc_cleared_gimple_statement_stat.
+ * gimple.h: Include "is-a.h" for use by is_a_helper
+ specializations in followup autogenerated patch.
+	(struct gimple_statement_base): Make this type usable as a base
+	class by adding "desc", "tag" and "variable_size" to GTY, thus
+	opting in to gengtype's support for simple inheritance.
+ (gimple_statement_with_ops_base): Convert to a subclass of
+ gimple_statement_base, dropping initial "gsbase" field. Note that
+ this type is abstract, with no GSS_ value, and thus no GTY tag value.
+ (gimple_statement_with_ops): Convert to a subclass of
+ gimple_statement_with_ops_base, dropping initial "opbase" field.
+ Add tag value to GTY marking. Update marking of op field to
+ reflect how num_ops field is accessed via inheritance.
+ (gimple_statement_with_memory_ops_base): Convert to a subclass of
+ gimple_statement_with_ops_base, dropping initial "opbase" field.
+ Add tag value to GTY marking.
+ (gimple_statement_with_memory_ops): Convert to a subclass of
+ public gimple_statement_with_memory_ops_base, dropping initial
+ "membase" field. Add tag value to GTY marking. Update marking
+ of op field to reflect how num_ops field is accessed via inheritance.
+ (gimple_statement_call): Analogous changes that also update the
+ marking of the "u" union.
+ (gimple_statement_omp): Convert to a subclass of
+ gimple_statement_base, dropping initial "gsbase" field, adding
+ tag value to GTY marking.
+ (gimple_statement_bind): Likewise.
+ (gimple_statement_catch): Likewise.
+ (gimple_statement_eh_filter): Likewise.
+ (gimple_statement_eh_else): Likewise.
+ (gimple_statement_eh_mnt): Likewise.
+ (gimple_statement_phi): Likewise.
+ (gimple_statement_eh_ctrl): Likewise.
+ (gimple_statement_try): Likewise.
+ (gimple_statement_wce): Likewise.
+ (gimple_statement_asm): Convert to a subclass of
+ gimple_statement_with_memory_ops_base, dropping initial
+ "membase" field, adding tag value to GTY marking, and updating
+ marking of op field.
+ (gimple_statement_omp_critical): Convert to a subclass of
+ gimple_statement_omp, dropping initial "omp" field, adding tag
+ value to GTY marking.
+ (gimple_statement_omp_for): Likewise.
+ (gimple_statement_omp_parallel): Likewise.
+ (gimple_statement_omp_task): Convert to a subclass of
+ gimple_statement_omp_parallel, dropping initial "par" field,
+ adding tag value to GTY marking.
+ (gimple_statement_omp_sections): Convert to a subclass of
+ gimple_statement_omp, dropping initial "omp" field, adding
+ tag value to GTY marking.
+ (gimple_statement_omp_continue): Convert to a subclass of
+ gimple_statement_base, dropping initial "gsbase" field, adding
+ tag value to GTY marking.
+ (gimple_statement_omp_single): Convert to a subclass of
+ gimple_statement_omp, dropping initial "omp" field, adding
+ tag value to GTY marking.
+ (gimple_statement_omp_atomic_load): Convert to a subclass of
+ gimple_statement_base, dropping initial "gsbase" field, adding
+ tag value to GTY marking.
+ (gimple_statement_omp_atomic_store): Convert to a subclass of
+ gimple_statement_base, dropping initial "gsbase" field, adding
+ tag value to GTY marking.
+ (gimple_statement_transaction): Convert to a subclass of
+ gimple_statement_with_memory_ops_base, dropping initial "gsbase"
+ field, adding tag value to GTY marking.
+ (union gimple_statement_d): Remove.
+ * system.h (CONST_CAST_GIMPLE): Update to use
+ "struct gimple_statement_base *" rather than
+ "union gimple_statement_d *".
+ * tree-ssa-ccp.c (gimple_htab): Convert underlying type from
+ gimple_statement_d to gimple_statement_base.
+
+ * gimple.h (gimple_use_ops): Port from union to usage of dyn_cast.
+ (gimple_set_use_ops): Port from union to usage of as_a.
+ (gimple_set_vuse): Likewise.
+ (gimple_set_vdef): Likewise.
+ (gimple_call_internal_fn): Port from union to a static_cast,
+ given that the type has already been asserted.
+ (gimple_omp_body_ptr): Port from unchecked union usage to
+ a static_cast.
+ (gimple_omp_set_body): Likewise.
+
+ * gimple-iterator.c (update_bb_for_stmts): Update for conversion of
+ gimple types to a true class hierarchy.
+ (update_call_edge_frequencies): Likewise.
+ (gsi_insert_seq_nodes_before): Likewise.
+ (gsi_insert_seq_nodes_after): Likewise.
+ (gsi_split_seq_after): Likewise.
+ (gsi_set_stmt): Likewise.
+ (gsi_split_seq_before): Likewise.
+ (gsi_remove): Likewise.
+ * gimple-iterator.h (gsi_one_before_end_p): Likewise.
+ (gsi_next): Likewise.
+ (gsi_prev): Likewise.
+ * gimple-pretty-print.c (dump_gimple_debug): Likewise.
+ * gimple-ssa.h (gimple_vuse_op): Likewise.
+ (gimple_vdef_op): Likewise.
+ * gimple-streamer-in.c (input_gimple_stmt): Likewise.
+ * gimple-streamer-out.c (output_gimple_stmt): Likewise.
+ * gimple.c (gimple_set_code): Likewise.
+ (gimple_alloc_stat): Likewise.
+ (gimple_set_subcode): Likewise.
+ (gimple_build_call_internal_1): Likewise.
+ (gimple_check_failed): Likewise.
+ (gimple_call_flags): Likewise.
+ (gimple_set_bb): Likewise.
+ * gimple.h (is_a_helper <gimple_statement_asm> (gimple)): New.
+ (is_a_helper <gimple_statement_bind> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_call> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_catch> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_eh_ctrl> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_eh_else> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_eh_filter> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_eh_mnt> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_atomic_load> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_atomic_store> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_continue> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_critical> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_for> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_parallel> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_sections> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_single> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_omp_task> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_phi> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_transaction> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_try> (gimple)): Likewise.
+ (is_a_helper <gimple_statement_wce> (gimple)): Likewise.
+ (is_a_helper <const gimple_statement_asm> (const_gimple)): Likewise.
+ (is_a_helper <const gimple_statement_bind> (const_gimple)): Likewise.
+ (is_a_helper <const gimple_statement_call> (const_gimple)): Likewise.
+ (is_a_helper <const gimple_statement_catch> (const_gimple)): Likewise.
+ (is_a_helper <const gimple_statement_eh_ctrl> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_eh_filter> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_atomic_load> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_atomic_store>
+ (const_gimple)): Likewise.
+ (is_a_helper <const gimple_statement_omp_continue> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_critical> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_for> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_parallel> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_sections> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_single> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_omp_task> (const_gimple)):
+ Likewise.
+ (is_a_helper <const gimple_statement_phi> (const_gimple)): Likewise.
+ (is_a_helper <const gimple_statement_transaction> (const_gimple)):
+ Likewise.
+ (gimple_seq_last): Update for conversion of gimple types to a true
+ class hierarchy.
+ (gimple_seq_set_last): Likewise.
+ (gimple_code): Likewise.
+ (gimple_bb): Likewise.
+ (gimple_block): Likewise.
+ (gimple_set_block): Likewise.
+ (gimple_location): Likewise.
+ (gimple_location_ptr): Likewise.
+ (gimple_set_location): Likewise.
+ (gimple_no_warning_p): Likewise.
+ (gimple_set_no_warning): Likewise.
+ (gimple_set_visited): Likewise.
+ (gimple_visited_p): Likewise.
+ (gimple_set_plf): Likewise.
+ (gimple_plf): Likewise.
+ (gimple_set_uid): Likewise.
+ (gimple_uid): Likewise.
+ (gimple_init_singleton): Likewise.
+ (gimple_modified_p): Likewise.
+ (gimple_set_modified): Likewise.
+ (gimple_expr_code): Likewise.
+ (gimple_has_volatile_ops): Likewise.
+ (gimple_set_has_volatile_ops): Likewise.
+ (gimple_omp_subcode): Likewise.
+ (gimple_omp_set_subcode): Likewise.
+ (gimple_omp_return_set_nowait): Likewise.
+ (gimple_omp_section_set_last): Likewise.
+ (gimple_omp_parallel_set_combined_p): Likewise.
+ (gimple_omp_atomic_set_need_value): Likewise.
+ (gimple_omp_atomic_set_seq_cst): Likewise.
+ (gimple_num_ops): Likewise.
+ (gimple_set_num_ops): Likewise.
+ (gimple_assign_nontemporal_move_p): Likewise.
+ (gimple_assign_set_nontemporal_move): Likewise.
+ (gimple_assign_rhs_code): Likewise.
+ (gimple_assign_set_rhs_code): Likewise.
+ (gimple_call_internal_p): Likewise.
+ (gimple_call_with_bounds_p): Likewise.
+ (gimple_call_set_with_bounds): Likewise.
+ (gimple_call_set_tail): Likewise.
+ (gimple_call_tail_p): Likewise.
+ (gimple_call_set_return_slot_opt): Likewise.
+ (gimple_call_return_slot_opt_p): Likewise.
+ (gimple_call_set_from_thunk): Likewise.
+ (gimple_call_from_thunk_p): Likewise.
+ (gimple_call_set_va_arg_pack): Likewise.
+ (gimple_call_va_arg_pack_p): Likewise.
+ (gimple_call_set_nothrow): Likewise.
+ (gimple_call_set_alloca_for_var): Likewise.
+ (gimple_call_alloca_for_var_p): Likewise.
+ (gimple_call_copy_flags): Likewise.
+ (gimple_cond_code): Likewise.
+ (gimple_cond_set_code): Likewise.
+ (gimple_cond_make_false): Likewise.
+ (gimple_cond_make_true): Likewise.
+ (gimple_asm_volatile_p): Likewise.
+ (gimple_asm_set_volatile): Likewise.
+ (gimple_asm_set_input): Likewise.
+ (gimple_asm_input_p): Likewise.
+ (gimple_try_kind): Likewise.
+ (gimple_try_set_kind): Likewise.
+ (gimple_try_catch_is_cleanup): Likewise.
+ (gimple_try_set_catch_is_cleanup): Likewise.
+ (gimple_wce_cleanup_eh_only): Likewise.
+ (gimple_wce_set_cleanup_eh_only): Likewise.
+ (gimple_debug_bind_p): Likewise.
+ (gimple_debug_source_bind_p): Likewise.
+ (gimple_omp_for_set_kind): Likewise.
+ (gimple_omp_for_set_combined_p): Likewise.
+ (gimple_omp_for_set_combined_into_p): Likewise.
+ (gimple_omp_target_set_kind): Likewise.
+ (gimple_transaction_subcode): Likewise.
+ (gimple_transaction_set_subcode): Likewise.
+ (gimple_predict_predictor): Likewise.
+ (gimple_predict_set_predictor): Likewise.
+ (gimple_predict_outcome): Likewise.
+ (gimple_predict_set_outcome): Likewise.
+ (gimple_transaction_set_label): Likewise.
+ (gimple_transaction_set_body): Likewise.
+ (gimple_transaction_label_ptr): Likewise.
+ (gimple_transaction_label): Likewise.
+ (gimple_transaction_body_ptr): Likewise.
+ (gimple_omp_continue_set_control_use): Likewise.
+ (gimple_omp_continue_control_use_ptr): Likewise.
+ (gimple_omp_continue_control_use): Likewise.
+ (gimple_omp_continue_set_control_def): Likewise.
+ (gimple_omp_continue_control_def_ptr): Likewise.
+ (gimple_omp_continue_control_def): Likewise.
+ (gimple_omp_atomic_load_rhs_ptr): Likewise.
+ (gimple_omp_atomic_load_rhs): Likewise.
+ (gimple_omp_atomic_load_set_rhs): Likewise.
+ (gimple_omp_atomic_load_lhs_ptr): Likewise.
+ (gimple_omp_atomic_load_lhs): Likewise.
+ (gimple_omp_atomic_load_set_lhs): Likewise.
+ (gimple_omp_atomic_store_val_ptr): Likewise.
+ (gimple_omp_atomic_store_val): Likewise.
+ (gimple_omp_atomic_store_set_val): Likewise.
+ (gimple_omp_for_cond): Likewise.
+ (gimple_omp_for_set_cond): Likewise.
+ (gimple_omp_sections_set_control): Likewise.
+ (gimple_omp_sections_control_ptr): Likewise.
+ (gimple_omp_sections_control): Likewise.
+ (gimple_omp_sections_set_clauses): Likewise.
+ (gimple_omp_sections_clauses_ptr): Likewise.
+ (gimple_omp_sections_clauses): Likewise.
+ (gimple_omp_teams_set_clauses): Likewise.
+ (gimple_omp_teams_clauses_ptr): Likewise.
+ (gimple_omp_teams_clauses): Likewise.
+ (gimple_omp_target_set_data_arg): Likewise.
+ (gimple_omp_target_data_arg_ptr): Likewise.
+ (gimple_omp_target_data_arg): Likewise.
+ (gimple_omp_target_set_child_fn): Likewise.
+ (gimple_omp_target_child_fn_ptr): Likewise.
+ (gimple_omp_target_child_fn): Likewise.
+ (gimple_omp_target_set_clauses): Likewise.
+ (gimple_omp_target_clauses_ptr): Likewise.
+ (gimple_omp_target_clauses): Likewise.
+ (gimple_omp_single_set_clauses): Likewise.
+ (gimple_omp_single_clauses_ptr): Likewise.
+ (gimple_omp_single_clauses): Likewise.
+ (gimple_omp_task_set_arg_align): Likewise.
+ (gimple_omp_task_arg_align_ptr): Likewise.
+ (gimple_omp_task_arg_align): Likewise.
+ (gimple_omp_task_set_arg_size): Likewise.
+ (gimple_omp_task_arg_size_ptr): Likewise.
+ (gimple_omp_task_arg_size): Likewise.
+ (gimple_omp_task_set_copy_fn): Likewise.
+ (gimple_omp_task_copy_fn_ptr): Likewise.
+ (gimple_omp_task_copy_fn): Likewise.
+ (gimple_omp_task_set_data_arg): Likewise.
+ (gimple_omp_task_data_arg_ptr): Likewise.
+ (gimple_omp_task_data_arg): Likewise.
+ (gimple_omp_task_set_child_fn): Likewise.
+ (gimple_omp_task_child_fn_ptr): Likewise.
+ (gimple_omp_task_child_fn): Likewise.
+ (gimple_omp_task_set_clauses): Likewise.
+ (gimple_omp_task_clauses_ptr): Likewise.
+ (gimple_omp_task_clauses): Likewise.
+ (gimple_omp_parallel_set_data_arg): Likewise.
+ (gimple_omp_parallel_data_arg_ptr): Likewise.
+ (gimple_omp_parallel_data_arg): Likewise.
+ (gimple_omp_parallel_set_child_fn): Likewise.
+ (gimple_omp_parallel_child_fn_ptr): Likewise.
+ (gimple_omp_parallel_child_fn): Likewise.
+ (gimple_omp_parallel_set_clauses): Likewise.
+ (gimple_omp_parallel_clauses_ptr): Likewise.
+ (gimple_omp_parallel_clauses): Likewise.
+ (gimple_omp_for_set_pre_body): Likewise.
+ (gimple_omp_for_pre_body_ptr): Likewise.
+ (gimple_omp_for_set_incr): Likewise.
+ (gimple_omp_for_incr_ptr): Likewise.
+ (gimple_omp_for_incr): Likewise.
+ (gimple_omp_for_set_final): Likewise.
+ (gimple_omp_for_final_ptr): Likewise.
+ (gimple_omp_for_final): Likewise.
+ (gimple_omp_for_set_initial): Likewise.
+ (gimple_omp_for_initial_ptr): Likewise.
+ (gimple_omp_for_initial): Likewise.
+ (gimple_omp_for_set_index): Likewise.
+ (gimple_omp_for_index_ptr): Likewise.
+ (gimple_omp_for_index): Likewise.
+ (gimple_omp_for_collapse): Likewise.
+ (gimple_omp_for_set_clauses): Likewise.
+ (gimple_omp_for_clauses_ptr): Likewise.
+ (gimple_omp_for_clauses): Likewise.
+ (gimple_omp_critical_set_name): Likewise.
+ (gimple_omp_critical_name_ptr): Likewise.
+ (gimple_omp_critical_name): Likewise.
+ (gimple_eh_dispatch_set_region): Likewise.
+ (gimple_eh_dispatch_region): Likewise.
+ (gimple_resx_set_region): Likewise.
+ (gimple_resx_region): Likewise.
+ (gimple_phi_set_arg): Likewise.
+ (gimple_phi_arg): Likewise.
+ (gimple_phi_set_result): Likewise.
+ (gimple_phi_result_ptr): Likewise.
+ (gimple_phi_result): Likewise.
+ (gimple_phi_num_args): Likewise.
+ (gimple_phi_capacity): Likewise.
+ (gimple_wce_set_cleanup): Likewise.
+ (gimple_wce_cleanup_ptr): Likewise.
+ (gimple_try_set_cleanup): Likewise.
+ (gimple_try_set_eval): Likewise.
+ (gimple_try_cleanup_ptr): Likewise.
+ (gimple_try_eval_ptr): Likewise.
+ (gimple_eh_else_set_e_body): Likewise.
+ (gimple_eh_else_set_n_body): Likewise.
+ (gimple_eh_else_e_body_ptr): Likewise.
+ (gimple_eh_else_n_body_ptr): Likewise.
+ (gimple_eh_must_not_throw_set_fndecl): Likewise.
+ (gimple_eh_must_not_throw_fndecl): Likewise.
+ (gimple_eh_filter_set_failure): Likewise.
+ (gimple_eh_filter_set_types): Likewise.
+ (gimple_eh_filter_failure_ptr): Likewise.
+ (gimple_eh_filter_types_ptr): Likewise.
+ (gimple_eh_filter_types): Likewise.
+ (gimple_catch_set_handler): Likewise.
+ (gimple_catch_set_types): Likewise.
+ (gimple_catch_handler_ptr): Likewise.
+ (gimple_catch_types_ptr): Likewise.
+ (gimple_catch_types): Likewise.
+ (gimple_asm_string): Likewise.
+ (gimple_asm_set_label_op): Likewise.
+ (gimple_asm_label_op): Likewise.
+ (gimple_asm_set_clobber_op): Likewise.
+ (gimple_asm_clobber_op): Likewise.
+ (gimple_asm_set_output_op): Likewise.
+ (gimple_asm_output_op_ptr): Likewise.
+ (gimple_asm_output_op): Likewise.
+ (gimple_asm_set_input_op): Likewise.
+ (gimple_asm_input_op_ptr): Likewise.
+ (gimple_asm_input_op): Likewise.
+ (gimple_asm_nlabels): Likewise.
+ (gimple_asm_nclobbers): Likewise.
+ (gimple_asm_noutputs): Likewise.
+ (gimple_asm_ninputs): Likewise.
+ (gimple_bind_set_block): Likewise.
+ (gimple_bind_block): Likewise.
+ (gimple_bind_add_seq): Likewise.
+ (gimple_bind_add_stmt): Likewise.
+ (gimple_bind_set_body): Likewise.
+ (gimple_bind_body_ptr): Likewise.
+ (gimple_bind_append_vars): Likewise.
+ (gimple_bind_set_vars): Likewise.
+ (gimple_bind_vars): Likewise.
+ (gimple_call_clobber_set): Likewise.
+ (gimple_call_use_set): Likewise.
+ (gimple_call_set_internal_fn): Likewise.
+ (gimple_call_set_fntype): Likewise.
+ (gimple_call_fntype): Likewise.
+ (gimple_omp_return_lhs_ptr): Likewise.
+ (gimple_omp_return_lhs): Likewise.
+ (gimple_omp_return_set_lhs): Likewise.
+ (gimple_omp_taskreg_set_data_arg): Likewise.
+ (gimple_omp_taskreg_data_arg_ptr): Likewise.
+ (gimple_omp_taskreg_data_arg): Likewise.
+ (gimple_omp_taskreg_set_child_fn): Likewise.
+ (gimple_omp_taskreg_child_fn_ptr): Likewise.
+ (gimple_omp_taskreg_child_fn): Likewise.
+ (gimple_omp_taskreg_set_clauses): Likewise.
+ (gimple_omp_taskreg_clauses_ptr): Likewise.
+ (gimple_omp_taskreg_clauses): Likewise.
+ (gimple_vuse): Likewise.
+ (gimple_vdef): Likewise.
+ (gimple_vuse_ptr): Likewise.
+ (gimple_vdef_ptr): Likewise.
+ * tree-inline.c (copy_debug_stmt): Likewise.
+ * tree-phinodes.c (make_phi_node): Likewise.
+
+ * gimple.h (is_a_helper <const gimple_statement_with_ops>::test): New.
+ (is_a_helper <gimple_statement_with_ops>::test): New.
+ (is_a_helper <const gimple_statement_with_memory_ops>::test): New.
+ (is_a_helper <gimple_statement_with_memory_ops>::test): New.
+
+ * gimple-streamer-in.c (input_gimple_stmt): Port from union
+ access to use of as_a.
+ * gimple.c (gimple_build_asm_1): Likewise.
+ (gimple_build_try): Likewise. Also, return a specific subclass
+ rather than just gimple.
+ (gimple_build_resx): Port from union access to use of as_a.
+ (gimple_build_eh_dispatch): Likewise.
+ (gimple_build_omp_for): Likewise. Also, convert allocation of iter
+ now that gengtype no longer provides a typed allocator function.
+ (gimple_copy): Likewise.
+ * gimple.h (gimple_build_try): Return a specific subclass rather
+ than just gimple.
+ * gimplify.c (gimplify_cleanup_point_expr): Replace union access
+ with subclass access by making use of new return type of
+ gimple_build_try.
+ * tree-phinodes.c: (allocate_phi_node): Return a
+ "gimple_statement_phi *" rather than just a gimple.
+ (resize_phi_node): Likewise.
+ (make_phi_node): Replace union access with subclass access by
+ making use of new return type of allocate_phi_node.
+ (reserve_phi_args_for_new_edge): Replace union access with as_a.
+ (remove_phi_arg_num): Accept a "gimple_statement_phi *" rather
+ than just a gimple.
+ (remove_phi_args): Update for change to remove_phi_arg_num.
+
+ * gdbhooks.py (GimplePrinter.to_string): Update lookup of
+ code field to reflect inheritance, rather than embedding of
+ the base gimple type.
+
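To make the shape of the conversion concrete, this is roughly how an accessor such as gimple_phi_num_args reads afterwards (the exact body lives in gimple.h; this fragment is only a sketch): the statement is downcast through the new class hierarchy with as_a instead of being read through a union member.

    static inline unsigned
    gimple_phi_num_args (const_gimple gs)
    {
      /* as_a verifies (in checking builds) that GS really is a PHI and
         yields it as the subclass pointer; the old code reached into the
         gimple_statement_d union instead.  */
      const gimple_statement_phi *phi_stmt
        = as_a <const gimple_statement_phi> (gs);
      return phi_stmt->nargs;
    }

Call sites that only need to test the dynamic type use is_a <gimple_statement_phi> (stmt), and dyn_cast combines the test and the downcast, as in the gimple_use_ops change listed above.
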
+2013-11-19 Richard Biener <rguenther@suse.de>
+
+ * cfgloop.h (struct loop_iterator): C++-ify, add constructor
+ and destructor and make fel_next a member function.
+ (fel_next): Transform into ...
+ (loop_iterator::next): ... this.
+ (fel_init): Transform into ...
+ (loop_iterator::loop_iterator): ... this.
+ (loop_iterator::~loop_iterator): New.
+ (FOR_EACH_LOOP): Remove loop-iterator argument.
+ (FOR_EACH_LOOP_BREAK): Remove no longer necessary macro.
+ * cfgloop.c, cfgloopmanip.c, config/mn10300/mn10300.c,
+ graphite-clast-to-gimple.c, graphite-scop-detection.c,
+ graphite-sese-to-poly.c, ipa-inline-analysis.c, ipa-pure-const.c,
+ loop-init.c, loop-invariant.c, loop-unroll.c, loop-unswitch.c,
+ modulo-sched.c, predict.c, sel-sched-ir.c, tree-cfg.c, tree-data-ref.c,
+ tree-if-conv.c, tree-loop-distribution.c, tree-parloops.c,
+ tree-predcom.c, tree-scalar-evolution.c, tree-ssa-dce.c,
+ tree-ssa-loop-ch.c, tree-ssa-loop-im.c, tree-ssa-loop-ivcanon.c,
+ tree-ssa-loop-ivopts.c, tree-ssa-loop-manip.c, tree-ssa-loop-niter.c,
+ tree-ssa-loop-prefetch.c, tree-ssa-loop-unswitch.c,
+ tree-ssa-threadupdate.c, tree-vectorizer.c, tree-vrp.c: Adjust
+ uses of FOR_EACH_LOOP and remove loop_iterator variables. Replace
+ FOR_EACH_LOOP_BREAK with break.
+
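A before/after fragment (illustrative; the loop body and predicate are hypothetical) of what the C++-ification means for callers of FOR_EACH_LOOP:

    /* Before: an explicit iterator object, released via the macro.  */
    loop_iterator li;
    struct loop *loop;
    FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
      {
        if (!interesting_loop_p (loop))   /* hypothetical predicate  */
          FOR_EACH_LOOP_BREAK (li);
        transform_loop (loop);            /* hypothetical  */
      }

    /* After: the iterator lives inside the macro and its destructor
       releases the state, so a plain break is enough.  */
    struct loop *loop;
    FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
      {
        if (!interesting_loop_p (loop))
          break;
        transform_loop (loop);
      }
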
+2013-11-19 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59164
+ * tree-vect-loop-manip.c (vect_update_ivs_after_vectorizer):
+ Uncomment assert.
+ * tree-vect-loop.c (vect_analyze_loop_operations): Adjust check
+	whether we can create an epilogue loop to reflect the cases where
+ we create one.
+
+2013-11-19 Andrew MacLeod <amacleod@redhat.com>
+
+ * graphite-sese-to-poly.c: Include expr.h.
+
+2013-11-19 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/58956
+ * tree-ssa-ter.c (find_replaceable_in_bb): Avoid forwarding
+ loads into stmts that may clobber it.
+
+2013-11-19 Bernd Schmidt <bernds@codesourcery.com>
+
+ * cgraphunit.c (symtab_terminator): New variable.
+ (queued_nodes): Renamed from first. Use symtab_terminator as
+ initializer.
+ (analyze_functions): Adjust accordingly.
+ (cgraph_process_new_functions): Return void.
+ * cgraph.h (cgraph_process_new_functions): Adjust declaration.
+
+2013-11-19 Marek Polacek <polacek@redhat.com>
+
+ * opts.c (common_handle_option): Add -fsanitize=null option.
+ Turn off -fdelete-null-pointer-checks option when doing the
+ NULL pointer checking.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH): Add.
+ * tree-pass.h (make_pass_ubsan): Declare.
+ (make_pass_sanopt): Declare.
+ * timevar.def (TV_TREE_UBSAN): New timevar.
+ * passes.def: Add pass_sanopt and pass_ubsan.
+ * ubsan.h (ubsan_null_ckind): New enum.
+ (ubsan_mismatch_data): New struct.
+ (ubsan_expand_null_ifn): Declare.
+ (ubsan_create_data): Adjust declaration.
+ (ubsan_type_descriptor): Likewise.
+ * asan.c: Include "ubsan.h".
+ (pass_data_sanopt): New pass.
+ (execute_sanopt): New function.
+ (gate_sanopt): Likewise.
+ (make_pass_sanopt): Likewise.
+ (class pass_sanopt): New class.
+ * ubsan.c: Include tree-pass.h, gimple-ssa.h, gimple-walk.h,
+ gimple-iterator.h and cfgloop.h.
+ (PROB_VERY_UNLIKELY): Define.
+ (tree_type_map_hash): New function.
+ (ubsan_type_descriptor): Add new parameter.
+ Improve type name generation.
+ (ubsan_create_data): Add new parameter. Add pointer data into
+ ubsan structure.
+ (ubsan_expand_null_ifn): New function.
+ (instrument_member_call): Likewise.
+ (instrument_mem_ref): Likewise.
+ (instrument_null): Likewise.
+ (ubsan_pass): Likewise.
+ (gate_ubsan): Likewise.
+ (make_pass_ubsan): Likewise.
+ (ubsan_instrument_unreachable): Adjust ubsan_create_data call.
+ (class pass_ubsan): New class.
+ (pass_data_ubsan): New pass.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_NULL.
+ * internal-fn.c (expand_UBSAN_NULL): New function.
+ * cgraphunit.c (varpool_finalize_decl): Call varpool_assemble_decl
+ even when !flag_toplevel_reorder.
+ * internal-fn.def (UBSAN_NULL): New.
+
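The user-visible side of this entry, as a small editor-supplied example (not from the patch): compiling roughly as "gcc -fsanitize=null null.c" makes the dereference below report a runtime error through the new UBSAN_NULL / __ubsan_handle_type_mismatch machinery rather than failing silently.

    /* null.c  */
    int
    load (int *p)
    {
      return *p;            /* instrumented: p is checked against NULL  */
    }

    int
    main (void)
    {
      return load ((int *) 0);
    }
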
+2013-11-19 Jan Hubicka <jh@suse.cz>
+
+ * cgraph.c (cgraph_create_indirect_edge): Use
+ get_polymorphic_call_info.
+ * cgraph.h (cgraph_indirect_call_info): Add outer_type,
+ maybe_in_construction and maybe_derived_type.
+ * ipa-utils.h (ipa_polymorphic_call_context): New structure.
+ (ipa_dummy_polymorphic_call_context): New global var.
+	(possible_polymorphic_call_targets): Add context parameter.
+ (dump_possible_polymorphic_call_targets): Likewise; update wrappers.
+ (possible_polymorphic_call_target_p): Likewise.
+ (get_polymorphic_call_info): New function.
+ * ipa-devirt.c (ipa_dummy_polymorphic_call_context): New function.
+ (add_type_duplicate): Remove forgotten debug output.
+ (method_class_type): Add sanity check.
+ (maybe_record_node): Add FINALP parameter.
+	(record_binfo): Add OUTER_TYPE and OFFSET; walk the inner
+	binfo found by get_binfo_at_offset.
+ (possible_polymorphic_call_targets_1): Add OUTER_TYPE/OFFSET
+	parameters; pass them to record_binfo.
+ (polymorphic_call_target_d): Add context and FINAL.
+ (polymorphic_call_target_hasher::hash): Hash context.
+ (polymorphic_call_target_hasher::equal): Compare context.
+ (free_polymorphic_call_targets_hash):
+ (get_class_context): New function.
+ (contains_type_p): New function.
+ (get_polymorphic_call_info): New function.
+ (walk_bases): New function.
+	(possible_polymorphic_call_targets): Add context parameter; honor it.
+ (dump_possible_polymorphic_call_targets): Dump context.
+ (possible_polymorphic_call_target_p): Add context.
+	(update_type_inheritance_graph): Update comments.
+	(ipa_set_jf_known_type): Assert that component type is known.
+ (ipa_note_param_call): Do not tamper with offsets.
+	(ipa_analyze_indirect_call_uses): When offset is being changed, clear
+ outer type.
+ (update_indirect_edges_after_inlining): Likewise.
+ (ipa_write_indirect_edge_info): Stream new fields.
+ (ipa_read_indirect_edge_info): Stream in new fields.
+
+2013-11-19 Jan Hubicka <jh@suse.cz>
+
+ * tree-pretty-print.c (dump_generic_node): Print class type of
+ OBJ_TYPE_REF.
+
+2013-11-19 Joey Ye <joey.ye@arm.com>
+
+ * config/arm/arm.opt (-marm-pic-data-is-text-relative): New option.
+ * doc/invoke.texi (-marm-pic-data-is-text-relative): Documentation
+ for new option.
+ * config/arm/arm.c (arm_option_override): By default disable
+ -marm-pic-data-is-text-relative.
+ (legitimize_pic_address): Use arm_pic_data_is_text_relative.
+ (arm_assemble_integer): Likewise.
+ * config/arm/arm.h (TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE):
+ New macro to initialize -marm-pic-data-is-text-relative.
+
+2013-11-19 Bin Cheng <bin.cheng@arm.com>
+
+ * tree-ssa-loop-ivopts.c (enum ainc_type): New.
+ (address_cost_data): New field.
+ (get_address_cost): Compute auto-increment rtx cost in ainc_costs.
+	Use ainc_costs for auto-increment rtx patterns.  Clean up trailing
+	whitespace.
+
+2013-11-19 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/aarch64/aarch64.md: Remove v8type from all insns.
+
+2013-11-19 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/57517
+ * tree-predcom.c (combinable_refs_p): Verify the combination
+ is always executed when the refs are.
+
+2013-11-19 Jeff Law <law@redhat.com>
+
+ * tree-ssa-threadupdate.c: Include ssa-iterators.h
+ (copy_phi_arg_into_existing_phi): New function.
+ (any_remaining_duplicated_blocks): Likewise.
+ (ssa_fix_duplicate_block_edges): Handle multiple duplicated
+ blocks on a jump threading path.
+
+ * tree-ssa-threadupdate.c (thread_through_loop_header): Do not
+ thread through a joiner which has the latch edge.
+
+2013-11-19 Jan Hubicka <jh@suse.cz>
+
+ * md.texi (setmem): Document new parameter.
+ * optabs.c (maybe_gen_insn): Support 9 operands.
+ * builtins.c (determine_block_size): Add probable_max_size;
+ support anti-ranges.
+	(expand_builtin_memcpy, expand_builtin_memset_args): Pass around
+ probable_max_size.
+ * expr.c (emit_block_move_via_movmem, emit_block_move_hints,
+ emit_block_move, clear_storage_hints, set_storage_via_setmem):
+ Likewise.
+ * expr.h (emit_block_move_hints, clear_storage_hints,
+ set_storage_via_setmem): Update prototype.
+ * i386.md (setmem, movmem patterns): Add 9th operand.
+ * i386-protos.h (ix86_expand_set_or_movmem): Update prototype.
+ * i386.c (ix86_expand_set_or_movmem): Take probable_max_size_exp
+ argument; pass it to decide_alg.
+
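An editor-supplied illustration of the kind of call site the extra information is for: when the copy length has a known upper bound, the expander now also sees a probable maximum size, and decide_alg can pick an inline strategy instead of always emitting a library call (whether it actually does depends on the target's cost tables).

    void
    copy_bounded (char *dst, const char *src, unsigned int n)
    {
      if (n > 64)
        n = 64;                        /* gives N the range [0, 64]  */
      __builtin_memcpy (dst, src, n);  /* expander sees max size 64  */
    }
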
+2013-11-19 David Malcolm <dmalcolm@redhat.com>
+
+ * basic-block.h (n_basic_blocks_for_function): Rename macro to...
+ (n_basic_blocks_for_fn): ...this.
+
+ (n_basic_blocks): Eliminate macro as work towards making uses of
+ cfun be explicit.
+
+ * cfgloop.c (init_loops_structure): Update for renaming of
+ "n_basic_blocks_for_function" to "n_basic_blocks_for_fn".
+ * graph.c (draw_cfg_nodes_no_loops): Likewise.
+ * ipa-utils.c (ipa_merge_profiles): Likewise.
+ * lto-streamer-in.c (make_new_block): Likewise.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+ (dump_function_to_file): Likewise.
+
+ * alias.c (init_alias_analysis): Replace usage of "n_basic_blocks"
+ macro with "n_basic_blocks_for_fn (cfun)".
+ * bb-reorder.c (partition_hot_cold_basic_blocks): Likewise.
+ (duplicate_computed_gotos): Likewise.
+ (reorder_basic_blocks): Likewise.
+ * bt-load.c (augment_live_range): Likewise.
+ * cfg.c (expunge_block): Likewise.
+ (compact_blocks): Likewise.
+ * cfganal.c (single_pred_before_succ_order): Likewise.
+ (compute_idf): Likewise.
+ (flow_dfs_compute_reverse_init): Likewise.
+ (pre_and_rev_post_order_compute): Likewise.
+ (pre_and_rev_post_order_compute_fn): Likewise.
+ (inverted_post_order_compute): Likewise.
+ (post_order_compute): Likewise.
+ (print_edge_list): Likewise.
+ (find_unreachable_blocks): Likewise.
+ (mark_dfs_back_edges): Likewise.
+ * cfgcleanup.c (try_optimize_cfg): Likewise.
+ (try_forward_edges): Likewise.
+ * cfghooks.c (dump_flow_info): Likewise.
+ * cfgloop.c (verify_loop_structure): Likewise.
+ (get_loop_body): Likewise.
+ (flow_loops_find): Likewise.
+ * cfgloopmanip.c (add_loop): Likewise.
+ (remove_path): Likewise.
+ (find_path): Likewise.
+ * cfgrtl.c (rtl_flow_call_edges_add): Likewise.
+ (rtl_verify_bb_layout): Likewise.
+ (entry_of_function): Likewise.
+ (rtl_create_basic_block): Likewise.
+ * coverage.c (coverage_compute_cfg_checksum): Likewise.
+ * cprop.c (one_cprop_pass): Likewise.
+ (is_too_expensive): Likewise.
+ * df-core.c (df_compute_cfg_image): Likewise.
+ (df_compact_blocks): Likewise.
+ (df_worklist_dataflow_doublequeue): Likewise.
+ * dominance.c (calculate_dominance_info): Likewise.
+ (calc_dfs_tree): Likewise.
+ (calc_dfs_tree_nonrec): Likewise.
+ (init_dom_info): Likewise.
+ * domwalk.c (cmp_bb_postorder): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns): Likewise.
+ (generate_setjmp_warnings): Likewise.
+ * fwprop.c (build_single_def_use_links): Likewise.
+ * gcse.c (is_too_expensive): Likewise.
+ (one_code_hoisting_pass): Likewise.
+ (one_pre_gcse_pass): Likewise.
+ * graphite.c (graphite_initialize): Likewise.
+ * haifa-sched.c (haifa_sched_init): Likewise.
+ * ipa-inline-analysis.c (estimate_function_body_sizes): Likewise.
+ * ira.c (split_live_ranges_for_shrink_wrap): Likewise.
+ * ira-build.c (ira_build): Likewise.
+ * lcm.c (compute_nearerout): Likewise.
+ (compute_available): Likewise.
+ (compute_laterin): Likewise.
+ (compute_antinout_edge): Likewise.
+ * lra-lives.c (lra_create_live_ranges): Likewise.
+ * lra.c (has_nonexceptional_receiver): Likewise.
+ * mcf.c (create_fixup_graph): Likewise.
+ * profile.c (branch_prob): Likewise.
+ * reg-stack.c (convert_regs_2): Likewise.
+ * regrename.c (regrename_analyze): Likewise.
+ * reload1.c (has_nonexceptional_receiver): Likewise.
+ * reorg.c (dbr_schedule): Likewise.
+ * sched-deps.c (sched_deps_init): Likewise.
+ * sched-ebb.c (schedule_ebbs): Likewise.
+ * sched-rgn.c (extend_regions): Likewise.
+ (schedule_insns): Likewise.
+ (sched_rgn_init): Likewise.
+ (extend_rgns): Likewise.
+ (haifa_find_rgns): Likewise.
+ * sel-sched-ir.c (recompute_rev_top_order): Likewise.
+ (sel_recompute_toporder): Likewise.
+ * sel-sched.c (run_selective_scheduling): Likewise.
+ * store-motion.c (one_store_motion_pass): Likewise.
+ (remove_reachable_equiv_notes): Likewise.
+ * tracer.c (tracer): Likewise.
+ (tail_duplicate): Likewise.
+ * tree-cfg.c (gimple_flow_call_edges_add): Likewise.
+ (dump_cfg_stats): Likewise.
+ (gimple_dump_cfg): Likewise.
+ (create_bb): Likewise.
+ (build_gimple_cfg): Likewise.
+ * tree-cfgcleanup.c (merge_phi_nodes): Likewise.
+ * tree-inline.c (optimize_inline_calls): Likewise.
+ (fold_marked_statements): Likewise.
+ * tree-ssa-ifcombine.c (tree_ssa_ifcombine): Likewise.
+ * tree-ssa-loop-ch.c (copy_loop_headers): Likewise.
+ * tree-ssa-loop-im.c (analyze_memory_references): Likewise.
+ * tree-ssa-loop-manip.c (compute_live_loop_exits): Likewise.
+ * tree-ssa-math-opts.c (execute_cse_reciprocals): Likewise.
+ * tree-ssa-phiopt.c (tree_ssa_phiopt_worker): Likewise.
+ * tree-ssa-pre.c (do_pre): Likewise.
+ (init_pre): Likewise.
+ (compute_avail): Likewise.
+ * tree-ssa-reassoc.c (init_reassoc): Likewise.
+ * tree-ssa-sccvn.c (init_scc_vn): Likewise.
+ * tree-ssa-tail-merge.c (alloc_cluster_vectors): Likewise.
+ (init_worklist): Likewise.
+ * tree-ssa-uncprop.c (associate_equivalences_with_edges): Likewise.
+ * var-tracking.c (variable_tracking_main_1): Likewise.
+ (vt_find_locations): Likewise.
+ (vt_stack_adjustments): Likewise.
+ * config/s390/s390.c (s390_optimize_nonescaping_tx): Likewise.
+ * config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
+
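The same pattern as the n_edges change earlier in this log; an illustrative guard of the kind adjusted throughout:

    /* Nothing to reorder beyond the fixed entry/exit blocks plus one
       ordinary basic block.  */
    if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
      return;
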
+2013-11-18 Jan Hubicka <jh@suse.cz>
+
+ * profile.c (compute_branch_probabilities): Do not sanity check
+ run_max.
+
+2013-11-18 Kenneth Zadeck <zadeck@naturalbridge.com>
+
+ * tree.c (int_fits_type_p): Change GET_MODE_BITSIZE to
+ GET_MODE_PRECISION.
+ * fold-const.c (fold_single_bit_test_into_sign_test)
+ (fold_binary_loc): Change GET_MODE_BITSIZE to GET_MODE_PRECISION.
+
+2013-11-18 Teresa Johnson <tejohnson@google.com>
+
+ * cfgrtl.c (cfg_layout_initialize): Assert if we try to go into
+ cfglayout after bb reordering.
+ * passes.def: Move compgotos before bb reordering since it goes into
+ cfglayout.
+
+2013-11-18 Bernd Schmidt <bernds@codesourcery.com>
+
+ * cgraphunit.c (ipa_passes): Don't execute all_lto_gen_passes.
+ * lto-streamer-out.c (lto_output, produce_asm_for_decls): No longer
+ static.
+ (pass_data_ipa_lto_gimple_out, pass_ipa_lto_gimple_out,
+ make_pass_ipa_lto_gimple_out, pass_data_ipa_lto_finish_out,
+ pass_ipa_lto_finish_out, make_pass_ipa_lto_finish_out): Remove.
+ * lto-streamer.h (lto_output, produce_asm_for_decls): Declare.
+	* pass_manager.h (GCC_PASS_LISTS, class pass_manager):
+ Remove all_lto_gen_passes.
+ * passes.c (pass_manager::dump_passes): Remove its use.
+ (pass_manager::register_pass): Likewise.
+ (ipa_read_summaries, ipa_read_optimization_summaries): Likewise.
+ (pass_manager::pass_manager): Don't initialize or use it.
+ (write_lto): New static function.
+ (ipa_write_summaries_1, ipa_write_optimization_summaries): Use it
+ instead of using all_lto_gen_passes.
+ * passes.def (all_lto_gen_passes, pass_ipa_lto_gimple_out,
+ pass_ipa_lto_finish_out): Delete.
+ * tree-pass.h (make_pass_ipa_lto_gimple_out,
+ make_pass_ipa_lto_finish_out): Don't declare.
+
+2013-11-18 Jeff Law <law@redhat.com>
+
+ * tree-ssa-threadupdate.c (redirection_data): Record two
+ duplicated blocks instead of just one.
+ (local_info): Explain why we don't create a template for the
+ second duplicated block in a thread path.
+ (create_block_for_threading): Accept argument indicating array
+ index into redirection_data to store its result.
+ (lookup_redirection_data): Initialize both duplicate blocks.
+ (ssa_create_duplicates): If a jump threading path needs multiple
+ blocks duplicated, then duplicate them.
+ (ssa_fix_duplicate_block_edges): Corresponding changes.
+ (ssa_fixup_template_block, thread_single_edge): Likewise.
+
+2013-11-18 Marek Polacek <polacek@redhat.com>
+
+ * doc/invoke.texi: Extend -fsanitize=undefined documentation.
+
+2013-11-18 Andrew Pinski <apinski@cavium.com>
+ Steve Ellcey <sellcey@mips.com>
+
+ PR target/56552
+ * config/mips/mips.md (*mov<GPR:mode>_on_<MOVECC:mode>): Remove
+	type restriction from equality_operator on conditional move.
+ (*mov<SCALARF:mode>_on_<MOVECC:mode>): Ditto.
+ (*mov<GPR:mode>_on_<GPR2:mode>_ne): New.
+
+2013-11-18 Jeff Law <law@redhat.com>
+
+ * tree-ssa-threadupdate.c: Fix file block comment.
+ Fix minor indention issue.
+
+2013-11-18 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.c (ix86_decompose_address): Use REG_P instead of
+ ix86_address_subreg_operand. Move subreg checks to
+ ix86_validate_address_register. Move address override check to
+ ix86_legitimate_address_p.
+ (ix86_validate_address_register): New function.
+ (ix86_legitimate_address_p): Call ix86_validate_address_register
+ to validate base and index registers. Add address override check
+ from ix86_decompose_address.
+	(ix86_address_subreg_operand): Remove.
+
+2013-11-18 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59125
+ PR tree-optimization/54570
+ * tree-ssa-sccvn.c (copy_reference_ops_from_ref): When inlining
+ is not complete do not treat component-references with offset zero
+ but different fields as equal.
+ * tree-object-size.c: Include tree-phinodes.h and ssa-iterators.h.
+ (compute_object_sizes): Apply TLC. Propagate the constant
+ results into all uses and fold their stmts.
+ * passes.def (pass_all_optimizations): Move pass_object_sizes
+ after the first pass_forwprop and before pass_fre.
+
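For the object-size half of this entry, a small editor-supplied example of the folding whose results are now propagated into all uses:

    char buf[16];

    __SIZE_TYPE__
    size_of_buf (void)
    {
      /* pass_object_sizes, now scheduled before FRE, folds this call to
         the constant 16 and propagates it to every use of the result.  */
      return __builtin_object_size (buf, 0);
    }
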
2013-11-18 Richard Sandiford <rdsandiford@googlemail.com>
* tree.h (tree_to_uhwi): Return an unsigned HOST_WIDE_INT.
@@ -124,9 +1787,8 @@
2013-11-18 Kirill Yukhin <kirill.yukhin@intel.com>
- * gcc/config/ia64/ia64.c (ia64_split_tmode_move): Mark
- load with `dead' flag if it kills address, not its
- post-increment.
+ * config/ia64/ia64.c (ia64_split_tmode_move): Mark load with `dead'
+ flag if it kills its address, not its post-increment.
2013-11-18 Ilya Enkovich <ilya.enkovich@intel.com>
@@ -160,7 +1822,7 @@
* config/arm/arm.c (arm_cortex_a53_tune): New.
* config/arm/arm-cores.def (cortex-a53): Use cortex_a53 tuning struct.
-2013-11-12 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
+2013-11-12 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
* config.gcc (i[34567]86-*-linux* | ...): Add bdver4.
(case ${target}): Add bdver4.
@@ -187,8 +1849,8 @@
(enum processor_type): Add PROCESSOR_BDVER4.
* config/i386/i386.md (define_attr "cpu"): Add bdver4.
* config/i386/i386.opt (flag_dispatch_scheduler): Add bdver4.
- * gcc/doc/extend.texi: Add details about bdver4.
- * gcc/doc/invoke.texi: Add details about bdver4. Add
+ * doc/extend.texi: Add details about bdver4.
+ * doc/invoke.texi: Add details about bdver4. Add
fma4 and fsgsbase for bdver3. Add fma4 for bdver2.
2013-11-17 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
@@ -200,18 +1862,17 @@
* doc/md.texi (setmem, movstr): Update documentation.
* builtins.c (determine_block_size): New function.
- (expand_builtin_memcpy): Use it and pass it to
- emit_block_move_hints.
+ (expand_builtin_memcpy): Use it and pass it to emit_block_move_hints.
(expand_builtin_memset_args): Use it and pass it to
set_storage_via_setmem.
- * expr.c (emit_block_move_via_movmem): Add min_size/max_size parameters;
- update call to expander.
+ * expr.c (emit_block_move_via_movmem): Add min_size/max_size
+ parameters; update call to expander.
(emit_block_move_hints): Add min_size/max_size parameters.
(clear_storage_hints): Likewise.
(set_storage_via_setmem): Likewise.
(clear_storage): Update.
* expr.h (emit_block_move_hints, clear_storage_hints,
- set_storage_via_setmem): Update prototype.
+ set_storage_via_setmem): Update prototypes.
* i386.c (ix86_expand_set_or_movmem): Add bounds; export.
(ix86_expand_movmem, ix86_expand_setmem): Remove.
@@ -542,6 +2203,723 @@
* config/tilepro/tilepro.c: Likewise.
* config/xtensa/xtensa.c: Likewise.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * Makefile.in (PLUGIN_HEADERS): Add stringpool.h.
+
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * tree.h: Include fold-const.h.
+ (aggregate_value_p): Moved to function.h.
+ (alloca_call_p): Moved to calls.h.
+ (allocate_struct_function): Moved to function.h.
+ (apply_tm_attr): Moved to attribs.h.
+ (array_at_struct_end_p): Moved to expr.h.
+ (array_ref_element_size): Moved to tree-dfa.h.
+ (array_ref_low_bound): Moved to tree-dfa.h.
+ (array_ref_up_bound): Moved to tree.h.
+ (assemble_alias): Moved to cgraph.h.
+ (bit_from_pos): Moved to stor-layout.h.
+ (build_addr): Moved to tree-nested.h.
+ (build_duplicate_type): Moved to tree-inline.h.
+ (build_fold_addr_expr): Moved to fold-const.h.
+ (build_fold_addr_expr_with_type): Moved to fold-const.h.
+ (build_fold_addr_expr_with_type_loc): Moved to fold-const.h.
+ (build_fold_indirect_ref): Moved to fold-const.h.
+ (build_fold_indirect_ref_loc): Moved to fold-const.h.
+ (build_personality_function): Moved to tree.h.
+ (build_range_check): Moved to fold-const.h.
+ (build_simple_mem_ref): Moved to fold-const.h.
+ (build_simple_mem_ref_loc): Moved to fold-const.h.
+ (build_tm_abort_call): Moved to trans-mem.h.
+ (byte_from_pos): Moved to stor-layout.h.
+ (call_expr_flags): Moved to calls.h.
+ (can_move_by_pieces): Moved to expr.h.
+ (categorize_ctor_elements): Moved to expr.h.
+ (change_decl_assembler_name): Moved to gcc-symtab.h.
+ (combine_comparisons): Moved to fold-const.h.
+ (complete_ctor_at_level_p): Moved to tree.h.
+ (component_ref_field_offset): Moved to tree-dfa.h.
+ (compute_builtin_object_size): Moved to tree-object-size.h.
+ (compute_record_mode): Moved to stor-layout.h.
+ (constant_boolean_node): Moved to fold-const.h.
+ (constructor_static_from_elts_p): Moved to varasm.h.
+ (cxx11_attribute_p): Moved to attribs.h.
+ (debug_body): Moved to print-tree.h.
+ (debug_find_tree): Moved to tree-inline.h.
+ (debug_fold_checksum): Moved to fold-const.h.
+ (debug_head): Moved to print-tree.h.
+ (debug_head): Moved to print-tree.h.
+ (debug_raw): Moved to print-tree.h.
+ (debug_tree): Moved to print-tree.h.
+ (debug_vec_tree): Moved to print-tree.h.
+ (debug_verbose): Moved to print-tree.h.
+ (debug_verbose): Moved to print-tree.h.
+ (decl_attributes): Moved to attribs.h.
+ (decl_binds_to_current_def_p): Moved to varasm.h.
+ (decl_default_tls_model): Moved to varasm.h.
+ (decl_replaceable_p): Moved to varasm.h.
+ (div_if_zero_remainder): Moved to fold-const.h.
+ (double_int mem_ref_offset): Moved to fold-const.h.
+ (dump_addr): Moved to print-tree.h.
+ (element_precision): Moved to machmode.h.
+ (expand_dummy_function_end): Moved to function.h.
+ (expand_function_end): Moved to function.h.
+ (expand_function_start): Moved to function.h.
+ (expand_label): Moved to stmt.h.
+ (expr_first): Moved to tree-iterator.h.
+ (expr_last): Moved to tree-iterator.h.
+ (finalize_size_functions): Moved to stor-layout.h.
+ (finish_builtin_struct): Moved to stor-layout.h.
+ (finish_record_layout): Moved to stor-layout.h.
+ (fixup_signed_type): Moved to stor-layout.h.
+ (fixup_unsigned_type): Moved to stor-layout.h.
+ (flags_from_decl_or_type): Moved to calls.h.
+ (fold): Moved to fold-const.h.
+ (fold_abs_const): Moved to fold-const.h.
+ (fold_binary): Moved to fold-const.h.
+ (fold_binary_loc): Moved to fold-const.h.
+ (fold_binary_to_constant): Moved to fold-const.h.
+ (fold_build1): Moved to fold-const.h.
+ (fold_build1_initializer_loc): Moved to fold-const.h.
+ (fold_build1_loc): Moved to fold-const.h.
+ (fold_build1_stat_loc): Moved to fold-const.h.
+ (fold_build2): Moved to fold-const.h.
+ (fold_build2_initializer_loc): Moved to fold-const.h.
+ (fold_build2_loc): Moved to fold-const.h.
+ (fold_build2_stat_loc): Moved to fold-const.h.
+ (fold_build3): Moved to fold-const.h.
+ (fold_build3_loc): Moved to fold-const.h.
+ (fold_build3_stat_loc): Moved to fold-const.h.
+ (fold_build_call_array): Moved to fold-const.h.
+ (fold_build_call_array_initializer): Moved to fold-const.h.
+ (fold_build_call_array_initializer_loc): Moved to fold-const.h.
+ (fold_build_call_array_loc): Moved to fold-const.h.
+ (fold_build_cleanup_point_expr): Moved to fold-const.h.
+ (fold_convert): Moved to fold-const.h.
+ (fold_convert_loc): Moved to fold-const.h.
+ (fold_convertible_p): Moved to fold-const.h.
+ (fold_defer_overflow_warnings): Moved to fold-const.h.
+ (fold_deferring_overflow_warnings_p): Moved to fold-const.h.
+ (fold_fma): Moved to fold-const.h.
+ (fold_ignored_result): Moved to fold-const.h.
+ (fold_indirect_ref): Moved to fold-const.h.
+ (fold_indirect_ref_1): Moved to fold-const.h.
+ (fold_indirect_ref_loc): Moved to fold-const.h.
+ (fold_read_from_constant_string): Moved to fold-const.h.
+ (fold_real_zero_addition_p): Moved to fold-const.h.
+ (fold_single_bit_test): Moved to fold-const.h.
+ (fold_strip_sign_ops): Moved to fold-const.h.
+ (fold_ternary): Moved to fold-const.h.
+ (fold_ternary_loc): Moved to fold-const.h.
+ (fold_unary): Moved to tree-data-ref.h.
+ (fold_unary_ignore_overflow): Moved to fold-const.h.
+ (fold_unary_ignore_overflow_loc): Moved to fold-const.h.
+ (fold_unary_loc): Moved to fold-const.h.
+ (fold_unary_to_constant): Moved to fold-const.h.
+ (fold_undefer_and_ignore_overflow_warnings): Moved to fold-const.h.
+ (fold_undefer_overflow_warnings): Moved to fold-const.h.
+ (folding_initializer): Moved to fold-const.h.
+ (free_temp_slots): Moved to function.h.
+ (generate_setjmp_warnings): Moved to function.h.
+ (get_attribute_name): Moved to attribs.h.
+ (get_identifier): Moved to stringpool.h.
+ (get_identifier_with_length): Moved to stringpool.h.
+ (get_inner_reference): Moved to tree.h.
+ (gimple_alloca_call_p): Moved to calls.h.
+ (gimplify_parameters): Moved to function.h.
+ (highest_pow2_factor): Moved to expr.h.
+ (indent_to): Moved to print-tree.h.
+ (init_attributes): Moved to attribs.h.
+ (init_dummy_function_start): Moved to function.h.
+ (init_function_start): Moved to function.h.
+ (init_inline_once): Moved to tree-inline.h.
+ (init_object_sizes): Moved to tree-object-size.h.
+ (init_temp_slots): Moved to function.h.
+ (init_tree_optimization_optabs): Moved to optabs.h.
+ (initialize_sizetypes): Moved to stor-layout.h.
+ (initializer_constant_valid_for_bitfield_p): Moved to varasm.h.
+ (initializer_constant_valid_p): Moved to varasm.h.
+ (int_const_binop): Moved to fold-const.h.
+ (internal_reference_types): Moved to stor-layout.h.
+ (invert_tree_comparison): Moved to fold-const.h.
+ (invert_truthvalue): Moved to fold-const.h.
+ (invert_truthvalue_loc): Moved to fold-const.h.
+ (is_tm_ending_fndecl): Moved to trans-mem.h.
+ (is_tm_may_cancel_outer): Moved to trans-mem.h.
+ (is_tm_pure): Moved to trans-mem.h.
+ (is_tm_safe): Moved to trans-mem.h.
+ (layout_decl): Moved to stor-layout.h.
+ (layout_type): Moved to stor-layout.h.
+ (lookup_attribute_spec): Moved to attribs.h.
+ (make_accum_type): Moved to stor-layout.h.
+ (make_decl_one_only): Moved to varasm.h.
+ (make_decl_rtl): Moved to tree.h.
+ (make_decl_rtl_for_debug): Moved to varasm.h.
+ (make_fract_type): Moved to stor-layout.h.
+ (make_or_reuse_sat_signed_accum_type): Moved to stor-layout.h.
+ (make_or_reuse_sat_signed_fract_type): Moved to stor-layout.h.
+ (make_or_reuse_sat_unsigned_accum_type): Moved to stor-layout.h.
+ (make_or_reuse_sat_unsigned_fract_type): Moved to stor-layout.h.
+ (make_or_reuse_signed_accum_type): Moved to stor-layout.h.
+ (make_or_reuse_signed_fract_type): Moved to stor-layout.h.
+ (make_or_reuse_unsigned_accum_type): Moved to stor-layout.h.
+ (make_or_reuse_unsigned_fract_type): Moved to stor-layout.h.
+ (make_range): Moved to fold-const.h.
+ (make_range_step): Moved to fold-const.h.
+ (make_sat_signed_accum_type): Moved to stor-layout.h.
+ (make_sat_signed_fract_type): Moved to stor-layout.h.
+ (make_sat_unsigned_accum_type): Moved to stor-layout.h.
+ (make_sat_unsigned_fract_type): Moved to stor-layout.h.
+ (make_signed_accum_type): Moved to stor-layout.h.
+ (make_signed_fract_type): Moved to stor-layout.h.
+ (make_signed_type): Moved to stor-layout.h.
+ (make_unsigned_accum_type): Moved to stor-layout.h.
+ (make_unsigned_fract_type): Moved to stor-layout.h.
+ (make_unsigned_type): Moved to stor-layout.h.
+ (mark_decl_referenced): Moved to varasm.h.
+ (mark_referenced): Moved to varasm.h.
+ (may_negate_without_overflow_p): Moved to fold-const.h.
+ (maybe_get_identifier): Moved to stringpool.h.
+ (merge_ranges): Moved to fold-const.h.
+ (merge_weak): Moved to varasm.h.
+ (mode_for_size_tree): Moved to stor-layout.h.
+ (multiple_of_p): Moved to fold-const.h.
+ (must_pass_in_stack_var_size): Moved to calls.h.
+ (must_pass_in_stack_var_size_or_pad): Moved to calls.h.
+ (native_encode_expr): Moved to fold-const.h.
+ (native_interpret_expr): Moved to fold-const.h.
+ (non_lvalue): Moved to fold-const.h.
+ (non_lvalue_loc): Moved to fold-const.h.
+ (normalize_offset): Moved to stor-layout.h.
+ (normalize_rli): Moved to stor-layout.h.
+ (notice_global_symbol): Moved to varasm.h.
+ (omit_one_operand): Moved to fold-const.h.
+ (omit_one_operand_loc): Moved to fold-const.h.
+ (omit_two_operands): Moved to fold-const.h.
+ (omit_two_operands_loc): Moved to fold-const.h.
+ (operand_equal_p): Moved to tree-data-ref.h.
+ (parse_input_constraint): Moved to stmt.h.
+ (parse_output_constraint): Moved to stmt.h.
+ (place_field): Moved to stor-layout.h.
+ (pop_function_context): Moved to function.h.
+ (pop_temp_slots): Moved to function.h.
+ (pos_from_bit): Moved to stor-layout.h.
+ (preserve_temp_slots): Moved to function.h.
+ (print_node): Moved to print-tree.h.
+ (print_node_brief): Moved to print-tree.h.
+ (print_rtl): Moved to rtl.h.
+ (process_pending_assemble_externals): Moved to varasm.h.
+ (ptr_difference_const): Moved to fold-const.h.
+ (push_function_context): Moved to function.h.
+ (push_struct_function): Moved to function.h.
+ (push_temp_slots): Moved to function.h.
+ (record_tm_replacement): Moved to trans-mem.h.
+ (relayout_decl): Moved to stor-layout.h.
+ (resolve_asm_operand_names): Moved to stmt.h.
+ (resolve_unique_section): Moved to varasm.h.
+ (rli_size_so_far): Moved to stor-layout.h.
+ (rli_size_unit_so_far): Moved to stor-layout.h.
+ (round_down): Moved to fold-const.h.
+ (round_down_loc): Moved to fold-const.h.
+ (round_up): Moved to fold-const.h.
+ (round_up_loc): Moved to fold-const.h.
+ (set_decl_incoming_rtl): Moved to emit-rtl.h.
+ (set_decl_rtl): Moved to tree.h.
+ (set_min_and_max_values_for_integral_type): Moved to stor-layout.h.
+ (set_user_assembler_name): Moved to varasm.h.
+ (setjmp_call_p): Moved to calls.h.
+ (size_binop): Moved to fold-const.h.
+ (size_binop_loc): Moved to fold-const.h.
+ (size_diffop): Moved to fold-const.h.
+ (size_diffop_loc): Moved to fold-const.h.
+ (size_int_kind): Moved to fold-const.h.
+ (stack_protect_epilogue): Moved to function.h.
+ (start_record_layout): Moved to stor-layout.h.
+ (supports_one_only): Moved to varasm.h.
+ (swap_tree_comparison): Moved to fold-const.h.
+ (tm_malloc_replacement): Moved to trans-mem.h.
+ (tree build_fold_addr_expr_loc): Moved to fold-const.h.
+ (tree build_invariant_address): Moved to fold-const.h.
+ (tree_binary_nonnegative_warnv_p): Moved to fold-const.h.
+ (tree_binary_nonzero_warnv_p): Moved to fold-const.h.
+ (tree_call_nonnegative_warnv_p): Moved to fold-const.h.
+ (tree_expr_nonnegative_p): Moved to fold-const.h.
+ (tree_expr_nonnegative_warnv_p): Moved to fold-const.h.
+ (tree_output_constant_def): Moved to varasm.h.
+ (tree_overlaps_hard_reg_set): Moved to stmt.h.
+ (tree_single_nonnegative_warnv_p): Moved to fold-const.h.
+ (tree_single_nonzero_warnv_p): Moved to fold-const.h.
+ (tree_swap_operands_p): Moved to fold-const.h.
+ (tree_unary_nonnegative_warnv_p): Moved to fold-const.h.
+ (tree_unary_nonzero_warnv_p): Moved to fold-const.h.
+ (update_alignment_for_field): Moved to stor-layout.h.
+ (use_register_for_decl): Moved to function.h.
+ (variable_size): Moved to rtl.h.
+ (vector_type_mode): Moved to stor-layout.h.
+ * cgraph.h: Corresponding changes.
+ * emit-rtl.h: Corresponding changes.
+ * expr.h: Corresponding changes.
+ * function.h: Corresponding changes.
+ * optabs.h: Corresponding changes.
+ * trans-mem.h: Corresponding changes.
+ Protect against multiple inclusion.
+ * tree-inline.h: Corresponding changes.
+ * tree-iterator.h: Corresponding changes.
+ * tree-dfa.h: Include expr.h.
+ * tree-ssanames.h: Include stringpool.h.
+ * attribs.h: New file.
+ * calls.h: New file.
+ * fold-const.h: New file.
+ * gcc-symtab.h: New file.
+ * print-rtl.h: New file.
+ * print-tree.h: New file.
+ * stmt.h: New file.
+ * stor-layout.h: New file.
+	* stringpool.h: New file.
+	* tree-nested.h: New file.
+ * tree-object-size.h: New file.
+ * varasm.h: New file.
+
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * alias.c: Include varasm.h.
+ Include expr.h.
+ * asan.c: Include calls.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ * attribs.c: Include stringpool.h.
+ Include attribs.h.
+ Include stor-layout.h.
+ * builtins.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ Include tree-object-size.h.
+ * calls.c: Include stor-layout.h.
+ Include varasm.h.
+ Include stringpool.h.
+ Include attribs.h.
+ * cfgexpand.c: Include stringpool.h.
+ Include varasm.h.
+ Include stor-layout.h.
+ Include stmt.h.
+ Include print-tree.h.
+ * cgraph.c: Include varasm.h.
+ Include calls.h.
+ Include print-tree.h.
+ * cgraphclones.c: Include stringpool.h.
+ Include function.h.
+ Include emit-rtl.h.
+ Move inclusion of rtl.h earlier in the file.
+ * cgraphunit.c: Include varasm.h.
+ Include stor-layout.h.
+ Include stringpool.h.
+ * cilk-common.c: Include stringpool.h.
+ Include stor-layout.h.
+ * combine.c: Include stor-layout.h.
+ * config/aarch64/aarch64-builtins.c: Include stor-layout.h.
+ Include stringpool.h.
+ Include calls.h.
+ * config/aarch64/aarch64.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/alpha/alpha.c: Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/arc/arc.c: Include varasm.h.
+ Include stor-layout.h.
+ Include stringpool.h.
+ Include calls.h.
+ * config/arm/arm.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/avr/avr-c.c: Include stor-layout.h.
+ * config/avr/avr-log.c: Include print-tree.h.
+ * config/avr/avr.c: Include print-tree.h.
+ Include calls.h.
+ Include stor-layout.h.
+ Include stringpool.h.
+ * config/bfin/bfin.c: Include varasm.h.
+ Include calls.h.
+ * config/c6x/c6x.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ Include stringpool.h.
+ * config/cr16/cr16.c: Include stor-layout.h.
+ Include calls.h.
+ * config/cris/cris.c: Include varasm.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include stmt.h.
+ * config/darwin.c: Include stringpool.h.
+ Include varasm.h.
+ Include stor-layout.h.
+ * config/epiphany/epiphany.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ Include stringpool.h.
+ * config/fr30/fr30.c: Include stor-layout.h.
+ Include varasm.h.
+ * config/frv/frv.c: Include varasm.h.
+ Include stor-layout.h.
+ Include stringpool.h.
+ * config/h8300/h8300.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ Include stringpool.h.
+ * config/i386/i386.c: Include stringpool.h.
+ Include attribs.h.
+ Include calls.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ * config/i386/winnt-cxx.c: Include stringpool.h.
+ Include attribs.h.
+ * config/i386/winnt.c: Include stringpool.h.
+ Include varasm.h.
+ * config/ia64/ia64-c.c: Include stringpool.h.
+ * config/ia64/ia64.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/iq2000/iq2000.c: Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/lm32/lm32.c: Include calls.h.
+ * config/m32c/m32c.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/m32r/m32r.c: Include stor-layout.h.
+ Include varasm.h.
+ Include stringpool.h.
+ Include calls.h.
+ * config/m68k/m68k.c: Include calls.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ * config/mcore/mcore.c: Include stor-layout.h.
+ Include varasm.h.
+ Include stringpool.h.
+ Include calls.h.
+ * config/mep/mep.c: Include varasm.h.
+ Include calls.h.
+ Include stringpool.h.
+ Include stor-layout.h.
+ * config/microblaze/microblaze.c: Include varasm.h.
+ Include stor-layout.h.
+ Include calls.h.
+ * config/mips/mips.c: Include varasm.h.
+ Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ * config/mmix/mmix.c: Include varasm.h.
+ Include stor-layout.h.
+ Include calls.h.
+ * config/mn10300/mn10300.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/moxie/moxie.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/msp430/msp430.c: Include stor-layout.h.
+ Include calls.h.
+ * config/nds32/nds32.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/pa/pa.c: Include stor-layout.h.
+ Include stringpool.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/pdp11/pdp11.c: Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/picochip/picochip.c: Include calls.h.
+ Include stor-layout.h.
+ Include stringpool.h.
+ Include varasm.h.
+ * config/rl78/rl78.c: Include varasm.h.
+ Include stor-layout.h.
+ Include calls.h.
+ * config/rs6000/rs6000-c.c: Include stor-layout.h.
+ Include stringpool.h.
+ * config/rs6000/rs6000.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include print-tree.h.
+ Include varasm.h.
+ * config/rx/rx.c: Include varasm.h.
+ Include stor-layout.h.
+ Include calls.h.
+ * config/s390/s390.c: Include print-tree.h.
+ Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/score/score.c: Include stringpool.h.
+ Include calls.h.
+ Include varasm.h.
+ Include stor-layout.h.
+	* config/sh/sh-c.c: Include stringpool.h.
+	Include attribs.h.
+ * config/sh/sh.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/sol2-c.c: Include stringpool.h.
+ Include attribs.h.
+ * config/sol2-cxx.c: Include stringpool.h.
+ * config/sol2.c: Include stringpool.h.
+ Include varasm.h.
+ * config/sparc/sparc.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/spu/spu-c.c: Include stringpool.h.
+ * config/spu/spu.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * config/stormy16/stormy16.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/tilegx/tilegx.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/tilepro/tilepro.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/v850/v850-c.c: Include stringpool.h.
+ Include attribs.h.
+ * config/v850/v850.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include calls.h.
+ * config/vax/vax.c: Include calls.h.
+ Include varasm.h.
+ * config/vms/vms.c: Include stringpool.h.
+ * config/vxworks.c: Include stringpool.h.
+ * config/xtensa/xtensa.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include varasm.h.
+ * convert.c: Include stor-layout.h.
+ * coverage.c: Include stringpool.h.
+ Include stor-layout.h.
+ * dbxout.c: Include varasm.h.
+ Include stor-layout.h.
+ * dojump.c: Include stor-layout.h.
+ * dse.c: Include stor-layout.h.
+ * dwarf2asm.c: Include stringpool.h.
+ Include varasm.h.
+ * dwarf2cfi.c: Include stor-layout.h.
+ * dwarf2out.c: Include rtl.h.
+ Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include function.h.
+ Include emit-rtl.h.
+ Move inclusion of rtl.h earlier in the file.
+ * emit-rtl.c: Include varasm.h.
+ * except.c: Include stringpool.h.
+ Include stor-layout.h.
+ * explow.c: Include stor-layout.h.
+ * expmed.c: Include stor-layout.h.
+ * expr.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include attribs.h.
+ Include varasm.h.
+ * final.c: Include varasm.h.
+ * fold-const.c: Include stor-layout.h.
+ Include calls.h.
+ Include tree-iterator.h.
+ * function.c: Include stor-layout.h.
+ Include varasm.h.
+ Include stringpool.h.
+ * genattrtab.c (write_header): Emit includes for varasm.h,
+ stor-layout.h and calls.h.
+ * genautomata.c (main): Likewise.
+ * genemit.c: Likewise.
+ * genopinit.c: Likewise.
+ * genoutput.c (output_prologue): Likewise.
+ * genpeep.c: Likewise.
+ * genpreds.c (write_insn_preds_c): Likewise.
+ * gengtype.c (open_base_files): Add stringpool.h.
+ * gimple-expr.c: Include stringpool.h.
+ Include stor-layout.h.
+ * gimple-fold.c: Include stringpool.h.
+ Include expr.h.
+ Include stmt.h.
+ Include stor-layout.h.
+ * gimple-low.c: Include tree-nested.h.
+ Include calls.h.
+ * gimple-pretty-print.c: Include stringpool.h.
+ * gimple-ssa-strength-reduction.c: Include stor-layout.h.
+ Include expr.h.
+ * gimple-walk.c: Include stmt.h.
+ * gimple.c: Include calls.h.
+ Include stmt.h.
+ Include stor-layout.h.
+ * gimplify.c: Include stringpool.h.
+ Include calls.h.
+ Include varasm.h.
+ Include stor-layout.h.
+ Include stmt.h.
+ Include print-tree.h.
+ Include expr.h.
+	* gimplify-me.c: Include stmt.h.
+	Include stor-layout.h.
+ * internal-fn.c: Include stor-layout.h.
+ * ipa-devirt.c: Include print-tree.h.
+ Include calls.h.
+ * ipa-inline-analysis.c: Include stor-layout.h.
+ Include stringpool.h.
+ Include print-tree.h.
+ * ipa-inline.c: Include trans-mem.h.
+ Include calls.h.
+ * ipa-prop.c: Include expr.h.
+ Include stor-layout.h.
+ Include print-tree.h.
+ * ipa-pure-const.c: Include print-tree.h.
+ Include calls.h.
+ * ipa-reference.c: Include calls.h.
+ * ipa-split.c: Include stringpool.h.
+ Include expr.h.
+ Include calls.h.
+ * ipa.c: Include calls.h.
+ Include stringpool.h.
+ * langhooks.c: Include stringpool.h.
+ Include attribs.h.
+ * lto-cgraph.c: Include stringpool.h.
+ * lto-streamer-in.c: Include stringpool.h.
+ * lto-streamer-out.c: Include stor-layout.h.
+ Include stringpool.h.
+ * omp-low.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include expr.h.
+ * optabs.c: Include stor-layout.h.
+ Include stringpool.h.
+ Include varasm.h.
+ * passes.c: Include varasm.h.
+ * predict.c: Include calls.h.
+ * print-rtl.c: Include print-tree.h.
+ * print-tree.c: Include varasm.h.
+ Include print-rtl.h.
+ Include stor-layout.h.
+ * realmpfr.c: Include stor-layout.h.
+ * reg-stack.c: Include varasm.h.
+ * sdbout.c: Include varasm.h.
+ Include stor-layout.h.
+ * simplify-rtx.c: Include varasm.h.
+ * stmt.c: Include varasm.h.
+ Include stor-layout.h.
+ * stor-layout.c: Include stor-layout.h.
+ Include stringpool.h.
+ Include varasm.h.
+ Include print-tree.h.
+ * symtab.c: Include rtl.h.
+ Include print-tree.h.
+ Include varasm.h.
+ Include function.h.
+ Include emit-rtl.h.
+ * targhooks.c: Include stor-layout.h.
+ Include varasm.h.
+ * toplev.c: Include varasm.h.
+ Include tree-inline.h.
+ * trans-mem.c: Include calls.h.
+ Include function.h.
+ Include rtl.h.
+ Include emit-rtl.h.
+ * tree-affine.c: Include expr.h.
+ * tree-browser.c: Include print-tree.h.
+ * tree-call-cdce.c: Include stor-layout.h.
+ * tree-cfg.c: Include trans-mem.h.
+ Include stor-layout.h.
+ Include print-tree.h.
+ * tree-complex.c: Include stor-layout.h.
+ * tree-data-ref.c: Include expr.h.
+ * tree-dfa.c: Include stor-layout.h.
+ * tree-eh.c: Include expr.h.
+ Include calls.h.
+ * tree-emutls.c: Include stor-layout.h.
+ Include varasm.h.
+ * tree-if-conv.c: Include stor-layout.h.
+ * tree-inline.c: Include stor-layout.h.
+ Include calls.h.
+ * tree-loop-distribution.c: Include stor-layout.h.
+ * tree-nested.c: Include stringpool.h.
+ Include stor-layout.h.
+ * tree-object-size.c: Include tree-object-size.h.
+ * tree-outof-ssa.c: Include stor-layout.h.
+ * tree-parloops.c: Include stor-layout.h.
+ Include tree-nested.h.
+ * tree-pretty-print.c: Include stor-layout.h.
+ Include expr.h.
+ * tree-profile.c: Include varasm.h.
+ Include tree-nested.h.
+ * tree-scalar-evolution.c: Include expr.h.
+ * tree-sra.c: Include stor-layout.h.
+ * tree-ssa-address.c: Include stor-layout.h.
+ * tree-ssa-ccp.c: Include stor-layout.h.
+ * tree-ssa-dce.c: Include calls.h.
+ * tree-ssa-dom.c: Include stor-layout.h.
+ * tree-ssa-forwprop.c: Include stor-layout.h.
+ * tree-ssa-ifcombine.c: Include stor-layout.h.
+ * tree-ssa-loop-ivopts.c: Include stor-layout.h.
+ * tree-ssa-loop-niter.c: Include calls.h.
+ Include expr.h.
+ * tree-ssa-loop-prefetch.c: Include stor-layout.h.
+ * tree-ssa-math-opts.c: Include stor-layout.h.
+ * tree-ssa-operands.c: Include stmt.h.
+ Include print-tree.h.
+ * tree-ssa-phiopt.c: Include stor-layout.h.
+ * tree-ssa-reassoc.c: Include stor-layout.h.
+ * tree-ssa-sccvn.c: Include stor-layout.h.
+ * tree-ssa-sink.c: Include stor-layout.h.
+ * tree-ssa-strlen.c: Include stor-layout.h.
+ * tree-ssa-structalias.c: Include stor-layout.h.
+ Include stmt.h.
+ * tree-ssa-tail-merge.c: Include stor-layout.h.
+ Include trans-mem.h.
+ * tree-ssa-uncprop.c: Include stor-layout.h.
+ * tree-ssa.c: Include stor-layout.h.
+ * tree-ssanames.c: Include stor-layout.h.
+ * tree-streamer-in.c: Include stringpool.h.
+ * tree-streamer-out.c: Include stor-layout.h.
+ * tree-switch-conversion.c: Include varasm.h.
+ Include stor-layout.h.
+ * tree-tailcall.c: Include stor-layout.h.
+ * tree-vect-data-refs.c: Include stor-layout.h.
+ * tree-vect-generic.c: Include stor-layout.h.
+ * tree-vect-loop.c: Include stor-layout.h.
+ * tree-vect-patterns.c: Include stor-layout.h.
+ * tree-vect-slp.c: Include stor-layout.h.
+ * tree-vect-stmts.c: Include stor-layout.h.
+ * tree-vectorizer.c: Include stor-layout.h.
+ * tree-vrp.c: Include stor-layout.h.
+ Include calls.h.
+ * tree.c: Include stor-layout.h.
+ Include calls.h.
+ Include attribs.h.
+ Include varasm.h.
+ * tsan.c: Include expr.h.
+ * ubsan.c: Include stor-layout.h.
+ Include stringpool.h.
+ * value-prof.c: Include tree-nested.h.
+ Include calls.h.
+ * var-tracking.c: Include varasm.h.
+ Include stor-layout.h.
+ * varasm.c: Include stor-layout.h.
+ Include stringpool.h.
+ Include gcc-symtab.h.
+ Include varasm.h.
+ * varpool.c: Include varasm.h.
+ * vmsdbgout.c: Include varasm.h.
+ * xcoffout.c: Include varasm.h.
+
2013-11-14 Joern Rennecke <joern.rennecke@embecosm.com>
* config/arc/arc.md (doloop_begin_i): Remove extra alignment;
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 9717548f436..91b73955538 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20131118
+20131120
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index c11d14a00f7..958118d9c83 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -888,7 +888,7 @@ BASIC_BLOCK_H = basic-block.h $(PREDICT_H) $(VEC_H) $(FUNCTION_H) \
cfg-flags.def cfghooks.h
GIMPLE_H = gimple.h gimple.def gsstruct.def pointer-set.h $(VEC_H) \
$(GGC_H) $(BASIC_BLOCK_H) $(TREE_H) tree-ssa-operands.h \
- tree-ssa-alias.h $(INTERNAL_FN_H) $(HASH_TABLE_H)
+ tree-ssa-alias.h $(INTERNAL_FN_H) $(HASH_TABLE_H) is-a.h
GCOV_IO_H = gcov-io.h gcov-iov.h auto-host.h
RECOG_H = recog.h
EMIT_RTL_H = emit-rtl.h
@@ -3119,7 +3119,7 @@ PLUGIN_HEADERS = $(TREE_H) $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
cppdefault.h flags.h $(MD5_H) params.def params.h prefix.h tree-inline.h \
$(GIMPLE_PRETTY_PRINT_H) realmpfr.h \
$(IPA_PROP_H) $(TARGET_H) $(RTL_H) $(TM_P_H) $(CFGLOOP_H) $(EMIT_RTL_H) \
- version.h
+ version.h stringpool.h
# generate the 'build fragment' b-header-vars
s-header-vars: Makefile
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index a9fd65b16cd..906bd7e4f51 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,11 @@
+2013-11-20 Kenneth Zadeck <zadeck@naturalbridge.com>
+ Mike Stump <mikestump@comcast.net>
+ Richard Sandiford <rdsandiford@googlemail.com>
+
+ * gcc-interface/cuintp.c (UI_From_gnu): Use tree_to_shwi.
+ * gcc-interface/decl.c (gnat_to_gnu_entity): Use tree_to_uhwi.
+ * gcc-interface/utils.c (make_packable_type): Likewise.
+
2013-11-18 Richard Sandiford <rdsandiford@googlemail.com>
* gcc-interface/cuintp.c (UI_From_gnu): Use tree_to_shwi rather than
@@ -64,6 +72,25 @@
* gcc-interface/trans.c: Include gimple.h and pointer-set.h.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * gcc-interface/decl.c: Include stringpool.h
+ Include stor-layout.h
+ * gcc-interface/misc.c: Include stor-layout.h
+ Include print-tree.h
+ * gcc-interface/trans.c: Include stringpool.h
+ Include stor-layout.h
+ Include stmt.h
+ Include varasm.h
+ * gcc-interface/utils.c: Include stringpool.h
+ Include stor-layout.h
+ Include attribs.h
+ Include varasm.h
+ * gcc-interface/utils2.c: Include stringpool.h
+ Include stor-layout.h
+ Include attribs.h
+ Include varasm.h
+
2013-11-12 Andrew MacLeod <amacleod@redhat.com>
* gcc-interface/trans.c: Include gimplify.h.
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index fd81ef070b8..61b2239132b 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -28,6 +28,8 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "flags.h"
#include "toplev.h"
#include "ggc.h"
diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c
index a82f6fd8f82..fc74be28fb8 100644
--- a/gcc/ada/gcc-interface/misc.c
+++ b/gcc/ada/gcc-interface/misc.c
@@ -30,6 +30,8 @@
#include "options.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "print-tree.h"
#include "diagnostic.h"
#include "target.h"
#include "ggc.h"
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index f699fd85d18..53e66780f84 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -28,6 +28,10 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "stmt.h"
+#include "varasm.h"
#include "flags.h"
#include "ggc.h"
#include "output.h"
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index eef41275b5d..dd652f21a96 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -28,6 +28,10 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "attribs.h"
+#include "varasm.h"
#include "flags.h"
#include "toplev.h"
#include "diagnostic-core.h"
diff --git a/gcc/ada/gcc-interface/utils2.c b/gcc/ada/gcc-interface/utils2.c
index c45f1c3fc63..184a223ff13 100644
--- a/gcc/ada/gcc-interface/utils2.c
+++ b/gcc/ada/gcc-interface/utils2.c
@@ -28,6 +28,9 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "flags.h"
#include "toplev.h"
#include "ggc.h"
diff --git a/gcc/alias.c b/gcc/alias.c
index 8a66d2baa44..8a1f09bc33e 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "varasm.h"
+#include "expr.h"
#include "tm_p.h"
#include "function.h"
#include "alias.h"
@@ -2958,7 +2960,7 @@ init_alias_analysis (void)
The state of the arrays for the set chain in question does not matter
since the program has undefined behavior. */
- rpo = XNEWVEC (int, n_basic_blocks);
+ rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
pass = 0;
diff --git a/gcc/asan.c b/gcc/asan.c
index 4353db63160..d44aa636c55 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -26,8 +26,12 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
+#include "calls.h"
+#include "varasm.h"
+#include "stor-layout.h"
#include "tree-iterator.h"
#include "cgraph.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "asan.h"
@@ -42,6 +46,7 @@ along with GCC; see the file COPYING3. If not see
#include "alloc-pool.h"
#include "cfgloop.h"
#include "gimple-builder.h"
+#include "ubsan.h"
/* AddressSanitizer finds out-of-bounds and use-after-free bugs
with <2x slowdown on average.
@@ -2370,4 +2375,87 @@ make_pass_asan_O0 (gcc::context *ctxt)
return new pass_asan_O0 (ctxt);
}
+/* Perform optimization of sanitize functions. */
+
+static unsigned int
+execute_sanopt (void)
+{
+ basic_block bb;
+
+ FOR_EACH_BB (bb)
+ {
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+
+ if (!is_gimple_call (stmt))
+ continue;
+
+ if (gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_UBSAN_NULL:
+ ubsan_expand_null_ifn (gsi);
+ break;
+ default:
+ break;
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Optimized\n ");
+ print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+ fprintf (dump_file, "\n");
+ }
+ }
+ }
+ return 0;
+}
+
+static bool
+gate_sanopt (void)
+{
+ return flag_sanitize;
+}
+
+namespace {
+
+const pass_data pass_data_sanopt =
+{
+ GIMPLE_PASS, /* type */
+ "sanopt", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_gate */
+ true, /* has_execute */
+ TV_NONE, /* tv_id */
+ ( PROP_ssa | PROP_cfg | PROP_gimple_leh ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ ( TODO_verify_flow | TODO_verify_stmts
+ | TODO_update_ssa ), /* todo_flags_finish */
+};
+
+class pass_sanopt : public gimple_opt_pass
+{
+public:
+ pass_sanopt (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_sanopt, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ bool gate () { return gate_sanopt (); }
+ unsigned int execute () { return execute_sanopt (); }
+
+}; // class pass_sanopt
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_sanopt (gcc::context *ctxt)
+{
+ return new pass_sanopt (ctxt);
+}
+
#include "gt-asan.h"
diff --git a/gcc/attribs.c b/gcc/attribs.c
index 296bcd6eb74..19b697d416f 100644
--- a/gcc/attribs.c
+++ b/gcc/attribs.c
@@ -22,6 +22,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "stor-layout.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "ggc.h"
diff --git a/gcc/attribs.h b/gcc/attribs.h
new file mode 100644
index 00000000000..042e112ea8e
--- /dev/null
+++ b/gcc/attribs.h
@@ -0,0 +1,40 @@
+/* Declarations and definitions dealing with attribute handling.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ATTRIBS_H
+#define GCC_ATTRIBS_H
+
+extern const struct attribute_spec *lookup_attribute_spec (const_tree);
+extern void init_attributes (void);
+
+/* Process the attributes listed in ATTRIBUTES and install them in *NODE,
+ which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL,
+ it should be modified in place; if a TYPE, a copy should be created
+ unless ATTR_FLAG_TYPE_IN_PLACE is set in FLAGS. FLAGS gives further
+ information, in the form of a bitwise OR of flags in enum attribute_flags
+ from tree.h. Depending on these flags, some attributes may be
+ returned to be applied at a later stage (for example, to apply
+ a decl attribute to the declaration rather than to its type). */
+extern tree decl_attributes (tree *, tree, int);
+
+extern bool cxx11_attribute_p (const_tree);
+extern tree get_attribute_name (const_tree);
+extern void apply_tm_attr (tree, tree);
+
+#endif // GCC_ATTRIBS_H
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index fd1681209fb..58bacc33f87 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -312,11 +312,11 @@ struct GTY(()) control_flow_graph {
};
/* Defines for accessing the fields of the CFG structure for function FN. */
-#define ENTRY_BLOCK_PTR_FOR_FUNCTION(FN) ((FN)->cfg->x_entry_block_ptr)
-#define EXIT_BLOCK_PTR_FOR_FUNCTION(FN) ((FN)->cfg->x_exit_block_ptr)
+#define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr)
+#define EXIT_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_exit_block_ptr)
#define basic_block_info_for_function(FN) ((FN)->cfg->x_basic_block_info)
-#define n_basic_blocks_for_function(FN) ((FN)->cfg->x_n_basic_blocks)
-#define n_edges_for_function(FN) ((FN)->cfg->x_n_edges)
+#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
+#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges)
#define last_basic_block_for_function(FN) ((FN)->cfg->x_last_basic_block)
#define label_to_block_map_for_function(FN) ((FN)->cfg->x_label_to_block_map)
#define profile_status_for_function(FN) ((FN)->cfg->x_profile_status)
@@ -327,11 +327,7 @@ struct GTY(()) control_flow_graph {
((*basic_block_info_for_function (FN))[(N)] = (BB))
/* Defines for textual backward source compatibility. */
-#define ENTRY_BLOCK_PTR (cfun->cfg->x_entry_block_ptr)
-#define EXIT_BLOCK_PTR (cfun->cfg->x_exit_block_ptr)
#define basic_block_info (cfun->cfg->x_basic_block_info)
-#define n_basic_blocks (cfun->cfg->x_n_basic_blocks)
-#define n_edges (cfun->cfg->x_n_edges)
#define last_basic_block (cfun->cfg->x_last_basic_block)
#define label_to_block_map (cfun->cfg->x_label_to_block_map)
#define profile_status (cfun->cfg->x_profile_status)
@@ -380,10 +376,10 @@ struct GTY(()) control_flow_graph {
exit block). */
#define FOR_ALL_BB(BB) \
- for (BB = ENTRY_BLOCK_PTR; BB; BB = BB->next_bb)
+ for (BB = ENTRY_BLOCK_PTR_FOR_FN (cfun); BB; BB = BB->next_bb)
#define FOR_ALL_BB_FN(BB, FN) \
- for (BB = ENTRY_BLOCK_PTR_FOR_FUNCTION (FN); BB; BB = BB->next_bb)
+ for (BB = ENTRY_BLOCK_PTR_FOR_FN (FN); BB; BB = BB->next_bb)
/* Stuff for recording basic block info. */
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 8e2348f476c..fc7b5b758ea 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -275,7 +275,7 @@ find_traces (int *n_traces, struct trace *traces)
heap = fibheap_new ();
max_entry_frequency = 0;
max_entry_count = 0;
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
{
bbd[e->dest->index].heap = heap;
bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
@@ -348,7 +348,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bb_visited_trace (e->dest) != trace_n
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX))
@@ -524,7 +524,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
{
gcc_assert (!(e->flags & EDGE_FAKE));
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (bb_visited_trace (e->dest)
@@ -605,7 +605,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e == best_edge
- || e->dest == EXIT_BLOCK_PTR
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bb_visited_trace (e->dest))
continue;
@@ -680,7 +680,8 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
header is not the first block of the function
we can rotate the loop. */
- if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
+ if (best_edge->dest
+ != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
{
if (dump_file)
{
@@ -776,7 +777,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
is an end of the trace). */
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bb_visited_trace (e->dest))
continue;
@@ -885,7 +886,8 @@ bb_to_key (basic_block bb)
or whose predecessor edge is EDGE_DFS_BACK. */
FOR_EACH_EDGE (e, ei, bb->preds)
{
- if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
+ if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bbd[e->src->index].end_of_trace >= 0)
|| (e->flags & EDGE_DFS_BACK))
{
int edge_freq = EDGE_FREQUENCY (e);
@@ -1098,7 +1100,7 @@ connect_traces (int n_traces, struct trace *traces)
{
int si = e->src->index;
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& bbd[si].end_of_trace >= 0
@@ -1141,7 +1143,7 @@ connect_traces (int n_traces, struct trace *traces)
{
int di = e->dest->index;
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& bbd[di].start_of_trace >= 0
@@ -1212,7 +1214,7 @@ connect_traces (int n_traces, struct trace *traces)
bool try_copy = false;
FOR_EACH_EDGE (e, ei, traces[t].last->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& (!best || e->probability > best->probability))
@@ -1237,7 +1239,7 @@ connect_traces (int n_traces, struct trace *traces)
{
int di = e2->dest->index;
- if (e2->dest == EXIT_BLOCK_PTR
+ if (e2->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| ((e2->flags & EDGE_CAN_FALLTHRU)
&& !(e2->flags & EDGE_COMPLEX)
&& bbd[di].start_of_trace >= 0
@@ -1253,7 +1255,7 @@ connect_traces (int n_traces, struct trace *traces)
{
best = e;
best2 = e2;
- if (e2->dest != EXIT_BLOCK_PTR)
+ if (e2->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
best2_len = traces[bbd[di].start_of_trace].length;
else
best2_len = INT_MAX;
@@ -1282,7 +1284,7 @@ connect_traces (int n_traces, struct trace *traces)
traces[t].last->index, best->dest->index);
if (!next_bb)
fputc ('\n', dump_file);
- else if (next_bb == EXIT_BLOCK_PTR)
+ else if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
fprintf (dump_file, "exit\n");
else
fprintf (dump_file, "%d\n", next_bb->index);
@@ -1290,7 +1292,7 @@ connect_traces (int n_traces, struct trace *traces)
new_bb = copy_bb (best->dest, best, traces[t].last, t);
traces[t].last = new_bb;
- if (next_bb && next_bb != EXIT_BLOCK_PTR)
+ if (next_bb && next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
t = bbd[next_bb->index].start_of_trace;
traces[last_trace].last->aux = traces[t].first;
@@ -1413,7 +1415,7 @@ fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
JUMP_LABEL (jump) = post_label;
/* Create new basic block to be dest for lp. */
- last_bb = EXIT_BLOCK_PTR->prev_bb;
+ last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
new_bb = create_basic_block (new_label, jump, last_bb);
new_bb->aux = last_bb->aux;
last_bb->aux = new_bb;
@@ -1663,8 +1665,8 @@ find_rarely_executed_basic_blocks_and_crossing_edges (void)
/* We should never have EDGE_CROSSING set yet. */
gcc_checking_assert ((flags & EDGE_CROSSING) == 0);
- if (e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& BB_PARTITION (e->src) != BB_PARTITION (e->dest))
{
crossing_edges.safe_push (e);
@@ -1731,14 +1733,14 @@ add_labels_and_missing_jumps (vec<edge> crossing_edges)
basic_block dest = e->dest;
rtx label, new_jump;
- if (dest == EXIT_BLOCK_PTR)
+ if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* Make sure dest has a label. */
label = block_label (dest);
/* Nothing to do for non-fallthru edges. */
- if (src == ENTRY_BLOCK_PTR)
+ if (src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
if ((e->flags & EDGE_FALLTHRU) == 0)
continue;
@@ -1832,7 +1834,7 @@ fix_up_fall_thru_edges (void)
}
}
- if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
+ if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
{
/* Check to see if the fall-thru edge is a crossing edge. */
@@ -2066,7 +2068,7 @@ fix_crossing_conditional_branches (void)
new_jump = emit_jump_insn (gen_jump (old_label));
JUMP_LABEL (new_jump) = old_label;
- last_bb = EXIT_BLOCK_PTR->prev_bb;
+ last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
new_bb = create_basic_block (new_label, new_jump, last_bb);
new_bb->aux = last_bb->aux;
last_bb->aux = new_bb;
@@ -2220,7 +2222,7 @@ reorder_basic_blocks (void)
gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return;
set_edge_can_fallthru_flag ();
@@ -2244,7 +2246,7 @@ reorder_basic_blocks (void)
bbd[i].node = NULL;
}
- traces = XNEWVEC (struct trace, n_basic_blocks);
+ traces = XNEWVEC (struct trace, n_basic_blocks_for_fn (cfun));
n_traces = 0;
find_traces (&n_traces, traces);
connect_traces (n_traces, traces);
@@ -2319,7 +2321,7 @@ rest_of_handle_reorder_blocks (void)
cleanup_cfg (CLEANUP_EXPENSIVE);
FOR_EACH_BB (bb)
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
cfg_layout_finalize ();
@@ -2388,7 +2390,7 @@ duplicate_computed_gotos (void)
bitmap candidates;
int max_size;
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return 0;
clear_bb_flags ();
@@ -2415,7 +2417,7 @@ duplicate_computed_gotos (void)
int size, all_flags;
/* Build the reorder chain for the original order of blocks. */
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
/* Obviously the block has to end in a computed jump. */
@@ -2465,7 +2467,7 @@ duplicate_computed_gotos (void)
the exit block or the next block.
The destination must have more than one predecessor. */
if (!single_succ_p (bb)
- || single_succ (bb) == EXIT_BLOCK_PTR
+ || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| single_succ (bb) == bb->next_bb
|| single_pred_p (single_succ (bb)))
continue;
@@ -2640,7 +2642,7 @@ partition_hot_cold_basic_blocks (void)
{
vec<edge> crossing_edges;
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return 0;
df_set_flags (DF_DEFER_INSN_RESCAN);
diff --git a/gcc/bt-load.c b/gcc/bt-load.c
index 5384d01d525..09eea06e379 100644
--- a/gcc/bt-load.c
+++ b/gcc/bt-load.c
@@ -900,7 +900,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range,
{
basic_block *worklist, *tos;
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
if (dominated_by_p (CDI_DOMINATORS, new_bb, head_bb))
{
@@ -1328,7 +1328,8 @@ migrate_btr_def (btr_def def, int min_cost)
def_basic_block_freq = basic_block_freq (def->bb);
for (attempt = get_immediate_dominator (CDI_DOMINATORS, def->bb);
- !give_up && attempt && attempt != ENTRY_BLOCK_PTR && def->cost >= min_cost;
+ !give_up && attempt && attempt != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && def->cost >= min_cost;
attempt = get_immediate_dominator (CDI_DOMINATORS, attempt))
{
/* Try to move the instruction that sets the target register into
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 28d975586d8..8439fd95e05 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -24,6 +24,11 @@ along with GCC; see the file COPYING3. If not see
#include "machmode.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
+#include "tree-object-size.h"
#include "realmpfr.h"
#include "gimple.h"
#include "flags.h"
@@ -3102,12 +3107,15 @@ builtin_memcpy_read_str (void *data, HOST_WIDE_INT offset,
}
/* LEN specify length of the block of memcpy/memset operation.
- Figure out its range and put it into MIN_SIZE/MAX_SIZE. */
+ Figure out its range and put it into MIN_SIZE/MAX_SIZE.
+ In some cases we can make very likely guess on max size, then we
+ set it into PROBABLE_MAX_SIZE. */
static void
determine_block_size (tree len, rtx len_rtx,
unsigned HOST_WIDE_INT *min_size,
- unsigned HOST_WIDE_INT *max_size)
+ unsigned HOST_WIDE_INT *max_size,
+ unsigned HOST_WIDE_INT *probable_max_size)
{
if (CONST_INT_P (len_rtx))
{
@@ -3117,28 +3125,47 @@ determine_block_size (tree len, rtx len_rtx,
else
{
widest_int min, max;
- if (TREE_CODE (len) == SSA_NAME
- && get_range_info (len, &min, &max) == VR_RANGE)
+ enum value_range_type range_type = VR_UNDEFINED;
+
+ /* Determine bounds from the type. */
+ if (tree_fits_uhwi_p (TYPE_MIN_VALUE (TREE_TYPE (len))))
+ *min_size = tree_to_uhwi (TYPE_MIN_VALUE (TREE_TYPE (len)));
+ else
+ *min_size = 0;
+ if (tree_fits_uhwi_p (TYPE_MAX_VALUE (TREE_TYPE (len))))
+ *probable_max_size = *max_size = tree_to_uhwi (TYPE_MAX_VALUE (TREE_TYPE (len)));
+ else
+ *probable_max_size = *max_size = GET_MODE_MASK (GET_MODE (len_rtx));
+
+ if (TREE_CODE (len) == SSA_NAME)
+ range_type = get_range_info (len, &min, &max);
+ if (range_type == VR_RANGE)
{
- if (wi::fits_uhwi_p (min))
+ if (wi::fits_uhwi_p (min) && *min_size < min.to_uhwi ())
*min_size = min.to_uhwi ();
- else
- *min_size = 0;
- if (wi::fits_uhwi_p (max))
- *max_size = max.to_uhwi ();
- else
- *max_size = (HOST_WIDE_INT)-1;
+ if (wi::fits_uhwi_p (max) && *max_size > max.to_uhwi ())
+ *probable_max_size = *max_size = max.to_uhwi ();
}
- else
+ else if (range_type == VR_ANTI_RANGE)
{
- if (tree_fits_uhwi_p (TYPE_MIN_VALUE (TREE_TYPE (len))))
- *min_size = tree_to_uhwi (TYPE_MIN_VALUE (TREE_TYPE (len)));
- else
- *min_size = 0;
- if (tree_fits_uhwi_p (TYPE_MAX_VALUE (TREE_TYPE (len))))
- *max_size = tree_to_uhwi (TYPE_MAX_VALUE (TREE_TYPE (len)));
- else
- *max_size = GET_MODE_MASK (GET_MODE (len_rtx));
+	  /* Anti range 0...N lets us to determine minimal size to N+1.  */
+ if (min == 0)
+ {
+ widest_int max_plus_one = max + 1;
+ if (wi::fits_uhwi_p (max_plus_one))
+ *min_size = max_plus_one.to_uhwi ();
+ }
+ /* Code like
+
+ int n;
+ if (n < 100)
+ memcpy (a,b, n)
+
+ Produce anti range allowing negative values of N. We still
+ can use the information and make a guess that N is not negative.
+ */
+ else if (!wi::leu_p (max, 1 << 30) && wi::fits_uhwi_p (min))
+ *probable_max_size = min.to_uhwi () - 1;
}
}
gcc_checking_assert (*max_size <=
@@ -3170,6 +3197,7 @@ expand_builtin_memcpy (tree exp, rtx target)
unsigned int expected_align = 0;
unsigned HOST_WIDE_INT min_size;
unsigned HOST_WIDE_INT max_size;
+ unsigned HOST_WIDE_INT probable_max_size;
/* If DEST is not a pointer type, call the normal function. */
if (dest_align == 0)
@@ -3189,7 +3217,8 @@ expand_builtin_memcpy (tree exp, rtx target)
dest_mem = get_memory_rtx (dest, len);
set_mem_align (dest_mem, dest_align);
len_rtx = expand_normal (len);
- determine_block_size (len, len_rtx, &min_size, &max_size);
+ determine_block_size (len, len_rtx, &min_size, &max_size,
+ &probable_max_size);
src_str = c_getstr (src);
/* If SRC is a string constant and block move would be done
@@ -3219,7 +3248,7 @@ expand_builtin_memcpy (tree exp, rtx target)
CALL_EXPR_TAILCALL (exp)
? BLOCK_OP_TAILCALL : BLOCK_OP_NORMAL,
expected_align, expected_size,
- min_size, max_size);
+ min_size, max_size, probable_max_size);
if (dest_addr == 0)
{
@@ -3635,6 +3664,7 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
unsigned int expected_align = 0;
unsigned HOST_WIDE_INT min_size;
unsigned HOST_WIDE_INT max_size;
+ unsigned HOST_WIDE_INT probable_max_size;
dest_align = get_pointer_alignment (dest);
@@ -3663,7 +3693,8 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
len = builtin_save_expr (len);
len_rtx = expand_normal (len);
- determine_block_size (len, len_rtx, &min_size, &max_size);
+ determine_block_size (len, len_rtx, &min_size, &max_size,
+ &probable_max_size);
dest_mem = get_memory_rtx (dest, len);
val_mode = TYPE_MODE (unsigned_char_type_node);
@@ -3690,7 +3721,8 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
}
else if (!set_storage_via_setmem (dest_mem, len_rtx, val_rtx,
dest_align, expected_align,
- expected_size, min_size, max_size))
+ expected_size, min_size, max_size,
+ probable_max_size))
goto do_libcall;
dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX);
@@ -3712,7 +3744,8 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
else if (!set_storage_via_setmem (dest_mem, len_rtx,
gen_int_mode (c, val_mode),
dest_align, expected_align,
- expected_size, min_size, max_size))
+ expected_size, min_size, max_size,
+ probable_max_size))
goto do_libcall;
dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX);
@@ -3725,7 +3758,8 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
CALL_EXPR_TAILCALL (orig_exp)
? BLOCK_OP_TAILCALL : BLOCK_OP_NORMAL,
expected_align, expected_size,
- min_size, max_size);
+ min_size, max_size,
+ probable_max_size);
if (dest_addr == 0)
{
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 113b4edda79..4c5c153943e 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,45 @@
+2013-11-20 Kenneth Zadeck <zadeck@naturalbridge.com>
+ Mike Stump <mikestump@comcast.net>
+ Richard Sandiford <rdsandiford@googlemail.com>
+
+ * c-ada-spec.c (is_simple_enum): Use tree_to_shwi and tree_to_uhwi
+ instead of TREE_INT_CST_LOW, in cases where there is a protecting
+ tree_fits_shwi_p or tree_fits_uhwi_p.
+ (dump_generic_ada_node): Likewise.
+ * c-format.c (check_format_arg): Likewise.
+ * c-pretty-print.c (pp_c_integer_constant): Likewise.
+
+2013-11-20 Kenneth Zadeck <zadeck@naturalbridge.com>
+
+ * c-common.c (check_function_arguments_recurse): Use tree_to_uhwi.
+
+2013-11-19 Joshua J Cogliati <jrincayc@yahoo.com>
+
+ PR c/53001
+ * c-common.c (unsafe_conversion_p): Make this function
+ return an enumeration with more detail.
+ (conversion_warning): Use the new return type of
+ unsafe_conversion_p to separately warn either about conversions
+ that lower floating point number precision or about the other
+ kinds of conversions.
+ * c-common.h (enum conversion_safety): New enumeration.
+ (unsafe_conversion_p): switching return type to
+ conversion_safety enumeration.
+ * c.opt: Adding new warning -Wfloat-conversion and
+ enabling it with -Wconversion.
+
+2013-11-19 Basile Starynkevitch <basile@starynkevitch.net>
+
+ * c-opts.c: Include plugin.h.
+ (cb_file_change): Invoke plugin event PLUGIN_INCLUDE_FILE.
+
+2013-11-19 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_division): Adjust ubsan_create_data
+ call.
+ (ubsan_instrument_shift): Likewise.
+ (ubsan_instrument_vla): Likewise.
+
2013-11-18 Richard Sandiford <rdsandiford@googlemail.com>
* c-common.c (convert_vector_to_pointer_for_subscript): Remove
@@ -56,6 +98,29 @@
* c-gimplify.c: Likewise.
* cilk.c: Likewise.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * c-common.c: Include fold-const.h.
+ Include stor-layout.h.
+ Include calls.h.
+ Include stringpool.h.
+ Include attribs.h.
+ Include varasm.h.
+ Include trans-mem.h.
+ * c-cppbuiltin.c: Include stor-layout.h.
+ Include stringpool.h.
+ * c-format.c: Include stringpool.h.
+ * c-lex.c: Include stringpool.h.
+ Include stor-layout.h.
+ * c-pragma.c: Include stringpool.h.
+ Include attribs.h.
+ Include varasm.h.
+ Include gcc-symtab.h.
+ * c-pretty-print.c: Include stor-layout.h.
+ Include attribs.h.
+ * cilk.c: Include stringpool.h.
+ Include calls.h.
+
2013-11-13 Joseph Myers <joseph@codesourcery.com>
* c-common.h (enum rid): Add RID_AUTO_TYPE.
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 1a39a9d771c..4eea22711ea 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -23,6 +23,13 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "intl.h"
#include "tree.h"
+#include "fold-const.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "varasm.h"
+#include "trans-mem.h"
#include "flags.h"
#include "c-pragma.h"
#include "ggc.h"
@@ -2531,7 +2538,7 @@ shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise)
}
/* Checks if expression EXPR of real/integer type cannot be converted
- to the real/integer type TYPE. Function returns true when:
+ to the real/integer type TYPE. Function returns non-zero when:
* EXPR is a constant which cannot be exactly converted to TYPE
* EXPR is not a constant and size of EXPR's type > than size of TYPE,
for EXPR type and TYPE being both integers or both real.
@@ -2539,12 +2546,12 @@ shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise)
* EXPR is not a constant of integer type which cannot be
exactly converted to real type.
Function allows conversions between types of different signedness and
- does not return true in that case. Function can produce signedness
- warnings if PRODUCE_WARNS is true. */
-bool
+ can return SAFE_CONVERSION (zero) in that case. Function can produce
+ signedness warnings if PRODUCE_WARNS is true. */
+enum conversion_safety
unsafe_conversion_p (tree type, tree expr, bool produce_warns)
{
- bool give_warning = false;
+ enum conversion_safety give_warning = SAFE_CONVERSION; /* is 0 or false */
tree expr_type = TREE_TYPE (expr);
location_t loc = EXPR_LOC_OR_HERE (expr);
@@ -2556,7 +2563,7 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
&& TREE_CODE (type) == INTEGER_TYPE)
{
if (!real_isinteger (TREE_REAL_CST_PTR (expr), TYPE_MODE (expr_type)))
- give_warning = true;
+ give_warning = UNSAFE_REAL;
}
/* Warn for an integer constant that does not fit into integer type. */
else if (TREE_CODE (expr_type) == INTEGER_TYPE
@@ -2577,7 +2584,7 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
" constant value to negative integer");
}
else
- give_warning = true;
+ give_warning = UNSAFE_OTHER;
}
else if (TREE_CODE (type) == REAL_TYPE)
{
@@ -2586,7 +2593,7 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
{
REAL_VALUE_TYPE a = real_value_from_int_cst (0, expr);
if (!exact_real_truncate (TYPE_MODE (type), &a))
- give_warning = true;
+ give_warning = UNSAFE_REAL;
}
/* Warn for a real constant that does not fit into a smaller
real type. */
@@ -2595,7 +2602,7 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
{
REAL_VALUE_TYPE a = TREE_REAL_CST (expr);
if (!exact_real_truncate (TYPE_MODE (type), &a))
- give_warning = true;
+ give_warning = UNSAFE_REAL;
}
}
}
@@ -2604,7 +2611,7 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
/* Warn for real types converted to integer types. */
if (TREE_CODE (expr_type) == REAL_TYPE
&& TREE_CODE (type) == INTEGER_TYPE)
- give_warning = true;
+ give_warning = UNSAFE_REAL;
else if (TREE_CODE (expr_type) == INTEGER_TYPE
&& TREE_CODE (type) == INTEGER_TYPE)
@@ -2642,7 +2649,7 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
&& int_fits_type_p (op1, c_common_signed_type (type))
&& int_fits_type_p (op1,
c_common_unsigned_type (type))))
- return false;
+ return SAFE_CONVERSION;
/* If constant is unsigned and fits in the target
type, then the result will also fit. */
else if ((TREE_CODE (op0) == INTEGER_CST
@@ -2651,12 +2658,12 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
|| (TREE_CODE (op1) == INTEGER_CST
&& unsigned1
&& int_fits_type_p (op1, type)))
- return false;
+ return SAFE_CONVERSION;
}
}
/* Warn for integer types converted to smaller integer types. */
if (TYPE_PRECISION (type) < TYPE_PRECISION (expr_type))
- give_warning = true;
+ give_warning = UNSAFE_OTHER;
/* When they are the same width but different signedness,
then the value may change. */
@@ -2692,14 +2699,14 @@ unsafe_conversion_p (tree type, tree expr, bool produce_warns)
if (!exact_real_truncate (TYPE_MODE (type), &real_low_bound)
|| !exact_real_truncate (TYPE_MODE (type), &real_high_bound))
- give_warning = true;
+ give_warning = UNSAFE_OTHER;
}
/* Warn for real types converted to smaller real types. */
else if (TREE_CODE (expr_type) == REAL_TYPE
&& TREE_CODE (type) == REAL_TYPE
&& TYPE_PRECISION (type) < TYPE_PRECISION (expr_type))
- give_warning = true;
+ give_warning = UNSAFE_REAL;
}
return give_warning;
@@ -2713,8 +2720,9 @@ conversion_warning (tree type, tree expr)
{
tree expr_type = TREE_TYPE (expr);
location_t loc = EXPR_LOC_OR_HERE (expr);
+ enum conversion_safety conversion_kind;
- if (!warn_conversion && !warn_sign_conversion)
+ if (!warn_conversion && !warn_sign_conversion && !warn_float_conversion)
return;
switch (TREE_CODE (expr))
@@ -2741,7 +2749,12 @@ conversion_warning (tree type, tree expr)
case REAL_CST:
case INTEGER_CST:
- if (unsafe_conversion_p (type, expr, true))
+ conversion_kind = unsafe_conversion_p (type, expr, true);
+ if (conversion_kind == UNSAFE_REAL)
+ warning_at (loc, OPT_Wfloat_conversion,
+ "conversion to %qT alters %qT constant value",
+ type, expr_type);
+ else if (conversion_kind)
warning_at (loc, OPT_Wconversion,
"conversion to %qT alters %qT constant value",
type, expr_type);
@@ -2760,7 +2773,12 @@ conversion_warning (tree type, tree expr)
}
default: /* 'expr' is not a constant. */
- if (unsafe_conversion_p (type, expr, true))
+ conversion_kind = unsafe_conversion_p (type, expr, true);
+ if (conversion_kind == UNSAFE_REAL)
+ warning_at (loc, OPT_Wfloat_conversion,
+ "conversion to %qT from %qT may alter its value",
+ type, expr_type);
+ else if (conversion_kind)
warning_at (loc, OPT_Wconversion,
"conversion to %qT from %qT may alter its value",
type, expr_type);
@@ -9194,8 +9212,6 @@ check_function_arguments_recurse (void (*callback)
to be valid. */
format_num_expr = TREE_VALUE (TREE_VALUE (attrs));
- gcc_assert (tree_fits_uhwi_p (format_num_expr));
-
format_num = tree_to_uhwi (format_num_expr);
for (inner_arg = first_call_expr_arg (param, &iter), i = 1;
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index b931fd6d2a8..664e9287a1b 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -688,6 +688,16 @@ struct visibility_flags
unsigned inlines_hidden : 1; /* True when -finlineshidden in effect. */
};
+/* These enumerators are possible types of unsafe conversions.
+ SAFE_CONVERSION The conversion is safe
+ UNSAFE_OTHER Another type of conversion with problems
+ UNSAFE_SIGN Conversion between signed and unsigned integers
+ which are all warned about immediately, so this is unused
+ UNSAFE_REAL Conversions that reduce the precision of reals
+ including conversions from reals to integers
+ */
+enum conversion_safety { SAFE_CONVERSION = 0, UNSAFE_OTHER, UNSAFE_SIGN, UNSAFE_REAL };
+
/* Global visibility options. */
extern struct visibility_flags visibility_options;
@@ -741,7 +751,7 @@ extern tree c_common_signed_type (tree);
extern tree c_common_signed_or_unsigned_type (int, tree);
extern void c_common_init_ts (void);
extern tree c_build_bitfield_integer_type (unsigned HOST_WIDE_INT, int);
-extern bool unsafe_conversion_p (tree, tree, bool);
+extern enum conversion_safety unsafe_conversion_p (tree, tree, bool);
extern bool decl_with_nonnull_addr_p (const_tree);
extern tree c_fully_fold (tree, bool, bool *);
extern tree decl_constant_value_for_optimization (tree);
diff --git a/gcc/c-family/c-cppbuiltin.c b/gcc/c-family/c-cppbuiltin.c
index c5e895c5850..eb96b8338b0 100644
--- a/gcc/c-family/c-cppbuiltin.c
+++ b/gcc/c-family/c-cppbuiltin.c
@@ -22,6 +22,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "version.h"
#include "flags.h"
#include "c-common.h"
diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c
index ed2cf89eaf2..0ae6733c2d7 100644
--- a/gcc/c-family/c-format.c
+++ b/gcc/c-family/c-format.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "flags.h"
#include "c-common.h"
#include "c-objc.h"
diff --git a/gcc/c-family/c-lex.c b/gcc/c-family/c-lex.c
index 4aff46db238..4594a5ac1b9 100644
--- a/gcc/c-family/c-lex.c
+++ b/gcc/c-family/c-lex.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "input.h"
#include "c-common.h"
#include "flags.h"
diff --git a/gcc/c-family/c-opts.c b/gcc/c-family/c-opts.c
index 34fe94de34b..f368cab289f 100644
--- a/gcc/c-family/c-opts.c
+++ b/gcc/c-family/c-opts.c
@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
#include "debug.h" /* For debug_hooks. */
#include "opts.h"
#include "options.h"
+#include "plugin.h" /* For PLUGIN_INCLUDE_FILE event. */
#include "mkdeps.h"
#include "c-target.h"
#include "tm.h" /* For BYTES_BIG_ENDIAN,
@@ -1397,6 +1398,17 @@ cb_file_change (cpp_reader * ARG_UNUSED (pfile),
else
fe_file_change (new_map);
+ if (new_map
+ && (new_map->reason == LC_ENTER || new_map->reason == LC_RENAME))
+ {
+ /* Signal to plugins that a file is included. This could happen
+ several times with the same file path, e.g. because of
+ several '#include' or '#line' directives... */
+ invoke_plugin_callbacks
+ (PLUGIN_INCLUDE_FILE,
+ const_cast<char*> (ORDINARY_MAP_FILE_NAME (new_map)));
+ }
+
if (new_map == 0 || (new_map->reason == LC_LEAVE && MAIN_FILE_P (new_map)))
{
pch_cpp_save_state ();
diff --git a/gcc/c-family/c-pragma.c b/gcc/c-family/c-pragma.c
index 029ab1e33ac..70fb39ae9cf 100644
--- a/gcc/c-family/c-pragma.c
+++ b/gcc/c-family/c-pragma.c
@@ -22,6 +22,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "varasm.h"
+#include "gcc-symtab.h"
#include "function.h" /* For cfun. FIXME: Does the parser know
when it is inside a function, so that
we don't have to look at cfun? */
diff --git a/gcc/c-family/c-pretty-print.c b/gcc/c-family/c-pretty-print.c
index 1fe8c51e2f9..bd3b38183d3 100644
--- a/gcc/c-family/c-pretty-print.c
+++ b/gcc/c-family/c-pretty-print.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "attribs.h"
#include "intl.h"
#include "c-pretty-print.h"
#include "tree-pretty-print.h"
diff --git a/gcc/c-family/c-ubsan.c b/gcc/c-family/c-ubsan.c
index dbac348bc0e..7a09e7b9596 100644
--- a/gcc/c-family/c-ubsan.c
+++ b/gcc/c-family/c-ubsan.c
@@ -73,7 +73,8 @@ ubsan_instrument_division (location_t loc, tree op0, tree op1)
make sure it gets evaluated before the condition. */
t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
tree data = ubsan_create_data ("__ubsan_overflow_data",
- loc, ubsan_type_descriptor (type),
+ loc, NULL,
+ ubsan_type_descriptor (type, false),
NULL_TREE);
data = build_fold_addr_expr_loc (loc, data);
tt = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW);
@@ -141,8 +142,10 @@ ubsan_instrument_shift (location_t loc, enum tree_code code,
make sure it gets evaluated before the condition. */
t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
tree data = ubsan_create_data ("__ubsan_shift_data",
- loc, ubsan_type_descriptor (type0),
- ubsan_type_descriptor (type1), NULL_TREE);
+ loc, NULL,
+ ubsan_type_descriptor (type0, false),
+ ubsan_type_descriptor (type1, false),
+ NULL_TREE);
data = build_fold_addr_expr_loc (loc, data);
@@ -166,7 +169,9 @@ ubsan_instrument_vla (location_t loc, tree size)
t = fold_build2 (LE_EXPR, boolean_type_node, size, build_int_cst (type, 0));
tree data = ubsan_create_data ("__ubsan_vla_data",
- loc, ubsan_type_descriptor (type), NULL_TREE);
+ loc, NULL,
+ ubsan_type_descriptor (type, false),
+ NULL_TREE);
data = build_fold_addr_expr_loc (loc, data);
tt = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE);
tt = build_call_expr_loc (loc, tt, 2, data, ubsan_encode_value (size));
diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
index 0026683730e..ac678855414 100644
--- a/gcc/c-family/c.opt
+++ b/gcc/c-family/c.opt
@@ -387,6 +387,10 @@ Werror-implicit-function-declaration
C ObjC RejectNegative Warning Alias(Werror=, implicit-function-declaration)
This switch is deprecated; use -Werror=implicit-function-declaration instead
+Wfloat-conversion
+C ObjC C++ ObjC++ Var(warn_float_conversion) LangEnabledBy(C ObjC C++ ObjC++,Wconversion)
+Warn for implicit type conversions that cause loss of floating point precision
+
Wfloat-equal
C ObjC C++ ObjC++ Var(warn_float_equal) Warning
Warn if testing floating point numbers for equality
diff --git a/gcc/c-family/cilk.c b/gcc/c-family/cilk.c
index 802d879749d..a68ef304dd6 100644
--- a/gcc/c-family/cilk.c
+++ b/gcc/c-family/cilk.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "calls.h"
#include "langhooks.h"
#include "gimple.h"
#include "gimplify.h"
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index e8a5365592d..92dbcee4809 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -39,6 +39,24 @@
* c-typeck.c: Include only gimplify.h and gimple.h as needed.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * c-decl.c: Include print-tree.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include attribs.h.
+ Include stringpool.h.
+ * c-lang.c: Include fold-const.h.
+ * c-parser.c: Include stringpool.h.
+ Include attribs.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include trans-mem.h.
+ * c-typeck.c: Include stor-layout.h.
+ Include trans-mem.h.
+ Include varasm.h.
+ Include stmt.h.
+
2013-11-13 Joseph Myers <joseph@codesourcery.com>
* c-tree.h (c_typespec_keyword): Add cts_auto_type.
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index 035afc7b86a..3773191dec2 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -31,6 +31,11 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "intl.h"
#include "tree.h"
+#include "print-tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "attribs.h"
+#include "stringpool.h"
#include "tree-inline.h"
#include "flags.h"
#include "function.h"
diff --git a/gcc/c/c-lang.c b/gcc/c/c-lang.c
index 614c46d15eb..8b0dca601e9 100644
--- a/gcc/c/c-lang.c
+++ b/gcc/c/c-lang.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "fold-const.h"
#include "c-tree.h"
#include "c-family/c-common.h"
#include "langhooks.h"
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index d8041a7d891..d08d1d0bcf1 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -40,6 +40,11 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h" /* For rtl.h: needs enum reg_class. */
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "trans-mem.h"
#include "langhooks.h"
#include "input.h"
#include "cpplib.h"
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index cbc1177aaff..26cc73195c8 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -28,6 +28,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "trans-mem.h"
+#include "varasm.h"
+#include "stmt.h"
#include "langhooks.h"
#include "c-tree.h"
#include "c-lang.h"
diff --git a/gcc/calls.c b/gcc/calls.c
index 4dcdb27c1c1..726ba23c3fc 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -23,6 +23,10 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "stringpool.h"
+#include "attribs.h"
#include "gimple.h"
#include "flags.h"
#include "expr.h"
diff --git a/gcc/calls.h b/gcc/calls.h
new file mode 100644
index 00000000000..8e727382905
--- /dev/null
+++ b/gcc/calls.h
@@ -0,0 +1,31 @@
+/* Declarations and data types for RTL call insn generation.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CALLS_H
+#define GCC_CALLS_H
+
+extern int flags_from_decl_or_type (const_tree);
+extern int call_expr_flags (const_tree);
+extern int setjmp_call_p (const_tree);
+extern bool gimple_alloca_call_p (const_gimple);
+extern bool alloca_call_p (const_tree);
+extern bool must_pass_in_stack_var_size (enum machine_mode, const_tree);
+extern bool must_pass_in_stack_var_size_or_pad (enum machine_mode, const_tree);
+
+#endif // GCC_CALLS_H
diff --git a/gcc/cfg.c b/gcc/cfg.c
index cfada7395db..e35eee9a9bc 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -69,17 +69,17 @@ init_flow (struct function *the_fun)
{
if (!the_fun->cfg)
the_fun->cfg = ggc_alloc_cleared_control_flow_graph ();
- n_edges_for_function (the_fun) = 0;
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)
+ n_edges_for_fn (the_fun) = 0;
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)
= ggc_alloc_cleared_basic_block_def ();
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->index = ENTRY_BLOCK;
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)
= ggc_alloc_cleared_basic_block_def ();
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->index = EXIT_BLOCK;
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->next_bb
- = EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun);
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->prev_bb
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun);
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
+ = EXIT_BLOCK_PTR_FOR_FN (the_fun);
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
+ = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}
/* Helper function for remove_edge and clear_edges. Frees edge structure
@@ -88,7 +88,7 @@ init_flow (struct function *the_fun)
static void
free_edge (edge e)
{
- n_edges--;
+ n_edges_for_fn (cfun)--;
ggc_free (e);
}
@@ -109,12 +109,12 @@ clear_edges (void)
vec_safe_truncate (bb->preds, 0);
}
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
free_edge (e);
- vec_safe_truncate (EXIT_BLOCK_PTR->preds, 0);
- vec_safe_truncate (ENTRY_BLOCK_PTR->succs, 0);
+ vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, 0);
+ vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs, 0);
- gcc_assert (!n_edges);
+ gcc_assert (!n_edges_for_fn (cfun));
}
/* Allocate memory for basic_block. */
@@ -153,8 +153,8 @@ compact_blocks (void)
{
int i;
- SET_BASIC_BLOCK (ENTRY_BLOCK, ENTRY_BLOCK_PTR);
- SET_BASIC_BLOCK (EXIT_BLOCK, EXIT_BLOCK_PTR);
+ SET_BASIC_BLOCK (ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ SET_BASIC_BLOCK (EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (df)
df_compact_blocks ();
@@ -169,12 +169,12 @@ compact_blocks (void)
bb->index = i;
i++;
}
- gcc_assert (i == n_basic_blocks);
+ gcc_assert (i == n_basic_blocks_for_fn (cfun));
for (; i < last_basic_block; i++)
SET_BASIC_BLOCK (i, NULL);
}
- last_basic_block = n_basic_blocks;
+ last_basic_block = n_basic_blocks_for_fn (cfun);
}
/* Remove block B from the basic block array. */
@@ -184,7 +184,7 @@ expunge_block (basic_block b)
{
unlink_block (b);
SET_BASIC_BLOCK (b->index, NULL);
- n_basic_blocks--;
+ n_basic_blocks_for_fn (cfun)--;
/* We should be able to ggc_free here, but we are not.
The dead SSA_NAMES are left pointing to dead statements that are pointing
to dead basic blocks making garbage collector to die.
@@ -262,7 +262,7 @@ unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
edge e;
e = ggc_alloc_cleared_edge_def ();
- n_edges++;
+ n_edges_for_fn (cfun)++;
e->src = src;
e->dest = dst;
@@ -282,8 +282,8 @@ edge
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
if (edge_cache == NULL
- || src == ENTRY_BLOCK_PTR
- || dst == EXIT_BLOCK_PTR)
+ || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
return make_edge (src, dst, flags);
/* Does the requested edge already exist? */
@@ -387,7 +387,7 @@ clear_bb_flags (void)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
bb->flags &= BB_FLAGS_TO_PRESERVE;
}
@@ -411,7 +411,7 @@ check_bb_profile (basic_block bb, FILE * file, int indent, int flags)
if (profile_status_for_function (fun) == PROFILE_ABSENT)
return;
- if (bb != EXIT_BLOCK_PTR_FOR_FUNCTION (fun))
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
{
FOR_EACH_EDGE (e, ei, bb->succs)
sum += e->probability;
@@ -428,7 +428,7 @@ check_bb_profile (basic_block bb, FILE * file, int indent, int flags)
(flags & TDF_COMMENT) ? ";; " : "", s_indent,
(int) lsum, (int) bb->count);
}
- if (bb != ENTRY_BLOCK_PTR_FOR_FUNCTION (fun))
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
{
sum = 0;
FOR_EACH_EDGE (e, ei, bb->preds)
@@ -641,7 +641,8 @@ alloc_aux_for_edges (int size)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
@@ -660,7 +661,8 @@ clear_aux_for_edges (void)
basic_block bb;
edge e;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index b2216117227..30376b3db4c 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -76,7 +76,7 @@ mark_dfs_back_edges (void)
post = XCNEWVEC (int, last_basic_block);
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
@@ -86,7 +86,7 @@ mark_dfs_back_edges (void)
bitmap_clear (visited);
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
while (sp)
{
@@ -101,7 +101,8 @@ mark_dfs_back_edges (void)
ei_edge (ei)->flags &= ~EDGE_DFS_BACK;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! bitmap_bit_p (visited, dest->index))
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && ! bitmap_bit_p (visited,
+ dest->index))
{
/* Mark that we have visited the destination. */
bitmap_set_bit (visited, dest->index);
@@ -118,12 +119,14 @@ mark_dfs_back_edges (void)
}
else
{
- if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& pre[src->index] >= pre[dest->index]
&& post[dest->index] == 0)
ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true;
- if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
+ if (ei_one_before_end_p (ei)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
post[src->index] = postnum++;
if (!ei_one_before_end_p (ei))
@@ -152,7 +155,7 @@ find_unreachable_blocks (void)
edge_iterator ei;
basic_block *tos, *worklist, bb;
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* Clear all the reachability flags. */
@@ -163,7 +166,7 @@ find_unreachable_blocks (void)
be only one. It isn't inconceivable that we might one day directly
support Fortran alternate entry points. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
{
*tos++ = e->dest;
@@ -217,7 +220,8 @@ create_edge_list (void)
/* Determine the number of edges in the flow graph by counting successor
edges on each basic block. */
num_edges = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
num_edges += EDGE_COUNT (bb->succs);
}
@@ -229,7 +233,8 @@ create_edge_list (void)
num_edges = 0;
/* Follow successors of blocks, and register these edges. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
FOR_EACH_EDGE (e, ei, bb->succs)
elist->index_to_edge[num_edges++] = e;
@@ -256,17 +261,17 @@ print_edge_list (FILE *f, struct edge_list *elist)
int x;
fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
- n_basic_blocks, elist->num_edges);
+ n_basic_blocks_for_fn (cfun), elist->num_edges);
for (x = 0; x < elist->num_edges; x++)
{
fprintf (f, " %-4d - edge(", x);
- if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
+ if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
fprintf (f, "entry,");
else
fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);
- if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
+ if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR_FOR_FN (cfun))
fprintf (f, "exit)\n");
else
fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
@@ -285,7 +290,8 @@ verify_edge_list (FILE *f, struct edge_list *elist)
basic_block bb, p, s;
edge_iterator ei;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
@@ -310,8 +316,9 @@ verify_edge_list (FILE *f, struct edge_list *elist)
/* We've verified that all the edges are in the list, now lets make sure
there are no spurious edges in the list. This is an expensive check! */
- FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
- FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
+ FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
+ FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
{
int found_edge = 0;
@@ -348,9 +355,9 @@ void
control_dependences::set_control_dependence_map_bit (basic_block bb,
int edge_index)
{
- if (bb == ENTRY_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
- gcc_assert (bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
bitmap_set_bit (control_dependence_map[bb->index], edge_index);
}
@@ -367,15 +374,15 @@ control_dependences::clear_control_dependence_bitmap (basic_block bb)
static inline basic_block
find_pdom (basic_block block)
{
- gcc_assert (block != ENTRY_BLOCK_PTR);
+ gcc_assert (block != ENTRY_BLOCK_PTR_FOR_FN (cfun));
- if (block == EXIT_BLOCK_PTR)
- return EXIT_BLOCK_PTR;
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
else
{
basic_block bb = get_immediate_dominator (CDI_POST_DOMINATORS, block);
if (! bb)
- return EXIT_BLOCK_PTR;
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
return bb;
}
}
@@ -389,15 +396,17 @@ control_dependences::find_control_dependence (int edge_index)
basic_block current_block;
basic_block ending_block;
- gcc_assert (INDEX_EDGE_PRED_BB (m_el, edge_index) != EXIT_BLOCK_PTR);
+ gcc_assert (INDEX_EDGE_PRED_BB (m_el, edge_index)
+ != EXIT_BLOCK_PTR_FOR_FN (cfun));
- if (INDEX_EDGE_PRED_BB (m_el, edge_index) == ENTRY_BLOCK_PTR)
- ending_block = single_succ (ENTRY_BLOCK_PTR);
+ if (INDEX_EDGE_PRED_BB (m_el, edge_index) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ ending_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
else
ending_block = find_pdom (INDEX_EDGE_PRED_BB (m_el, edge_index));
for (current_block = INDEX_EDGE_SUCC_BB (m_el, edge_index);
- current_block != ending_block && current_block != EXIT_BLOCK_PTR;
+ current_block != ending_block
+ && current_block != EXIT_BLOCK_PTR_FOR_FN (cfun);
current_block = find_pdom (current_block))
{
edge e = INDEX_EDGE (m_el, edge_index);
@@ -523,7 +532,7 @@ remove_fake_edges (void)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
remove_fake_predecessors (bb);
}
@@ -532,7 +541,7 @@ remove_fake_edges (void)
void
remove_fake_exit_edges (void)
{
- remove_fake_predecessors (EXIT_BLOCK_PTR);
+ remove_fake_predecessors (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
@@ -547,7 +556,7 @@ add_noreturn_fake_exit_edges (void)
FOR_EACH_BB (bb)
if (EDGE_COUNT (bb->succs) == 0)
- make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_single_succ_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
/* This function adds a fake edge between any infinite loops to the
@@ -564,14 +573,14 @@ add_noreturn_fake_exit_edges (void)
void
connect_infinite_loops_to_exit (void)
{
- basic_block unvisited_block = EXIT_BLOCK_PTR;
+ basic_block unvisited_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
basic_block deadend_block;
struct depth_first_search_dsS dfs_ds;
/* Perform depth-first search in the reverse graph to find nodes
reachable from the exit block. */
flow_dfs_compute_reverse_init (&dfs_ds);
- flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR);
+ flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Repeatedly add fake edges, updating the unreachable nodes. */
while (1)
@@ -582,7 +591,7 @@ connect_infinite_loops_to_exit (void)
break;
deadend_block = dfs_find_deadend (unvisited_block);
- make_edge (deadend_block, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (deadend_block, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
flow_dfs_compute_reverse_add_bb (&dfs_ds, deadend_block);
}
@@ -609,7 +618,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
post_order[post_order_num++] = EXIT_BLOCK;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
@@ -619,7 +628,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
bitmap_clear (visited);
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
while (sp)
{
@@ -633,7 +642,8 @@ post_order_compute (int *post_order, bool include_entry_exit,
dest = ei_edge (ei)->dest;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! bitmap_bit_p (visited, dest->index))
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && ! bitmap_bit_p (visited, dest->index))
{
/* Mark that we have visited the destination. */
bitmap_set_bit (visited, dest->index);
@@ -647,7 +657,8 @@ post_order_compute (int *post_order, bool include_entry_exit,
}
else
{
- if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
+ if (ei_one_before_end_p (ei)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
post_order[post_order_num++] = src->index;
if (!ei_one_before_end_p (ei))
@@ -667,11 +678,12 @@ post_order_compute (int *post_order, bool include_entry_exit,
/* Delete the unreachable blocks if some were found and we are
supposed to do it. */
- if (delete_unreachable && (count != n_basic_blocks))
+ if (delete_unreachable && (count != n_basic_blocks_for_fn (cfun)))
{
basic_block b;
basic_block next_bb;
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
+ for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
+ != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
{
next_bb = b->next_bb;
@@ -762,7 +774,7 @@ inverted_post_order_compute (int *post_order)
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
@@ -813,7 +825,8 @@ inverted_post_order_compute (int *post_order)
}
else
{
- if (bb != EXIT_BLOCK_PTR && ei_one_before_end_p (ei))
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && ei_one_before_end_p (ei))
post_order[post_order_num++] = bb->index;
if (!ei_one_before_end_p (ei))
@@ -826,7 +839,8 @@ inverted_post_order_compute (int *post_order)
/* Detect any infinite loop and activate the kludge.
Note that this doesn't check EXIT_BLOCK itself
since EXIT_BLOCK is always added after the outer do-while loop. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
if (!bitmap_bit_p (visited, bb->index))
{
has_unvisited_bb = true;
@@ -859,7 +873,7 @@ inverted_post_order_compute (int *post_order)
{
/* No blocks are reachable from EXIT at all.
Find a dead-end from the ENTRY, and restart the iteration. */
- basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR);
+ basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (be != NULL);
bitmap_set_bit (visited, be->index);
stack[sp++] = ei_start (be->preds);
@@ -898,11 +912,11 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
edge_iterator *stack;
int sp;
int pre_order_num = 0;
- int rev_post_order_num = n_basic_blocks - 1;
+ int rev_post_order_num = n_basic_blocks_for_fn (cfun) - 1;
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
if (include_entry_exit)
@@ -923,7 +937,7 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
bitmap_clear (visited);
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs);
while (sp)
{
@@ -937,7 +951,7 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
dest = ei_edge (ei)->dest;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR_FOR_FUNCTION (fn)
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (fn)
&& ! bitmap_bit_p (visited, dest->index))
{
/* Mark that we have visited the destination. */
@@ -960,7 +974,7 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
else
{
if (ei_one_before_end_p (ei)
- && src != ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (fn)
&& rev_post_order)
/* There are no more successors for the SRC node
so assign its reverse completion number. */
@@ -1000,11 +1014,12 @@ pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
include_entry_exit);
if (include_entry_exit)
/* The number of nodes visited should be the number of blocks. */
- gcc_assert (pre_order_num == n_basic_blocks);
+ gcc_assert (pre_order_num == n_basic_blocks_for_fn (cfun));
else
/* The number of nodes visited should be the number of blocks minus
the entry and exit blocks which are not visited here. */
- gcc_assert (pre_order_num == n_basic_blocks - NUM_FIXED_BLOCKS);
+ gcc_assert (pre_order_num
+ == (n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS));
return pre_order_num;
}
@@ -1043,7 +1058,7 @@ static void
flow_dfs_compute_reverse_init (depth_first_search_ds data)
{
/* Allocate stack for back-tracking up CFG. */
- data->stack = XNEWVEC (basic_block, n_basic_blocks);
+ data->stack = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
@@ -1229,7 +1244,7 @@ compute_dominance_frontiers_1 (bitmap_head *frontiers)
{
basic_block runner = p->src;
basic_block domsb;
- if (runner == ENTRY_BLOCK_PTR)
+ if (runner == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
domsb = get_immediate_dominator (CDI_DOMINATORS, b);
@@ -1275,7 +1290,7 @@ compute_idf (bitmap def_blocks, bitmap_head *dfs)
bitmap phi_insertion_points;
/* Each block can appear at most twice on the work-stack. */
- work_stack.create (2 * n_basic_blocks);
+ work_stack.create (2 * n_basic_blocks_for_fn (cfun));
phi_insertion_points = BITMAP_ALLOC (NULL);
/* Seed the work list with all the blocks in DEF_BLOCKS. We use
@@ -1336,7 +1351,7 @@ bitmap_intersection_of_succs (sbitmap dst, sbitmap *src, basic_block b)
for (e = NULL, ix = 0; ix < EDGE_COUNT (b->succs); ix++)
{
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->dest->index]);
@@ -1352,7 +1367,7 @@ bitmap_intersection_of_succs (sbitmap dst, sbitmap *src, basic_block b)
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->dest->index]->elms;
@@ -1377,7 +1392,7 @@ bitmap_intersection_of_preds (sbitmap dst, sbitmap *src, basic_block b)
for (e = NULL, ix = 0; ix < EDGE_COUNT (b->preds); ix++)
{
e = EDGE_PRED (b, ix);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->src->index]);
@@ -1393,7 +1408,7 @@ bitmap_intersection_of_preds (sbitmap dst, sbitmap *src, basic_block b)
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_PRED (b, ix);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->src->index]->elms;
@@ -1418,7 +1433,7 @@ bitmap_union_of_succs (sbitmap dst, sbitmap *src, basic_block b)
for (ix = 0; ix < EDGE_COUNT (b->succs); ix++)
{
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->dest->index]);
@@ -1434,7 +1449,7 @@ bitmap_union_of_succs (sbitmap dst, sbitmap *src, basic_block b)
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->dest->index]->elms;
@@ -1459,7 +1474,7 @@ bitmap_union_of_preds (sbitmap dst, sbitmap *src, basic_block b)
for (ix = 0; ix < EDGE_COUNT (b->preds); ix++)
{
e = EDGE_PRED (b, ix);
- if (e->src== ENTRY_BLOCK_PTR)
+ if (e->src== ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->src->index]);
@@ -1475,7 +1490,7 @@ bitmap_union_of_preds (sbitmap dst, sbitmap *src, basic_block b)
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_PRED (b, ix);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->src->index]->elms;
@@ -1493,8 +1508,8 @@ basic_block *
single_pred_before_succ_order (void)
{
basic_block x, y;
- basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
- unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
+ basic_block *order = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ unsigned n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
unsigned np, i;
sbitmap visited = sbitmap_alloc (last_basic_block);
@@ -1503,7 +1518,7 @@ single_pred_before_succ_order (void)
bitmap_clear (visited);
- MARK_VISITED (ENTRY_BLOCK_PTR);
+ MARK_VISITED (ENTRY_BLOCK_PTR_FOR_FN (cfun));
FOR_EACH_BB (x)
{
if (VISITED_P (x))
diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c
index a9ed5f14b17..08534d4bdde 100644
--- a/gcc/cfgbuild.c
+++ b/gcc/cfgbuild.c
@@ -213,8 +213,8 @@ make_edges (basic_block min, basic_block max, int update_p)
/* By nature of the way these get numbered, ENTRY_BLOCK_PTR->next_bb block
is always the entry. */
- if (min == ENTRY_BLOCK_PTR->next_bb)
- make_edge (ENTRY_BLOCK_PTR, min, EDGE_FALLTHRU);
+ if (min == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), min, EDGE_FALLTHRU);
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
{
@@ -233,14 +233,14 @@ make_edges (basic_block min, basic_block max, int update_p)
if (update_p)
{
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (edge_cache, e->dest->index);
}
}
if (LABEL_P (BB_HEAD (bb))
&& LABEL_ALT_ENTRY_P (BB_HEAD (bb)))
- cached_make_edge (NULL, ENTRY_BLOCK_PTR, bb, 0);
+ cached_make_edge (NULL, ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, 0);
/* Examine the last instruction of the block, and discover the
ways we can leave the block. */
@@ -294,7 +294,7 @@ make_edges (basic_block min, basic_block max, int update_p)
/* Returns create an exit out. */
else if (returnjump_p (insn))
- cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, 0);
+ cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
/* Recognize asm goto and do the right thing. */
else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
@@ -318,7 +318,7 @@ make_edges (basic_block min, basic_block max, int update_p)
worry about EH edges, since we wouldn't have created the sibling call
in the first place. */
if (code == CALL_INSN && SIBLING_CALL_P (insn))
- cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR,
+ cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
EDGE_SIBCALL | EDGE_ABNORMAL);
/* If this is a CALL_INSN, then mark it as reaching the active EH
@@ -359,7 +359,7 @@ make_edges (basic_block min, basic_block max, int update_p)
/* Find out if we can drop through to the next block. */
insn = NEXT_INSN (insn);
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (e && e->flags & EDGE_FALLTHRU)
insn = NULL;
@@ -369,8 +369,9 @@ make_edges (basic_block min, basic_block max, int update_p)
insn = NEXT_INSN (insn);
if (!insn)
- cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
- else if (bb->next_bb != EXIT_BLOCK_PTR)
+ cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ EDGE_FALLTHRU);
+ else if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (insn == BB_HEAD (bb->next_bb))
cached_make_edge (edge_cache, bb, bb->next_bb, EDGE_FALLTHRU);
@@ -480,7 +481,7 @@ find_bb_boundaries (basic_block bb)
remove_edge (fallthru);
flow_transfer_insn = NULL_RTX;
if (code == CODE_LABEL && LABEL_ALT_ENTRY_P (insn))
- make_edge (ENTRY_BLOCK_PTR, bb, 0);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, 0);
}
else if (code == BARRIER)
{
@@ -607,7 +608,7 @@ find_many_sub_basic_blocks (sbitmap blocks)
break;
min = max = bb;
- for (; bb != EXIT_BLOCK_PTR; bb = bb->next_bb)
+ for (; bb != EXIT_BLOCK_PTR_FOR_FN (cfun); bb = bb->next_bb)
if (STATE (bb) != BLOCK_ORIGINAL)
max = bb;
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index 51611907368..9c126102a54 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -134,7 +134,7 @@ try_simplify_condjump (basic_block cbranch_block)
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
if (!single_pred_p (jump_block)
- || jump_block->next_bb == EXIT_BLOCK_PTR
+ || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = single_succ (jump_block);
@@ -157,7 +157,7 @@ try_simplify_condjump (basic_block cbranch_block)
unconditional branch. */
cbranch_dest_block = cbranch_jump_edge->dest;
- if (cbranch_dest_block == EXIT_BLOCK_PTR
+ if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| !can_fallthru (jump_block, cbranch_dest_block))
return false;
@@ -455,11 +455,11 @@ try_forward_edges (int mode, basic_block b)
bb-reorder.c:partition_hot_cold_basic_blocks for complete
details. */
- if (first != EXIT_BLOCK_PTR
+ if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
return changed;
- while (counter < n_basic_blocks)
+ while (counter < n_basic_blocks_for_fn (cfun))
{
basic_block new_target = NULL;
bool new_target_threaded = false;
@@ -467,12 +467,12 @@ try_forward_edges (int mode, basic_block b)
if (FORWARDER_BLOCK_P (target)
&& !(single_succ_edge (target)->flags & EDGE_CROSSING)
- && single_succ (target) != EXIT_BLOCK_PTR)
+ && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Bypass trivial infinite loops. */
new_target = single_succ (target);
if (target == new_target)
- counter = n_basic_blocks;
+ counter = n_basic_blocks_for_fn (cfun);
else if (!optimize)
{
/* When not optimizing, ensure that edges or forwarder
@@ -521,7 +521,8 @@ try_forward_edges (int mode, basic_block b)
if (t)
{
if (!threaded_edges)
- threaded_edges = XNEWVEC (edge, n_basic_blocks);
+ threaded_edges = XNEWVEC (edge,
+ n_basic_blocks_for_fn (cfun));
else
{
int i;
@@ -533,7 +534,7 @@ try_forward_edges (int mode, basic_block b)
break;
if (i < nthreaded_edges)
{
- counter = n_basic_blocks;
+ counter = n_basic_blocks_for_fn (cfun);
break;
}
}
@@ -542,7 +543,9 @@ try_forward_edges (int mode, basic_block b)
if (t->dest == b)
break;
- gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
+ gcc_assert (nthreaded_edges
+ < (n_basic_blocks_for_fn (cfun)
+ - NUM_FIXED_BLOCKS));
threaded_edges[nthreaded_edges++] = t;
new_target = t->dest;
@@ -558,7 +561,7 @@ try_forward_edges (int mode, basic_block b)
threaded |= new_target_threaded;
}
- if (counter >= n_basic_blocks)
+ if (counter >= n_basic_blocks_for_fn (cfun))
{
if (dump_file)
fprintf (dump_file, "Infinite loop in BB %i.\n",
@@ -577,7 +580,7 @@ try_forward_edges (int mode, basic_block b)
e->goto_locus = goto_locus;
/* Don't force if target is exit block. */
- if (threaded && target != EXIT_BLOCK_PTR)
+ if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
notice_new_block (redirect_edge_and_branch_force (e, target));
if (dump_file)
@@ -790,7 +793,7 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
fprintf (dump_file, "Merged %d and %d without moving.\n",
b_index, c_index);
- return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
+ return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
}
/* Otherwise we will need to move code around. Do that only if expensive
@@ -828,7 +831,7 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
if (! c_has_outgoing_fallthru)
{
merge_blocks_move_successor_nojumps (b, c);
- return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
+ return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
}
/* If B does not have an incoming fallthru, then it can be moved
@@ -840,7 +843,7 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
basic_block bb;
- if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
+ if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return NULL;
bb = force_nonfallthru (b_fallthru_edge);
if (bb)
@@ -848,7 +851,7 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
}
merge_blocks_move_predecessor_nojumps (b, c);
- return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
+ return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
}
return NULL;
@@ -1264,7 +1267,7 @@ walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
return;
fallthru = find_fallthru_edge ((*bb1)->preds);
- if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
+ if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| !single_succ_p (fallthru->src))
return;
@@ -1537,7 +1540,8 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
whether they went through the prologue. Sibcalls are fine, we know
that we either didn't need or inserted an epilogue before them. */
if (crtl->shrink_wrapped
- && single_succ_p (bb1) && single_succ (bb1) == EXIT_BLOCK_PTR
+ && single_succ_p (bb1)
+ && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !JUMP_P (BB_END (bb1))
&& !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
return false;
@@ -1899,7 +1903,8 @@ try_crossjump_to_edge (int mode, edge e1, edge e2,
e2 = single_pred_edge (src2), src2 = e2->src;
/* Nothing to do if we reach ENTRY, or a common source block. */
- if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
+ if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
+ == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return false;
if (src1 == src2)
return false;
@@ -2143,7 +2148,7 @@ try_crossjump_bb (int mode, basic_block bb)
/* Don't crossjump if this block ends in a computed jump,
unless we are optimizing for size. */
if (optimize_bb_for_size_p (bb)
- && bb != EXIT_BLOCK_PTR
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& computed_jump_p (BB_END (bb)))
return false;
@@ -2284,7 +2289,7 @@ try_head_merge_bb (basic_block bb)
/* Don't crossjump if this block ends in a computed jump,
unless we are optimizing for size. */
if (optimize_bb_for_size_p (bb)
- && bb != EXIT_BLOCK_PTR
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& computed_jump_p (BB_END (bb)))
return false;
@@ -2300,7 +2305,7 @@ try_head_merge_bb (basic_block bb)
}
for (ix = 0; ix < nedges; ix++)
- if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
+ if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
for (ix = 0; ix < nedges; ix++)
@@ -2620,7 +2625,8 @@ try_optimize_cfg (int mode)
"\n\ntry_optimize_cfg iteration %i\n\n",
iterations);
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
+ for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
+ != EXIT_BLOCK_PTR_FOR_FN (cfun);)
{
basic_block c;
edge s;
@@ -2637,7 +2643,8 @@ try_optimize_cfg (int mode)
if (EDGE_COUNT (b->preds) == 0
|| (EDGE_COUNT (b->succs) == 0
&& trivially_empty_bb_p (b)
- && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
+ && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
+ != b))
{
c = b->prev_bb;
if (EDGE_COUNT (b->preds) > 0)
@@ -2678,7 +2685,7 @@ try_optimize_cfg (int mode)
delete_basic_block (b);
changed = true;
/* Avoid trying to remove ENTRY_BLOCK_PTR. */
- b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
+ b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
continue;
}
@@ -2693,7 +2700,7 @@ try_optimize_cfg (int mode)
if CASE_DROPS_THRU, this can be a tablejump with
some element going to the same place as the
default (fallthru). */
- && (single_pred (b) == ENTRY_BLOCK_PTR
+ && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| !JUMP_P (BB_END (single_pred (b)))
|| ! label_is_jump_target_p (BB_HEAD (b),
BB_END (single_pred (b)))))
@@ -2713,14 +2720,15 @@ try_optimize_cfg (int mode)
/* Note that forwarder_block_p true ensures that
there is a successor for this block. */
&& (single_succ_edge (b)->flags & EDGE_FALLTHRU)
- && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
+ && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
{
if (dump_file)
fprintf (dump_file,
"Deleting fallthru block %i.\n",
b->index);
- c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
+ c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ ? b->next_bb : b->prev_bb);
redirect_edge_succ_nodup (single_pred_edge (b),
single_succ (b));
delete_basic_block (b);
@@ -2733,7 +2741,7 @@ try_optimize_cfg (int mode)
if (single_succ_p (b)
&& (s = single_succ_edge (b))
&& !(s->flags & EDGE_COMPLEX)
- && (c = s->dest) != EXIT_BLOCK_PTR
+ && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_pred_p (c)
&& b != c)
{
@@ -2777,7 +2785,7 @@ try_optimize_cfg (int mode)
can either delete the jump entirely, or replace it
with a simple unconditional jump. */
if (single_succ_p (b)
- && single_succ (b) != EXIT_BLOCK_PTR
+ && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& onlyjump_p (BB_END (b))
&& !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
&& try_redirect_by_replacing_jump (single_succ_edge (b),
@@ -2816,7 +2824,7 @@ try_optimize_cfg (int mode)
}
if ((mode & CLEANUP_CROSSJUMP)
- && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
+ && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
changed = true;
if (block_was_dirty)
@@ -2873,7 +2881,8 @@ delete_unreachable_blocks (void)
if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
&& dom_info_available_p (CDI_DOMINATORS))
{
- for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
+ for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
+ b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
{
prev_bb = b->prev_bb;
@@ -2909,7 +2918,8 @@ delete_unreachable_blocks (void)
}
else
{
- for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
+ for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
+ b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
{
prev_bb = b->prev_bb;
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 85732c8f0d7..d431c8dea5e 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -24,6 +24,11 @@ along with GCC; see the file COPYING3. If not see
#include "rtl.h"
#include "hard-reg-set.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "stmt.h"
+#include "print-tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "function.h"
@@ -3358,7 +3363,7 @@ expand_gimple_tailcall (basic_block bb, gimple stmt, bool *can_fallthru)
{
if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
{
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
e->dest->count -= e->count;
e->dest->frequency -= EDGE_FREQUENCY (e);
@@ -3394,7 +3399,8 @@ expand_gimple_tailcall (basic_block bb, gimple stmt, bool *can_fallthru)
delete_insn (NEXT_INSN (last));
}
- e = make_edge (bb, EXIT_BLOCK_PTR, EDGE_ABNORMAL | EDGE_SIBCALL);
+ e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_ABNORMAL
+ | EDGE_SIBCALL);
e->probability += probability;
e->count += count;
BB_END (bb) = last;
@@ -4835,9 +4841,9 @@ expand_gimple_basic_block (basic_block bb, bool disable_tail_calls)
gimple ret_stmt = gsi_stmt (gsi);
gcc_assert (single_succ_p (bb));
- gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR);
+ gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun));
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !gimple_return_retval (ret_stmt))
{
gsi_remove (&gsi, false);
@@ -5179,17 +5185,17 @@ construct_init_block (void)
int flags;
/* Multiple entry points not supported yet. */
- gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1);
- init_rtl_bb_info (ENTRY_BLOCK_PTR);
- init_rtl_bb_info (EXIT_BLOCK_PTR);
- ENTRY_BLOCK_PTR->flags |= BB_RTL;
- EXIT_BLOCK_PTR->flags |= BB_RTL;
+ gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1);
+ init_rtl_bb_info (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ init_rtl_bb_info (EXIT_BLOCK_PTR_FOR_FN (cfun));
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
- e = EDGE_SUCC (ENTRY_BLOCK_PTR, 0);
+ e = EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0);
/* When entry edge points to first basic block, we don't need jump,
otherwise we have to jump into proper target. */
- if (e && e->dest != ENTRY_BLOCK_PTR->next_bb)
+ if (e && e->dest != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
{
tree label = gimple_block_label (e->dest);
@@ -5201,11 +5207,11 @@ construct_init_block (void)
init_block = create_basic_block (NEXT_INSN (get_insns ()),
get_last_insn (),
- ENTRY_BLOCK_PTR);
- init_block->frequency = ENTRY_BLOCK_PTR->frequency;
- init_block->count = ENTRY_BLOCK_PTR->count;
- if (current_loops && ENTRY_BLOCK_PTR->loop_father)
- add_bb_to_loop (init_block, ENTRY_BLOCK_PTR->loop_father);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ init_block->frequency = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
+ init_block->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
+ if (current_loops && ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father)
+ add_bb_to_loop (init_block, ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father);
if (e)
{
first_block = e->dest;
@@ -5213,9 +5219,9 @@ construct_init_block (void)
e = make_edge (init_block, first_block, flags);
}
else
- e = make_edge (init_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
+ e = make_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FALLTHRU);
e->probability = REG_BR_PROB_BASE;
- e->count = ENTRY_BLOCK_PTR->count;
+ e->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
update_bb_for_insn (init_block);
return init_block;
@@ -5246,9 +5252,9 @@ construct_exit_block (void)
edge e, e2;
unsigned ix;
edge_iterator ei;
- rtx orig_end = BB_END (EXIT_BLOCK_PTR->prev_bb);
+ rtx orig_end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
- rtl_profile_for_bb (EXIT_BLOCK_PTR);
+ rtl_profile_for_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Make sure the locus is set to the end of the function, so that
epilogue line numbers and warnings are set properly. */
@@ -5263,30 +5269,30 @@ construct_exit_block (void)
return;
/* While emitting the function end we could move end of the last basic block.
*/
- BB_END (EXIT_BLOCK_PTR->prev_bb) = orig_end;
+ BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = orig_end;
while (NEXT_INSN (head) && NOTE_P (NEXT_INSN (head)))
head = NEXT_INSN (head);
exit_block = create_basic_block (NEXT_INSN (head), end,
- EXIT_BLOCK_PTR->prev_bb);
- exit_block->frequency = EXIT_BLOCK_PTR->frequency;
- exit_block->count = EXIT_BLOCK_PTR->count;
- if (current_loops && EXIT_BLOCK_PTR->loop_father)
- add_bb_to_loop (exit_block, EXIT_BLOCK_PTR->loop_father);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
+ exit_block->frequency = EXIT_BLOCK_PTR_FOR_FN (cfun)->frequency;
+ exit_block->count = EXIT_BLOCK_PTR_FOR_FN (cfun)->count;
+ if (current_loops && EXIT_BLOCK_PTR_FOR_FN (cfun)->loop_father)
+ add_bb_to_loop (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun)->loop_father);
ix = 0;
- while (ix < EDGE_COUNT (EXIT_BLOCK_PTR->preds))
+ while (ix < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
{
- e = EDGE_PRED (EXIT_BLOCK_PTR, ix);
+ e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), ix);
if (!(e->flags & EDGE_ABNORMAL))
redirect_edge_succ (e, exit_block);
else
ix++;
}
- e = make_edge (exit_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
+ e = make_edge (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FALLTHRU);
e->probability = REG_BR_PROB_BASE;
- e->count = EXIT_BLOCK_PTR->count;
- FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR->preds)
+ e->count = EXIT_BLOCK_PTR_FOR_FN (cfun)->count;
+ FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e2 != e)
{
e->count -= e2->count;
@@ -5516,7 +5522,7 @@ gimple_expand_cfg (void)
/* Dominators are not kept up-to-date as we may create new basic-blocks. */
free_dominance_info (CDI_DOMINATORS);
- rtl_profile_for_bb (ENTRY_BLOCK_PTR);
+ rtl_profile_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
insn_locations_init ();
if (!DECL_IS_BUILTIN (current_function_decl))
@@ -5680,11 +5686,12 @@ gimple_expand_cfg (void)
/* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleaned from the
remaining edges later. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
e->flags &= ~EDGE_EXECUTABLE;
lab_rtx_for_bb = pointer_map_create ();
- FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ next_bb)
bb = expand_gimple_basic_block (bb, var_ret_seq != NULL_RTX);
if (MAY_HAVE_DEBUG_INSNS)
@@ -5729,7 +5736,8 @@ gimple_expand_cfg (void)
split edges which edge insertions might do. */
rebuild_jump_labels (get_insns ());
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
@@ -5740,8 +5748,8 @@ gimple_expand_cfg (void)
rebuild_jump_labels_chain (e->insns.r);
/* Put insns after parm birth, but before
NOTE_INSNS_FUNCTION_BEG. */
- if (e->src == ENTRY_BLOCK_PTR
- && single_succ_p (ENTRY_BLOCK_PTR))
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
rtx insns = e->insns.r;
e->insns.r = NULL_RTX;
@@ -5762,7 +5770,8 @@ gimple_expand_cfg (void)
/* We're done expanding trees to RTL. */
currently_expanding_to_rtl = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c
index c12a62fca50..2535c9027be 100644
--- a/gcc/cfghooks.c
+++ b/gcc/cfghooks.c
@@ -102,10 +102,10 @@ verify_flow_info (void)
edge_checksum = XCNEWVEC (size_t, last_basic_block);
/* Check bb chain & numbers. */
- last_bb_seen = ENTRY_BLOCK_PTR;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
+ last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
{
- if (bb != EXIT_BLOCK_PTR
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bb != BASIC_BLOCK (bb->index))
{
error ("bb %d on wrong place", bb->index);
@@ -234,21 +234,21 @@ verify_flow_info (void)
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
edge_checksum[e->dest->index] += (size_t) e;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
edge_checksum[e->dest->index] -= (size_t) e;
}
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
if (edge_checksum[bb->index])
{
error ("basic block %i edge lists are corrupted", bb->index);
err = 1;
}
- last_bb_seen = ENTRY_BLOCK_PTR;
+ last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Clean up. */
free (last_visited);
@@ -323,7 +323,8 @@ dump_flow_info (FILE *file, int flags)
{
basic_block bb;
- fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges);
+ fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks_for_fn (cfun),
+ n_edges_for_fn (cfun));
FOR_ALL_BB (bb)
dump_bb (file, bb, 0, flags);
@@ -937,10 +938,11 @@ tidy_fallthru_edges (void)
if (!cfg_hooks->tidy_fallthru_edge)
return;
- if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
- FOR_BB_BETWEEN (b, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR->prev_bb, next_bb)
+ FOR_BB_BETWEEN (b, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb, next_bb)
{
edge s;
@@ -1010,7 +1012,7 @@ can_duplicate_block_p (const_basic_block bb)
internal_error ("%s does not support can_duplicate_block_p",
cfg_hooks->name);
- if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return false;
return cfg_hooks->can_duplicate_block_p (bb);
@@ -1408,7 +1410,7 @@ account_profile_record (struct profile_record *record, int after_pass)
FOR_ALL_BB (bb)
{
- if (bb != EXIT_BLOCK_PTR_FOR_FUNCTION (cfun)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& profile_status != PROFILE_ABSENT)
{
sum = 0;
@@ -1423,7 +1425,7 @@ account_profile_record (struct profile_record *record, int after_pass)
&& (lsum - bb->count > 100 || lsum - bb->count < -100))
record->num_mismatched_count_out[after_pass]++;
}
- if (bb != ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& profile_status != PROFILE_ABSENT)
{
sum = 0;
@@ -1439,8 +1441,8 @@ account_profile_record (struct profile_record *record, int after_pass)
if (lsum - bb->count > 100 || lsum - bb->count < -100)
record->num_mismatched_count_in[after_pass]++;
}
- if (bb == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
- || bb == EXIT_BLOCK_PTR_FOR_FUNCTION (cfun))
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
gcc_assert (cfg_hooks->account_profile_record);
cfg_hooks->account_profile_record (bb, after_pass, record);
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index be0157edf67..08d7f0d04ac 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -156,7 +156,6 @@ flow_loop_dump (const struct loop *loop, FILE *file,
void
flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
{
- loop_iterator li;
struct loop *loop;
if (!current_loops || ! file)
@@ -164,7 +163,7 @@ flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *,
fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
- FOR_EACH_LOOP (li, loop, LI_INCLUDE_ROOT)
+ FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
{
flow_loop_dump (loop, file, loop_dump_aux, verbose);
}
@@ -353,11 +352,11 @@ init_loops_structure (struct function *fn,
/* Dummy loop containing whole function. */
root = alloc_loop ();
- root->num_nodes = n_basic_blocks_for_function (fn);
- root->latch = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
- root->header = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
+ root->num_nodes = n_basic_blocks_for_fn (fn);
+ root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
+ root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
+ ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
+ EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
loops->larray->quick_push (root);
loops->tree_root = root;
@@ -384,7 +383,7 @@ bb_loop_header_p (basic_block header)
FOR_EACH_EDGE (e, ei, header->preds)
{
basic_block latch = e->src;
- if (latch != ENTRY_BLOCK_PTR
+ if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& dominated_by_p (CDI_DOMINATORS, latch, header))
return true;
}
@@ -423,21 +422,21 @@ flow_loops_find (struct loops *loops)
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return loops;
/* The root loop node contains all basic-blocks. */
- loops->tree_root->num_nodes = n_basic_blocks;
+ loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
/* Compute depth first search order of the CFG so that outer
natural loops will be found before inner natural loops. */
- rc_order = XNEWVEC (int, n_basic_blocks);
+ rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
pre_and_rev_post_order_compute (NULL, rc_order, false);
/* Gather all loop headers in reverse completion order and allocate
loop structures for loops that are not already present. */
larray.create (loops->larray->length ());
- for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
+ for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
{
basic_block header = BASIC_BLOCK (rc_order[b]);
if (bb_loop_header_p (header))
@@ -747,7 +746,7 @@ disambiguate_multiple_latches (struct loop *loop)
block. This would cause problems if the entry edge was the one from the
entry block. To avoid having to handle this case specially, split
such entry edge. */
- e = find_edge (ENTRY_BLOCK_PTR, loop->header);
+ e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
if (e)
split_edge (e);
@@ -768,10 +767,9 @@ disambiguate_multiple_latches (struct loop *loop)
void
disambiguate_loops_with_multiple_latches (void)
{
- loop_iterator li;
struct loop *loop;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
if (!loop->latch)
disambiguate_multiple_latches (loop);
@@ -784,7 +782,8 @@ flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
{
struct loop *source_loop;
- if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return 0;
source_loop = bb->loop_father;
@@ -829,13 +828,13 @@ get_loop_body (const struct loop *loop)
body = XNEWVEC (basic_block, loop->num_nodes);
- if (loop->latch == EXIT_BLOCK_PTR)
+ if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* There may be blocks unreachable from EXIT_BLOCK, hence we need to
special-case the fake loop that contains the whole function. */
- gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
+ gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
body[tv++] = loop->header;
- body[tv++] = EXIT_BLOCK_PTR;
+ body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
FOR_EACH_BB (bb)
body[tv++] = bb;
}
@@ -889,7 +888,7 @@ get_loop_body_in_dom_order (const struct loop *loop)
tovisit = XNEWVEC (basic_block, loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
tv = 0;
fill_sons_in_loop (loop, loop->header, tovisit, &tv);
@@ -924,7 +923,7 @@ get_loop_body_in_bfs_order (const struct loop *loop)
unsigned int vc = 1;
gcc_assert (loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
blocks = XNEWVEC (basic_block, loop->num_nodes);
visited = BITMAP_ALLOC (NULL);
@@ -1146,7 +1145,7 @@ get_loop_exit_edges (const struct loop *loop)
edge_iterator ei;
struct loop_exit *exit;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
/* If we maintain the lists of exits, use them. Otherwise we must
scan the body of the loop. */
@@ -1178,7 +1177,7 @@ num_loop_branches (const struct loop *loop)
unsigned i, n;
basic_block * body;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
body = get_loop_body (loop);
n = 0;
@@ -1328,7 +1327,6 @@ verify_loop_structure (void)
int err = 0;
edge e;
unsigned num = number_of_loops (cfun);
- loop_iterator li;
struct loop_exit *exit, *mexit;
bool dom_available = dom_info_available_p (CDI_DOMINATORS);
sbitmap visited;
@@ -1369,8 +1367,8 @@ verify_loop_structure (void)
/* Check the recorded loop father and sizes of loops. */
visited = sbitmap_alloc (last_basic_block);
bitmap_clear (visited);
- bbs = XNEWVEC (basic_block, n_basic_blocks);
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
unsigned n;
@@ -1381,7 +1379,7 @@ verify_loop_structure (void)
continue;
}
- n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
+ n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
if (loop->num_nodes != n)
{
error ("size of loop %d should be %d, not %d",
@@ -1417,7 +1415,7 @@ verify_loop_structure (void)
sbitmap_free (visited);
/* Check headers and latches. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
i = loop->num;
if (loop->header == NULL)
@@ -1537,7 +1535,7 @@ verify_loop_structure (void)
}
/* Check the recorded loop exits. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
if (!loop->exits || loop->exits->e != NULL)
{
@@ -1631,7 +1629,7 @@ verify_loop_structure (void)
err = 1;
}
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
eloops = 0;
for (exit = loop->exits->next; exit->e; exit = exit->next)
diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h
index 3aaa728c0d9..69fa996c6ff 100644
--- a/gcc/cfgloop.h
+++ b/gcc/cfgloop.h
@@ -543,48 +543,52 @@ enum li_flags
/* The iterator for loops. */
-typedef struct
+struct loop_iterator
{
+ loop_iterator (loop_p *loop, unsigned flags);
+ ~loop_iterator ();
+
+ inline loop_p next ();
+
/* The list of loops to visit. */
vec<int> to_visit;
/* The index of the actual loop. */
unsigned idx;
-} loop_iterator;
+};
-static inline void
-fel_next (loop_iterator *li, loop_p *loop)
+inline loop_p
+loop_iterator::next ()
{
int anum;
- while (li->to_visit.iterate (li->idx, &anum))
+ while (this->to_visit.iterate (this->idx, &anum))
{
- li->idx++;
- *loop = get_loop (cfun, anum);
- if (*loop)
- return;
+ this->idx++;
+ loop_p loop = get_loop (cfun, anum);
+ if (loop)
+ return loop;
}
- li->to_visit.release ();
- *loop = NULL;
+ return NULL;
}
-static inline void
-fel_init (loop_iterator *li, loop_p *loop, unsigned flags)
+inline
+loop_iterator::loop_iterator (loop_p *loop, unsigned flags)
{
struct loop *aloop;
unsigned i;
int mn;
- li->idx = 0;
+ this->idx = 0;
if (!current_loops)
{
- li->to_visit.create (0);
+ this->to_visit.create (0);
*loop = NULL;
return;
}
- li->to_visit.create (number_of_loops (cfun));
+ this->to_visit.create (number_of_loops (cfun));
mn = (flags & LI_INCLUDE_ROOT) ? 0 : 1;
if (flags & LI_ONLY_INNERMOST)
@@ -593,7 +597,7 @@ fel_init (loop_iterator *li, loop_p *loop, unsigned flags)
if (aloop != NULL
&& aloop->inner == NULL
&& aloop->num >= mn)
- li->to_visit.quick_push (aloop->num);
+ this->to_visit.quick_push (aloop->num);
}
else if (flags & LI_FROM_INNERMOST)
{
@@ -606,7 +610,7 @@ fel_init (loop_iterator *li, loop_p *loop, unsigned flags)
while (1)
{
if (aloop->num >= mn)
- li->to_visit.quick_push (aloop->num);
+ this->to_visit.quick_push (aloop->num);
if (aloop->next)
{
@@ -628,7 +632,7 @@ fel_init (loop_iterator *li, loop_p *loop, unsigned flags)
while (1)
{
if (aloop->num >= mn)
- li->to_visit.quick_push (aloop->num);
+ this->to_visit.quick_push (aloop->num);
if (aloop->inner != NULL)
aloop = aloop->inner;
@@ -643,19 +647,19 @@ fel_init (loop_iterator *li, loop_p *loop, unsigned flags)
}
}
- fel_next (li, loop);
+ *loop = this->next ();
}
-#define FOR_EACH_LOOP(LI, LOOP, FLAGS) \
- for (fel_init (&(LI), &(LOOP), FLAGS); \
- (LOOP); \
- fel_next (&(LI), &(LOOP)))
+inline
+loop_iterator::~loop_iterator ()
+{
+ this->to_visit.release ();
+}
-#define FOR_EACH_LOOP_BREAK(LI) \
- { \
- (LI).to_visit.release (); \
- break; \
- }
+#define FOR_EACH_LOOP(LOOP, FLAGS) \
+ for (loop_iterator li(&(LOOP), FLAGS); \
+ (LOOP); \
+ (LOOP) = li.next ())
/* The properties of the target. */
struct target_cfgloop {
diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c
index 9300237c670..0cee6c68b28 100644
--- a/gcc/cfgloopanal.c
+++ b/gcc/cfgloopanal.c
@@ -85,7 +85,8 @@ mark_irreducible_loops (void)
gcc_assert (current_loops != NULL);
/* Reset the flags. */
- FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
act->flags &= ~BB_IRREDUCIBLE_LOOP;
FOR_EACH_EDGE (e, ei, act->succs)
@@ -95,11 +96,12 @@ mark_irreducible_loops (void)
/* Create the edge lists. */
g = new_graph (last_basic_block + num);
- FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
FOR_EACH_EDGE (e, ei, act->succs)
{
/* Ignore edges to exit. */
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
src = BB_REPR (act);
diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c
index 0fc6552746b..6baa15afade 100644
--- a/gcc/cfgloopmanip.c
+++ b/gcc/cfgloopmanip.c
@@ -71,9 +71,9 @@ find_path (edge e, basic_block **bbs)
gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
/* Find bbs in the path. */
- *bbs = XNEWVEC (basic_block, n_basic_blocks);
+ *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
- n_basic_blocks, e->dest);
+ n_basic_blocks_for_fn (cfun), e->dest);
}
/* Fix placement of basic block BB inside loop hierarchy --
@@ -92,7 +92,7 @@ fix_bb_placement (basic_block bb)
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
act = e->dest->loop_father;
@@ -343,7 +343,7 @@ remove_path (edge e)
nrem = find_path (e, &rem_bbs);
n_bord_bbs = 0;
- bord_bbs = XNEWVEC (basic_block, n_basic_blocks);
+ bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
seen = sbitmap_alloc (last_basic_block);
bitmap_clear (seen);
@@ -352,7 +352,8 @@ remove_path (edge e)
bitmap_set_bit (seen, rem_bbs[i]->index);
if (!irred_invalidated)
FOR_EACH_EDGE (ae, ei, e->src->succs)
- if (ae != e && ae->dest != EXIT_BLOCK_PTR && !bitmap_bit_p (seen, ae->dest->index)
+ if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && !bitmap_bit_p (seen, ae->dest->index)
&& ae->flags & EDGE_IRREDUCIBLE_LOOP)
{
irred_invalidated = true;
@@ -363,7 +364,8 @@ remove_path (edge e)
{
bb = rem_bbs[i];
FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
- if (ae->dest != EXIT_BLOCK_PTR && !bitmap_bit_p (seen, ae->dest->index))
+ if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && !bitmap_bit_p (seen, ae->dest->index))
{
bitmap_set_bit (seen, ae->dest->index);
bord_bbs[n_bord_bbs++] = ae->dest;
@@ -450,8 +452,8 @@ add_loop (struct loop *loop, struct loop *outer)
flow_loop_tree_node_add (outer, loop);
/* Find its nodes. */
- bbs = XNEWVEC (basic_block, n_basic_blocks);
- n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
for (i = 0; i < n; i++)
{
@@ -1519,7 +1521,7 @@ create_preheader (struct loop *loop, int flags)
/* We do not allow entry block to be the loop preheader, since we
cannot emit code there. */
- if (single_entry->src == ENTRY_BLOCK_PTR)
+ if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
need_forwarder_block = true;
else
{
@@ -1585,13 +1587,12 @@ create_preheader (struct loop *loop, int flags)
void
create_preheaders (int flags)
{
- loop_iterator li;
struct loop *loop;
if (!current_loops)
return;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
create_preheader (loop, flags);
loops_state_set (LOOPS_HAVE_PREHEADERS);
}
@@ -1601,11 +1602,10 @@ create_preheaders (int flags)
void
force_single_succ_latches (void)
{
- loop_iterator li;
struct loop *loop;
edge e;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
if (loop->latch != loop->header && single_succ_p (loop->latch))
continue;
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index c7ee7eee636..7ad38721606 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -361,7 +361,7 @@ rtl_create_basic_block (void *headp, void *endp, basic_block after)
vec_safe_grow_cleared (basic_block_info, new_size);
}
- n_basic_blocks++;
+ n_basic_blocks_for_fn (cfun)++;
bb = create_basic_block_structure (head, end, NULL, after);
bb->aux = NULL;
@@ -500,8 +500,8 @@ make_pass_free_cfg (gcc::context *ctxt)
rtx
entry_of_function (void)
{
- return (n_basic_blocks > NUM_FIXED_BLOCKS ?
- BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
+ return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
+ BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}
/* Emit INSN at the entry point of the function, ensuring that it is only
@@ -509,7 +509,7 @@ entry_of_function (void)
void
emit_insn_at_entry (rtx insn)
{
- edge_iterator ei = ei_start (ENTRY_BLOCK_PTR->succs);
+ edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
edge e = ei_safe_edge (ei);
gcc_assert (e->flags & EDGE_FALLTHRU);
@@ -573,7 +573,7 @@ contains_no_active_insn_p (const_basic_block bb)
{
rtx insn;
- if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| !single_succ_p (bb))
return false;
@@ -620,7 +620,7 @@ can_fallthru (basic_block src, basic_block target)
edge e;
edge_iterator ei;
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return true;
if (src->next_bb != target)
return false;
@@ -630,7 +630,7 @@ can_fallthru (basic_block src, basic_block target)
return false;
FOR_EACH_EDGE (e, ei, src->succs)
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& e->flags & EDGE_FALLTHRU)
return false;
@@ -650,10 +650,10 @@ could_fall_through (basic_block src, basic_block target)
edge e;
edge_iterator ei;
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return true;
FOR_EACH_EDGE (e, ei, src->succs)
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& e->flags & EDGE_FALLTHRU)
return 0;
return true;
@@ -958,7 +958,8 @@ rtl_can_merge_blocks (basic_block a, basic_block b)
/* Must be simple edge. */
&& !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a->next_bb == b
- && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
+ && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
/* If the jump insn has side effects,
we can't kill the edge. */
&& (!JUMP_P (BB_END (a))
@@ -972,7 +973,7 @@ rtl_can_merge_blocks (basic_block a, basic_block b)
rtx
block_label (basic_block block)
{
- if (block == EXIT_BLOCK_PTR)
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL_RTX;
if (!LABEL_P (BB_HEAD (block)))
@@ -1084,13 +1085,13 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
INSN_UID (insn), e->dest->index, target->index);
if (!redirect_jump (insn, block_label (target), 0))
{
- gcc_assert (target == EXIT_BLOCK_PTR);
+ gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
return NULL;
}
}
/* Cannot do anything for target exit block. */
- else if (target == EXIT_BLOCK_PTR)
+ else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL;
/* Or replace possibly complicated jump insn by simple jump insn. */
@@ -1178,7 +1179,7 @@ patch_jump_insn (rtx insn, rtx old_label, basic_block new_bb)
int j;
rtx new_label = block_label (new_bb);
- if (new_bb == EXIT_BLOCK_PTR)
+ if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
if (GET_CODE (PATTERN (tmp)) == ADDR_VEC)
vec = XVEC (PATTERN (tmp), 0);
@@ -1211,7 +1212,7 @@ patch_jump_insn (rtx insn, rtx old_label, basic_block new_bb)
int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
rtx new_label, note;
- if (new_bb == EXIT_BLOCK_PTR)
+ if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
new_label = block_label (new_bb);
@@ -1268,7 +1269,7 @@ patch_jump_insn (rtx insn, rtx old_label, basic_block new_bb)
target is exit block on some arches. */
if (!redirect_jump (insn, block_label (new_bb), 0))
{
- gcc_assert (new_bb == EXIT_BLOCK_PTR);
+ gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
return false;
}
}
@@ -1324,7 +1325,8 @@ fixup_partition_crossing (edge e)
{
rtx note;
- if (e->src == ENTRY_BLOCK_PTR || e->dest == EXIT_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
+ == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
/* If we redirected an existing edge, it may already be marked
crossing, even though the new src is missing a reg crossing note.
@@ -1392,7 +1394,7 @@ fixup_new_cold_bb (basic_block bb)
boundary fixup by calling fixup_partition_crossing itself. */
if ((e->flags & EDGE_FALLTHRU)
&& BB_PARTITION (bb) != BB_PARTITION (e->dest)
- && e->dest != EXIT_BLOCK_PTR)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
force_nonfallthru (e);
else
fixup_partition_crossing (e);
@@ -1470,7 +1472,8 @@ force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
/* In the case the last instruction is conditional jump to the next
instruction, first redirect the jump itself and then continue
by creating a basic block afterwards to redirect fallthru edge. */
- if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& any_condjump_p (BB_END (e->src))
&& JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
{
@@ -1512,7 +1515,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
else
{
gcc_assert (e->flags & EDGE_FALLTHRU);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
/* We can't redirect the entry block. Create an empty block
at the start of the function which we use to add the new
@@ -1521,16 +1524,18 @@ force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
edge_iterator ei;
bool found = false;
- basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
+ basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
e->src = bb;
- for (ei = ei_start (ENTRY_BLOCK_PTR->succs); (tmp = ei_safe_edge (ei)); )
+ for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
+ (tmp = ei_safe_edge (ei)); )
{
if (tmp == e)
{
- ENTRY_BLOCK_PTR->succs->unordered_remove (ei.index);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
found = true;
break;
}
@@ -1541,14 +1546,15 @@ force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
gcc_assert (found);
vec_safe_push (bb->succs, e);
- make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
+ make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
+ EDGE_FALLTHRU);
}
}
/* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
don't point to the target or fallthru label. */
if (JUMP_P (BB_END (e->src))
- && target != EXIT_BLOCK_PTR
+ && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_FALLTHRU)
&& (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
{
@@ -1650,7 +1656,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
loc = e->goto_locus;
e->flags &= ~EDGE_FALLTHRU;
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (jump_label == ret_rtx)
{
@@ -1784,7 +1790,7 @@ static basic_block
last_bb_in_partition (basic_block start_bb)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
return bb;
@@ -1820,14 +1826,15 @@ rtl_split_edge (edge edge_in)
}
/* Create the basic block note. */
- if (edge_in->dest != EXIT_BLOCK_PTR)
+ if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
before = BB_HEAD (edge_in->dest);
else
before = NULL_RTX;
/* If this is a fall through edge to the exit block, the blocks might be
not adjacent, and the right place is after the source. */
- if ((edge_in->flags & EDGE_FALLTHRU) && edge_in->dest == EXIT_BLOCK_PTR)
+ if ((edge_in->flags & EDGE_FALLTHRU)
+ && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
before = NEXT_INSN (BB_END (edge_in->src));
bb = create_basic_block (before, NULL, edge_in->src);
@@ -1835,7 +1842,7 @@ rtl_split_edge (edge edge_in)
}
else
{
- if (edge_in->src == ENTRY_BLOCK_PTR)
+ if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
BB_COPY_PARTITION (bb, edge_in->dest);
@@ -1873,7 +1880,7 @@ rtl_split_edge (edge edge_in)
/* Can't allow a region crossing edge to be fallthrough. */
if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
- && edge_in->dest != EXIT_BLOCK_PTR)
+ && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
new_bb = force_nonfallthru (single_succ_edge (bb));
gcc_assert (!new_bb);
@@ -1888,7 +1895,7 @@ rtl_split_edge (edge edge_in)
}
else
{
- if (edge_in->src != ENTRY_BLOCK_PTR)
+ if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
/* For asm goto even splitting of fallthru edge might
need insn patching, as other labels might point to the
@@ -1896,7 +1903,7 @@ rtl_split_edge (edge edge_in)
rtx last = BB_END (edge_in->src);
if (last
&& JUMP_P (last)
- && edge_in->dest != EXIT_BLOCK_PTR
+ && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& extract_asm_operands (PATTERN (last)) != NULL_RTX
&& patch_jump_insn (last, before, bb))
df_set_bb_dirty (edge_in->src);
@@ -1943,7 +1950,7 @@ commit_one_edge_insertion (edge e)
/* Figure out where to put these insns. If the destination has
one predecessor, insert there. Except for the exit block. */
- if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR)
+ if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
bb = e->dest;
@@ -1972,7 +1979,7 @@ commit_one_edge_insertion (edge e)
the basic block. */
else if ((e->flags & EDGE_ABNORMAL) == 0
&& single_succ_p (e->src)
- && e->src != ENTRY_BLOCK_PTR
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (!JUMP_P (BB_END (e->src))
|| simplejump_p (BB_END (e->src))))
{
@@ -2025,7 +2032,7 @@ commit_one_edge_insertion (edge e)
to EXIT. */
e = single_succ_edge (bb);
- gcc_assert (e->dest == EXIT_BLOCK_PTR
+ gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
e->flags &= ~EDGE_FALLTHRU;
@@ -2057,7 +2064,8 @@ commit_edge_insertions (void)
verify_flow_info ();
#endif
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
@@ -2428,8 +2436,8 @@ rtl_verify_edges (void)
n_fallthru++, fallthru = e;
is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
- && e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR);
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
has_crossing_edge |= is_crossing;
if (e->flags & EDGE_CROSSING)
{
@@ -2832,8 +2840,8 @@ rtl_verify_fallthru (void)
break;
}
}
- else if (e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR)
+ else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rtx insn;
@@ -2872,10 +2880,10 @@ rtl_verify_bb_layout (void)
rtx x;
int num_bb_notes;
const rtx rtx_first = get_insns ();
- basic_block last_bb_seen = ENTRY_BLOCK_PTR, curr_bb = NULL;
+ basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
num_bb_notes = 0;
- last_bb_seen = ENTRY_BLOCK_PTR;
+ last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
for (x = rtx_first; x; x = NEXT_INSN (x))
{
@@ -2921,10 +2929,10 @@ rtl_verify_bb_layout (void)
curr_bb = NULL;
}
- if (num_bb_notes != n_basic_blocks - NUM_FIXED_BLOCKS)
+ if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
internal_error
("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
- num_bb_notes, n_basic_blocks);
+ num_bb_notes, n_basic_blocks_for_fn (cfun));
return err;
}
@@ -3062,7 +3070,7 @@ purge_dead_edges (basic_block bb)
ei_next (&ei);
continue;
}
- else if (e->dest != EXIT_BLOCK_PTR
+ else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& BB_HEAD (e->dest) == JUMP_LABEL (insn))
/* If the destination block is the target of the jump,
keep the edge. */
@@ -3070,7 +3078,8 @@ purge_dead_edges (basic_block bb)
ei_next (&ei);
continue;
}
- else if (e->dest == EXIT_BLOCK_PTR && returnjump_p (insn))
+ else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && returnjump_p (insn))
/* If the destination block is the exit block, and this
instruction is a return, then keep the edge. */
{
@@ -3319,7 +3328,7 @@ skip_insns_after_block (basic_block bb)
rtx insn, last_insn, next_head, prev;
next_head = NULL_RTX;
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
next_head = BB_HEAD (bb->next_bb);
for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
@@ -3468,7 +3477,7 @@ outof_cfg_layout_mode (void)
basic_block bb;
FOR_EACH_BB (bb)
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
cfg_layout_finalize ();
@@ -3577,7 +3586,8 @@ relink_block_chain (bool stay_in_cfglayout_mode)
if (dump_file)
{
fprintf (dump_file, "Reordered sequence:\n");
- for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
+ for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
+ NUM_FIXED_BLOCKS;
bb;
bb = (basic_block) bb->aux, index++)
{
@@ -3595,15 +3605,15 @@ relink_block_chain (bool stay_in_cfglayout_mode)
}
/* Now reorder the blocks. */
- prev_bb = ENTRY_BLOCK_PTR;
- bb = ENTRY_BLOCK_PTR->next_bb;
+ prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
{
bb->prev_bb = prev_bb;
prev_bb->next_bb = bb;
}
- prev_bb->next_bb = EXIT_BLOCK_PTR;
- EXIT_BLOCK_PTR->prev_bb = prev_bb;
+ prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
/* Then, clean up the aux fields. */
FOR_ALL_BB (bb)
@@ -3644,7 +3654,8 @@ fixup_reorder_chain (void)
/* First do the bulk reordering -- rechain the blocks without regard to
the needed changes to jumps and labels. */
- for (bb = ENTRY_BLOCK_PTR->next_bb; bb; bb = (basic_block) bb->aux)
+ for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
+ bb->aux)
{
if (BB_HEADER (bb))
{
@@ -3687,7 +3698,8 @@ fixup_reorder_chain (void)
/* Now add jumps and labels as needed to match the blocks new
outgoing edges. */
- for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = (basic_block) bb->aux)
+ for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
+ bb->aux)
{
edge e_fall, e_taken, e;
rtx bb_end_insn;
@@ -3728,7 +3740,7 @@ fixup_reorder_chain (void)
/* If the old fallthru is still next, nothing to do. */
if (bb->aux == e_fall->dest
- || e_fall->dest == EXIT_BLOCK_PTR)
+ || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* The degenerated case of conditional jump jumping to the next
@@ -3749,7 +3761,8 @@ fixup_reorder_chain (void)
if (note
&& XINT (note, 0) < REG_BR_PROB_BASE / 2
&& invert_jump (bb_end_insn,
- (e_fall->dest == EXIT_BLOCK_PTR
+ (e_fall->dest
+ == EXIT_BLOCK_PTR_FOR_FN (cfun)
? NULL_RTX
: label_for_bb (e_fall->dest)), 0))
{
@@ -3771,7 +3784,8 @@ fixup_reorder_chain (void)
/* Otherwise we can try to invert the jump. This will
basically never fail, however, keep up the pretense. */
else if (invert_jump (bb_end_insn,
- (e_fall->dest == EXIT_BLOCK_PTR
+ (e_fall->dest
+ == EXIT_BLOCK_PTR_FOR_FN (cfun)
? NULL_RTX
: label_for_bb (e_fall->dest)), 0))
{
@@ -3793,7 +3807,7 @@ fixup_reorder_chain (void)
__builtin_unreachable ()), nothing to do. */
if (! e_fall
|| bb->aux == e_fall->dest
- || e_fall->dest == EXIT_BLOCK_PTR)
+ || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* Otherwise we'll have to use the fallthru fixup below. */
@@ -3820,7 +3834,7 @@ fixup_reorder_chain (void)
continue;
/* A fallthru to exit block. */
- if (e_fall->dest == EXIT_BLOCK_PTR)
+ if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
}
@@ -3880,7 +3894,7 @@ fixup_reorder_chain (void)
continue;
}
dest = e->dest;
- if (dest == EXIT_BLOCK_PTR)
+ if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Non-fallthru edges to the exit block cannot be split. */
if (!(e->flags & EDGE_FALLTHRU))
@@ -3958,13 +3972,13 @@ fixup_fallthru_exit_predecessor (void)
value. */
gcc_assert (reload_completed);
- e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
+ e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
if (e)
bb = e->src;
if (bb && bb->aux)
{
- basic_block c = ENTRY_BLOCK_PTR->next_bb;
+ basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
/* If the very first block is the one with the fall-through exit
edge, we have to split that block. */
@@ -4000,7 +4014,7 @@ force_one_exit_fallthru (void)
edge_iterator ei;
basic_block forwarder, bb;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e->flags & EDGE_FALLTHRU)
{
if (predecessor == NULL)
@@ -4018,7 +4032,8 @@ force_one_exit_fallthru (void)
/* Exit has several fallthru predecessors. Create a forwarder block for
them. */
forwarder = split_edge (predecessor);
- for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
+ for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
+ (e = ei_safe_edge (ei)); )
{
if (e->src == forwarder
|| !(e->flags & EDGE_FALLTHRU))
@@ -4166,7 +4181,7 @@ cfg_layout_duplicate_bb (basic_block bb)
insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
new_bb = create_basic_block (insn,
insn ? get_last_insn () : NULL,
- EXIT_BLOCK_PTR->prev_bb);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
BB_COPY_PARTITION (new_bb, bb);
if (BB_HEADER (bb))
@@ -4204,6 +4219,15 @@ cfg_layout_initialize (unsigned int flags)
rtx x;
basic_block bb;
+ /* Once bb reordering is complete, cfg layout mode should not be re-entered.
+ Entering cfg layout mode will perform optimizations on the cfg that
+ could affect the bb layout negatively or even require fixups. An
+ example of the latter is if edge forwarding performed when optimizing
+ the cfg layout required moving a block from the hot to the cold section
+ under -freorder-blocks-and-partition. This would create an illegal
+ partitioning unless some manual fixup was performed. */
+ gcc_assert (!crtl->bb_reorder_complete);
+
initialize_original_copy_tables ();
cfg_layout_rtl_register_cfg_hooks ();
@@ -4304,14 +4328,14 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
if (e->dest == dest)
return e;
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (ret = try_redirect_by_replacing_jump (e, dest, true)))
{
df_set_bb_dirty (src);
return ret;
}
- if (e->src == ENTRY_BLOCK_PTR
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
{
if (dump_file)
@@ -4438,7 +4462,7 @@ cfg_layout_delete_block (basic_block bb)
set_last_insn (insn);
}
}
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
to = &BB_HEADER (bb->next_bb);
else
to = &cfg_layout_function_footer;
@@ -4495,7 +4519,7 @@ cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
{
edge e = find_fallthru_edge (b->succs);
- if (e && e->dest == EXIT_BLOCK_PTR)
+ if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
}
@@ -4506,7 +4530,8 @@ cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
&& a != b
/* Must be simple edge. */
&& !(single_succ_edge (a)->flags & EDGE_COMPLEX)
- && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
+ && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
/* If the jump insn has side effects, we can't kill the edge.
When not optimizing, try_redirect_by_replacing_jump will
not allow us to redirect an edge by replacing a table jump. */
@@ -4625,11 +4650,11 @@ static basic_block
cfg_layout_split_edge (edge e)
{
basic_block new_bb =
- create_basic_block (e->src != ENTRY_BLOCK_PTR
+ create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
? NEXT_INSN (BB_END (e->src)) : get_insns (),
NULL_RTX, e->src);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
BB_COPY_PARTITION (new_bb, e->src);
else
BB_COPY_PARTITION (new_bb, e->dest);
@@ -4654,7 +4679,8 @@ rtl_block_empty_p (basic_block bb)
{
rtx insn;
- if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return true;
FOR_BB_INSNS (bb, insn)
@@ -4755,13 +4781,14 @@ rtl_flow_call_edges_add (sbitmap blocks)
int last_bb = last_basic_block;
bool check_last_block = false;
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return 0;
if (! blocks)
check_last_block = true;
else
- check_last_block = bitmap_bit_p (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+ check_last_block = bitmap_bit_p (blocks,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
@@ -4777,7 +4804,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
Handle this by adding a dummy instruction in a new last basic block. */
if (check_last_block)
{
- basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+ basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
rtx insn = BB_END (bb);
/* Back up past insns that must be kept in the same block as a call. */
@@ -4789,7 +4816,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
{
edge e;
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (e)
{
insert_insn_on_edge (gen_use (const0_rtx), e);
@@ -4837,7 +4864,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
#ifdef ENABLE_CHECKING
if (split_at_insn == BB_END (bb))
{
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
#endif
@@ -4851,7 +4878,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
blocks_split++;
}
- make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
if (insn == BB_HEAD (bb))
@@ -4943,7 +4970,7 @@ rtl_can_remove_branch_p (const_edge e)
const_rtx insn = BB_END (src), set;
/* The conditions are taken from try_redirect_by_replacing_jump. */
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
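
Most of the cfgrtl.c hunks above are a mechanical rename from the cfun-implicit spellings (ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, n_basic_blocks) to accessors that take the function as an explicit argument. A rough self-contained sketch of the relationship, with stand-in types (toy_cfg, toy_function, toy_n_basic_blocks*); the real definitions live in basic-block.h and may differ in detail:

    #include <cstdio>

    /* Stand-ins for GCC's per-function control-flow-graph bookkeeping.  */
    struct toy_block { int index; };
    struct toy_cfg
    {
      toy_block *x_entry_block_ptr;
      toy_block *x_exit_block_ptr;
      int x_n_basic_blocks;
    };
    struct toy_function { toy_cfg *cfg; };

    /* The "current function" that the old spellings assumed.  */
    static toy_function *cfun;

    /* Old style: the function is implicit.  */
    #define toy_n_basic_blocks            (cfun->cfg->x_n_basic_blocks)
    /* New style: the function is an explicit argument, so code can talk
       about a function other than cfun without swapping it in first.  */
    #define toy_n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)

    int
    main ()
    {
      toy_cfg cfg = { nullptr, nullptr, 5 };
      toy_function fn = { &cfg };
      cfun = &fn;
      std::printf ("%d == %d\n", toy_n_basic_blocks,
                   toy_n_basic_blocks_for_fn (&fn));
      return 0;
    }
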
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 29cca125458..936b405294c 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -28,6 +28,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
+#include "calls.h"
+#include "print-tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "hashtab.h"
@@ -958,16 +961,26 @@ cgraph_create_indirect_edge (struct cgraph_node *caller, gimple call_stmt,
&& (target = gimple_call_fn (call_stmt))
&& virtual_method_call_p (target))
{
- tree type = obj_type_ref_class (target);
+ tree otr_type;
+ HOST_WIDE_INT otr_token;
+ ipa_polymorphic_call_context context;
+ get_polymorphic_call_info (caller->decl,
+ target,
+ &otr_type, &otr_token,
+ &context);
/* Only record types can have virtual calls. */
- gcc_assert (TREE_CODE (type) == RECORD_TYPE);
+ gcc_assert (TREE_CODE (otr_type) == RECORD_TYPE);
+ edge->indirect_info->polymorphic = true;
edge->indirect_info->param_index = -1;
- edge->indirect_info->otr_token
- = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (target));
- edge->indirect_info->otr_type = type;
- edge->indirect_info->polymorphic = 1;
+ edge->indirect_info->otr_token = otr_token;
+ edge->indirect_info->otr_type = otr_type;
+ edge->indirect_info->outer_type = context.outer_type;
+ edge->indirect_info->offset = context.offset;
+ edge->indirect_info->maybe_in_construction
+ = context.maybe_in_construction;
+ edge->indirect_info->maybe_derived_type = context.maybe_derived_type;
}
edge->next_callee = caller->indirect_calls;
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index db36f5e8be7..4acf2d0c286 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -434,7 +434,7 @@ struct GTY(()) cgraph_indirect_call_info
/* OBJ_TYPE_REF_TOKEN of a polymorphic call (if polymorphic is set). */
HOST_WIDE_INT otr_token;
/* Type of the object from OBJ_TYPE_REF_OBJECT. */
- tree otr_type;
+ tree otr_type, outer_type;
/* Index of the parameter that is called. */
int param_index;
/* ECF flags determined from the caller. */
@@ -455,6 +455,8 @@ struct GTY(()) cgraph_indirect_call_info
/* When the previous bit is set, this one determines whether the destination
is loaded from a parameter passed by reference. */
unsigned by_ref : 1;
+ unsigned int maybe_in_construction : 1;
+ unsigned int maybe_derived_type : 1;
};
struct GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"))) cgraph_edge {
@@ -741,7 +743,7 @@ void cgraph_finalize_function (tree, bool);
void finalize_compilation_unit (void);
void compile (void);
void init_cgraph (void);
-bool cgraph_process_new_functions (void);
+void cgraph_process_new_functions (void);
void cgraph_process_same_body_aliases (void);
void fixup_same_cpp_alias_visibility (symtab_node *, symtab_node *target, tree);
/* Initialize datastructures so DECL is a function in lowered gimple form.
@@ -850,6 +852,8 @@ void symtab_initialize_asm_name_hash (void);
void symtab_prevail_in_asm_name_hash (symtab_node *node);
void varpool_remove_initializer (struct varpool_node *);
+/* In cgraph.c */
+extern void change_decl_assembler_name (tree, tree);
/* Return callgraph node for given symbol and check it is a function. */
static inline struct cgraph_node *
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index 7834b065d52..21f6ebe8d09 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -198,7 +198,7 @@ record_eh_tables (struct cgraph_node *node, struct function *fun)
int
compute_call_stmt_bb_frequency (tree decl, basic_block bb)
{
- int entry_freq = ENTRY_BLOCK_PTR_FOR_FUNCTION
+ int entry_freq = ENTRY_BLOCK_PTR_FOR_FN
(DECL_STRUCT_FUNCTION (decl))->frequency;
int freq = bb->frequency;
@@ -441,7 +441,7 @@ rebuild_cgraph_edges (void)
cgraph_node_remove_callees (node);
ipa_remove_all_references (&node->ref_list);
- node->count = ENTRY_BLOCK_PTR->count;
+ node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
FOR_EACH_BB (bb)
{
@@ -493,7 +493,7 @@ cgraph_rebuild_references (void)
else
i++;
- node->count = ENTRY_BLOCK_PTR->count;
+ node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
FOR_EACH_BB (bb)
{
diff --git a/gcc/cgraphclones.c b/gcc/cgraphclones.c
index 28e40639e32..795a321ae57 100644
--- a/gcc/cgraphclones.c
+++ b/gcc/cgraphclones.c
@@ -68,9 +68,12 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "function.h"
+#include "emit-rtl.h"
#include "gimple.h"
-#include "rtl.h"
#include "bitmap.h"
#include "tree-cfg.h"
#include "tree-inline.h"
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 4a351180ae5..fb23abed8da 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -162,6 +162,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "output.h"
#include "rtl.h"
#include "gimple.h"
@@ -265,11 +268,13 @@ decide_is_symbol_needed (symtab_node *node)
return false;
}
-/* Head of the queue of nodes to be processed while building callgraph */
+/* Head and terminator of the queue of nodes to be processed while building
+ callgraph. */
-static symtab_node *first = (symtab_node *)(void *)1;
+static symtab_node symtab_terminator;
+static symtab_node *queued_nodes = &symtab_terminator;
-/* Add NODE to queue starting at FIRST.
+/* Add NODE to queue starting at QUEUED_NODES.
The queue is linked via AUX pointers and terminated by pointer to 1. */
static void
@@ -277,25 +282,24 @@ enqueue_node (symtab_node *node)
{
if (node->aux)
return;
- gcc_checking_assert (first);
- node->aux = first;
- first = node;
+ gcc_checking_assert (queued_nodes);
+ node->aux = queued_nodes;
+ queued_nodes = node;
}
/* Process CGRAPH_NEW_FUNCTIONS and perform actions necessary to add these
functions into callgraph in a way so they look like ordinary reachable
functions inserted into callgraph already at construction time. */
-bool
+void
cgraph_process_new_functions (void)
{
- bool output = false;
tree fndecl;
struct cgraph_node *node;
cgraph_node_set_iterator csi;
if (!cgraph_new_nodes)
- return false;
+ return;
handle_alias_pairs ();
/* Note that this queue may grow as its being processed, as the new
functions may generate new ones. */
@@ -310,7 +314,6 @@ cgraph_process_new_functions (void)
it into reachable functions list. */
cgraph_finalize_function (fndecl, false);
- output = true;
cgraph_call_function_insertion_hooks (node);
enqueue_node (node);
break;
@@ -351,7 +354,6 @@ cgraph_process_new_functions (void)
}
free_cgraph_node_set (cgraph_new_nodes);
cgraph_new_nodes = NULL;
- return output;
}
/* As an GCC extension we allow redefinition of the function. The
@@ -829,7 +831,8 @@ varpool_finalize_decl (tree decl)
varpool_analyze_node (node);
/* Some frontends produce various interface variables after compilation
finished. */
- if (cgraph_state == CGRAPH_STATE_FINISHED)
+ if (cgraph_state == CGRAPH_STATE_FINISHED
+ || (!flag_toplevel_reorder && cgraph_state == CGRAPH_STATE_EXPANSION))
varpool_assemble_decl (node);
}
@@ -981,11 +984,11 @@ analyze_functions (void)
/* Lower representation, build callgraph edges and references for all trivially
needed symbols and all symbols referred by them. */
- while (first != (symtab_node *)(void *)1)
+ while (queued_nodes != &symtab_terminator)
{
changed = true;
- node = first;
- first = (symtab_node *)first->aux;
+ node = queued_nodes;
+ queued_nodes = (symtab_node *)queued_nodes->aux;
cgraph_node *cnode = dyn_cast <cgraph_node> (node);
if (cnode && cnode->definition)
{
@@ -1333,10 +1336,10 @@ init_lowered_empty_function (tree decl, bool in_ssa)
loops_for_fn (cfun)->state |= LOOPS_MAY_HAVE_MULTIPLE_LATCHES;
/* Create BB for body of the function and connect it properly. */
- bb = create_basic_block (NULL, (void *) 0, ENTRY_BLOCK_PTR);
- make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
- make_edge (bb, EXIT_BLOCK_PTR, 0);
- add_bb_to_loop (bb, ENTRY_BLOCK_PTR->loop_father);
+ bb = create_basic_block (NULL, (void *) 0, ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FALLTHRU);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
+ add_bb_to_loop (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father);
return bb;
}
@@ -1624,7 +1627,7 @@ expand_thunk (struct cgraph_node *node, bool output_asm_thunks)
gsi_insert_after (&bsi, stmt, GSI_NEW_STMT);
make_edge (bb, then_bb, EDGE_TRUE_VALUE);
make_edge (bb, else_bb, EDGE_FALSE_VALUE);
- make_edge (return_bb, EXIT_BLOCK_PTR, 0);
+ make_edge (return_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
make_edge (then_bb, return_bb, EDGE_FALLTHRU);
make_edge (else_bb, return_bb, EDGE_FALLTHRU);
bsi = gsi_last_bb (then_bb);
@@ -2019,9 +2022,6 @@ ipa_passes (void)
if (flag_generate_lto)
targetm.asm_out.lto_start ();
- execute_ipa_summary_passes ((struct ipa_opt_pass_d *)
- passes->all_lto_gen_passes);
-
if (!in_lto_p)
ipa_write_summaries ();
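
The cgraphunit.c hunks replace the old queue terminator, the integer 1 cast to a symtab_node pointer, with the address of a dedicated static symtab_terminator node, so the sentinel is a valid object pointer rather than a fabricated one. A small self-contained sketch of the same aux-chained intrusive queue with a sentinel terminator, using toy types (toy_node, toy_enqueue) rather than the GCC ones:

    #include <cstdio>

    /* Stand-in for symtab_node: queue membership is recorded in aux.  */
    struct toy_node
    {
      const char *name;
      void *aux;
    };

    /* Dedicated terminator object; its address replaces the old
       (symtab_node *)(void *)1 sentinel.  */
    static toy_node toy_terminator;
    static toy_node *queued_nodes = &toy_terminator;

    static void
    toy_enqueue (toy_node *node)
    {
      if (node->aux)
        return;   /* already queued */
      node->aux = queued_nodes;
      queued_nodes = node;
    }

    int
    main ()
    {
      toy_node a = { "a", nullptr }, b = { "b", nullptr };
      toy_enqueue (&a);
      toy_enqueue (&b);
      while (queued_nodes != &toy_terminator)
        {
          toy_node *node = queued_nodes;
          queued_nodes = static_cast<toy_node *> (node->aux);
          node->aux = nullptr;
          std::printf ("processing %s\n", node->name);
        }
      return 0;
    }
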
diff --git a/gcc/cilk-common.c b/gcc/cilk-common.c
index 98b85988764..bdea617a2e8 100644
--- a/gcc/cilk-common.c
+++ b/gcc/cilk-common.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "langhooks.h"
#include "expr.h"
#include "optabs.h"
diff --git a/gcc/combine.c b/gcc/combine.c
index b21ffce6247..52382a75712 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -81,6 +81,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
@@ -1156,7 +1157,7 @@ combine_instructions (rtx f, unsigned int nregs)
setup_incoming_promotions (first);
/* Allow the entry block and the first block to fall into the same EBB.
Conceptually the incoming promotions are assigned to the entry block. */
- last_bb = ENTRY_BLOCK_PTR;
+ last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
create_log_links ();
FOR_EACH_BB (this_basic_block)
@@ -1208,7 +1209,7 @@ combine_instructions (rtx f, unsigned int nregs)
label_tick = label_tick_ebb_start = 1;
init_reg_last ();
setup_incoming_promotions (first);
- last_bb = ENTRY_BLOCK_PTR;
+ last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
FOR_EACH_BB (this_basic_block)
{
@@ -1591,7 +1592,7 @@ set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
/* If this register is undefined at the start of the file, we can't
say what its contents were. */
&& ! REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
&& HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
{
reg_stat_type *rsp = &reg_stat[REGNO (x)];
@@ -3930,7 +3931,7 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
ni2dest = SET_DEST (newi2pat);
for (insn = NEXT_INSN (i3);
- insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
+ insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| insn != BB_HEAD (this_basic_block->next_bb));
insn = NEXT_INSN (insn))
{
@@ -4046,7 +4047,8 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
&& ! find_reg_note (i2, REG_UNUSED,
SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
for (temp = NEXT_INSN (i2);
- temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
+ temp
+ && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| BB_HEAD (this_basic_block) != temp);
temp = NEXT_INSN (temp))
if (temp != i3 && INSN_P (temp))
@@ -9460,7 +9462,8 @@ reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& !REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
+ REGNO (x)))))
{
*nonzero &= rsp->last_set_nonzero_bits;
return NULL;
@@ -9527,7 +9530,8 @@ reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& !REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
+ REGNO (x)))))
{
*result = rsp->last_set_sign_bit_copies;
return NULL;
@@ -12556,7 +12560,8 @@ get_last_value_validate (rtx *loc, rtx insn, int tick, int replace)
|| (! (regno >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (regno) == 1
&& (!REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
+ regno)))
&& rsp->last_set_label > tick))
{
if (replace)
@@ -12671,7 +12676,7 @@ get_last_value (const_rtx x)
&& (regno < FIRST_PSEUDO_REGISTER
|| REG_N_SETS (regno) != 1
|| REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
return 0;
/* If the value was set in a later insn than the ones we are processing,
@@ -13732,7 +13737,7 @@ distribute_links (struct insn_link *links)
since most links don't point very far away. */
for (insn = NEXT_INSN (link->insn);
- (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
+ (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| BB_HEAD (this_basic_block->next_bb) != insn));
insn = NEXT_INSN (insn))
if (DEBUG_INSN_P (insn))
diff --git a/gcc/common.opt b/gcc/common.opt
index d5971df6418..43d49ace352 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1156,7 +1156,7 @@ ffast-math
Common
ffat-lto-objects
-Common Var(flag_fat_lto_objects) Init(1)
+Common Var(flag_fat_lto_objects)
Output lto objects containing both the intermediate language and binary output.
ffinite-math-only
@@ -1392,6 +1392,10 @@ fipa-pure-const
Common Report Var(flag_ipa_pure_const) Init(0) Optimization
Discover pure and const functions
+fipa-sem-equality
+Common Report Var(flag_ipa_sem_equality) Init(1) Optimization
+Perform semantic function equality
+
fipa-reference
Common Report Var(flag_ipa_reference) Init(0) Optimization
Discover readonly and non addressable static variables
diff --git a/gcc/common/config/i386/i386-common.c b/gcc/common/config/i386/i386-common.c
index 341637b4a10..e07479da28c 100644
--- a/gcc/common/config/i386/i386-common.c
+++ b/gcc/common/config/i386/i386-common.c
@@ -789,6 +789,8 @@ static const struct default_options ix86_option_optimization_table[] =
{
/* Enable redundant extension instructions removal at -O2 and higher. */
{ OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 },
+ /* Enable function splitting at -O2 and higher. */
+ { OPT_LEVELS_2_PLUS, OPT_freorder_blocks_and_partition, NULL, 1 },
/* Turn off -fschedule-insns by default. It tends to make the
problem with not enough registers even worse. */
{ OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 2f1a8d03cb1..fec7b222529 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -24,6 +24,9 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "calls.h"
#include "expr.h"
#include "tm_p.h"
#include "recog.h"
@@ -81,57 +84,101 @@ enum aarch64_simd_builtin_type_mode
#define UP(X) X##_UP
-typedef enum
+#define SIMD_MAX_BUILTIN_ARGS 5
+
+enum aarch64_type_qualifiers
{
- AARCH64_SIMD_BINOP,
- AARCH64_SIMD_TERNOP,
- AARCH64_SIMD_QUADOP,
- AARCH64_SIMD_UNOP,
- AARCH64_SIMD_GETLANE,
- AARCH64_SIMD_SETLANE,
- AARCH64_SIMD_CREATE,
- AARCH64_SIMD_DUP,
- AARCH64_SIMD_DUPLANE,
- AARCH64_SIMD_COMBINE,
- AARCH64_SIMD_SPLIT,
- AARCH64_SIMD_LANEMUL,
- AARCH64_SIMD_LANEMULL,
- AARCH64_SIMD_LANEMULH,
- AARCH64_SIMD_LANEMAC,
- AARCH64_SIMD_SCALARMUL,
- AARCH64_SIMD_SCALARMULL,
- AARCH64_SIMD_SCALARMULH,
- AARCH64_SIMD_SCALARMAC,
- AARCH64_SIMD_CONVERT,
- AARCH64_SIMD_FIXCONV,
- AARCH64_SIMD_SELECT,
- AARCH64_SIMD_RESULTPAIR,
- AARCH64_SIMD_REINTERP,
- AARCH64_SIMD_VTBL,
- AARCH64_SIMD_VTBX,
- AARCH64_SIMD_LOAD1,
- AARCH64_SIMD_LOAD1LANE,
- AARCH64_SIMD_STORE1,
- AARCH64_SIMD_STORE1LANE,
- AARCH64_SIMD_LOADSTRUCT,
- AARCH64_SIMD_LOADSTRUCTLANE,
- AARCH64_SIMD_STORESTRUCT,
- AARCH64_SIMD_STORESTRUCTLANE,
- AARCH64_SIMD_LOGICBINOP,
- AARCH64_SIMD_SHIFTINSERT,
- AARCH64_SIMD_SHIFTIMM,
- AARCH64_SIMD_SHIFTACC
-} aarch64_simd_itype;
+ /* T foo. */
+ qualifier_none = 0x0,
+ /* unsigned T foo. */
+ qualifier_unsigned = 0x1, /* 1 << 0 */
+ /* const T foo. */
+ qualifier_const = 0x2, /* 1 << 1 */
+ /* T *foo. */
+ qualifier_pointer = 0x4, /* 1 << 2 */
+ /* const T *foo. */
+ qualifier_const_pointer = 0x6, /* qualifier_const | qualifier_pointer */
+ /* Used when expanding arguments if an operand could
+ be an immediate. */
+ qualifier_immediate = 0x8, /* 1 << 3 */
+ qualifier_maybe_immediate = 0x10, /* 1 << 4 */
+ /* void foo (...). */
+ qualifier_void = 0x20, /* 1 << 5 */
+ /* Some patterns may have internal operands, this qualifier is an
+ instruction to the initialisation code to skip this operand. */
+ qualifier_internal = 0x40, /* 1 << 6 */
+ /* Some builtins should use the T_*mode* encoded in a simd_builtin_datum
+ rather than using the type of the operand. */
+ qualifier_map_mode = 0x80, /* 1 << 7 */
+ /* qualifier_pointer | qualifier_map_mode */
+ qualifier_pointer_map_mode = 0x84,
+ /* qualifier_const_pointer | qualifier_map_mode */
+ qualifier_const_pointer_map_mode = 0x86
+};
typedef struct
{
const char *name;
- const aarch64_simd_itype itype;
enum aarch64_simd_builtin_type_mode mode;
const enum insn_code code;
unsigned int fcode;
+ enum aarch64_type_qualifiers *qualifiers;
} aarch64_simd_builtin_datum;
+static enum aarch64_type_qualifiers
+aarch64_types_unop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none };
+#define TYPES_UNOP (aarch64_types_unop_qualifiers)
+#define TYPES_CREATE (aarch64_types_unop_qualifiers)
+#define TYPES_REINTERP (aarch64_types_unop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_maybe_immediate };
+#define TYPES_BINOP (aarch64_types_binop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_ternop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_none };
+#define TYPES_TERNOP (aarch64_types_ternop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_quadop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none,
+ qualifier_none, qualifier_none };
+#define TYPES_QUADOP (aarch64_types_quadop_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_getlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_immediate };
+#define TYPES_GETLANE (aarch64_types_getlane_qualifiers)
+#define TYPES_SHIFTIMM (aarch64_types_getlane_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_setlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate };
+#define TYPES_SETLANE (aarch64_types_setlane_qualifiers)
+#define TYPES_SHIFTINSERT (aarch64_types_setlane_qualifiers)
+#define TYPES_SHIFTACC (aarch64_types_setlane_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_combine_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none };
+#define TYPES_COMBINE (aarch64_types_combine_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_const_pointer_map_mode };
+#define TYPES_LOAD1 (aarch64_types_load1_qualifiers)
+#define TYPES_LOADSTRUCT (aarch64_types_load1_qualifiers)
+
+/* The first argument (return type) of a store should be void type,
+ which we represent with qualifier_void. Their first operand will be
+ a DImode pointer to the location to store to, so we must use
+ qualifier_map_mode | qualifier_pointer to build a pointer to the
+ element type of the vector. */
+static enum aarch64_type_qualifiers
+aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_pointer_map_mode, qualifier_none };
+#define TYPES_STORE1 (aarch64_types_store1_qualifiers)
+#define TYPES_STORESTRUCT (aarch64_types_store1_qualifiers)
+
#define CF0(N, X) CODE_FOR_aarch64_##N##X
#define CF1(N, X) CODE_FOR_##N##X##1
#define CF2(N, X) CODE_FOR_##N##X##2
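
Read each qualifiers array with the first entry as the return type and the rest as operands, as the comment before aarch64_types_store1_qualifiers spells out. For example, TYPES_BINOP = { qualifier_none, qualifier_none, qualifier_maybe_immediate } describes a builtin of the shape T op (T, T-or-immediate), while TYPES_STORE1 = { qualifier_void, qualifier_pointer_map_mode, qualifier_none } describes void store (T *addr, vecT value), with the pointer type derived from the builtin's mode rather than from the operand.
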
@@ -140,7 +187,7 @@ typedef struct
#define CF10(N, X) CODE_FOR_##N##X
#define VAR1(T, N, MAP, A) \
- {#N, AARCH64_SIMD_##T, UP (A), CF##MAP (N, A), 0},
+ {#N, UP (A), CF##MAP (N, A), 0, TYPES_##T},
#define VAR2(T, N, MAP, A, B) \
VAR1 (T, N, MAP, A) \
VAR1 (T, N, MAP, B)
@@ -279,118 +326,175 @@ static GTY(()) tree aarch64_builtin_decls[AARCH64_BUILTIN_MAX];
#define NUM_DREG_TYPES 6
#define NUM_QREG_TYPES 6
+/* Return a tree for a signed or unsigned argument of either
+ the mode specified by MODE, or the inner mode of MODE. */
+tree
+aarch64_build_scalar_type (enum machine_mode mode, bool unsigned_p)
+{
+#undef INT_TYPES
+#define INT_TYPES \
+ AARCH64_TYPE_BUILDER (QI) \
+ AARCH64_TYPE_BUILDER (HI) \
+ AARCH64_TYPE_BUILDER (SI) \
+ AARCH64_TYPE_BUILDER (DI) \
+ AARCH64_TYPE_BUILDER (EI) \
+ AARCH64_TYPE_BUILDER (OI) \
+ AARCH64_TYPE_BUILDER (CI) \
+ AARCH64_TYPE_BUILDER (XI) \
+ AARCH64_TYPE_BUILDER (TI) \
+
+/* Statically declare all the possible types we might need. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ static tree X##_aarch64_type_node_s = NULL; \
+ static tree X##_aarch64_type_node_u = NULL;
+
+ INT_TYPES
+
+ static tree float_aarch64_type_node = NULL;
+ static tree double_aarch64_type_node = NULL;
+
+ gcc_assert (!VECTOR_MODE_P (mode));
+
+/* If we've already initialised this type, don't initialise it again,
+ otherwise ask for a new type of the correct size. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ case X##mode: \
+ if (unsigned_p) \
+ return (X##_aarch64_type_node_u \
+ ? X##_aarch64_type_node_u \
+ : X##_aarch64_type_node_u \
+ = make_unsigned_type (GET_MODE_PRECISION (mode))); \
+ else \
+ return (X##_aarch64_type_node_s \
+ ? X##_aarch64_type_node_s \
+ : X##_aarch64_type_node_s \
+ = make_signed_type (GET_MODE_PRECISION (mode))); \
+ break;
+
+ switch (mode)
+ {
+ INT_TYPES
+ case SFmode:
+ if (!float_aarch64_type_node)
+ {
+ float_aarch64_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (float_aarch64_type_node) = FLOAT_TYPE_SIZE;
+ layout_type (float_aarch64_type_node);
+ }
+ return float_aarch64_type_node;
+ break;
+ case DFmode:
+ if (!double_aarch64_type_node)
+ {
+ double_aarch64_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (double_aarch64_type_node) = DOUBLE_TYPE_SIZE;
+ layout_type (double_aarch64_type_node);
+ }
+ return double_aarch64_type_node;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+tree
+aarch64_build_vector_type (enum machine_mode mode, bool unsigned_p)
+{
+ tree eltype;
+
+#define VECTOR_TYPES \
+ AARCH64_TYPE_BUILDER (V16QI) \
+ AARCH64_TYPE_BUILDER (V8HI) \
+ AARCH64_TYPE_BUILDER (V4SI) \
+ AARCH64_TYPE_BUILDER (V2DI) \
+ AARCH64_TYPE_BUILDER (V8QI) \
+ AARCH64_TYPE_BUILDER (V4HI) \
+ AARCH64_TYPE_BUILDER (V2SI) \
+ \
+ AARCH64_TYPE_BUILDER (V4SF) \
+ AARCH64_TYPE_BUILDER (V2DF) \
+ AARCH64_TYPE_BUILDER (V2SF) \
+/* Declare our "cache" of values. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ static tree X##_aarch64_type_node_s = NULL; \
+ static tree X##_aarch64_type_node_u = NULL;
+
+ VECTOR_TYPES
+
+ gcc_assert (VECTOR_MODE_P (mode));
+
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ case X##mode: \
+ if (unsigned_p) \
+ return X##_aarch64_type_node_u \
+ ? X##_aarch64_type_node_u \
+ : X##_aarch64_type_node_u \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p), mode); \
+ else \
+ return X##_aarch64_type_node_s \
+ ? X##_aarch64_type_node_s \
+ : X##_aarch64_type_node_s \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p), mode); \
+ break;
+
+ switch (mode)
+ {
+ default:
+ eltype = aarch64_build_scalar_type (GET_MODE_INNER (mode), unsigned_p);
+ return build_vector_type_for_mode (eltype, mode);
+ break;
+ VECTOR_TYPES
+ }
+}
+
+tree
+aarch64_build_type (enum machine_mode mode, bool unsigned_p)
+{
+ if (VECTOR_MODE_P (mode))
+ return aarch64_build_vector_type (mode, unsigned_p);
+ else
+ return aarch64_build_scalar_type (mode, unsigned_p);
+}
+
static void
aarch64_init_simd_builtins (void)
{
unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
- /* Scalar type nodes. */
- tree aarch64_simd_intQI_type_node;
- tree aarch64_simd_intHI_type_node;
- tree aarch64_simd_polyQI_type_node;
- tree aarch64_simd_polyHI_type_node;
- tree aarch64_simd_intSI_type_node;
- tree aarch64_simd_intDI_type_node;
- tree aarch64_simd_float_type_node;
- tree aarch64_simd_double_type_node;
-
- /* Pointer to scalar type nodes. */
- tree intQI_pointer_node;
- tree intHI_pointer_node;
- tree intSI_pointer_node;
- tree intDI_pointer_node;
- tree float_pointer_node;
- tree double_pointer_node;
-
- /* Const scalar type nodes. */
- tree const_intQI_node;
- tree const_intHI_node;
- tree const_intSI_node;
- tree const_intDI_node;
- tree const_float_node;
- tree const_double_node;
-
- /* Pointer to const scalar type nodes. */
- tree const_intQI_pointer_node;
- tree const_intHI_pointer_node;
- tree const_intSI_pointer_node;
- tree const_intDI_pointer_node;
- tree const_float_pointer_node;
- tree const_double_pointer_node;
-
- /* Vector type nodes. */
- tree V8QI_type_node;
- tree V4HI_type_node;
- tree V2SI_type_node;
- tree V2SF_type_node;
- tree V16QI_type_node;
- tree V8HI_type_node;
- tree V4SI_type_node;
- tree V4SF_type_node;
- tree V2DI_type_node;
- tree V2DF_type_node;
-
- /* Scalar unsigned type nodes. */
- tree intUQI_type_node;
- tree intUHI_type_node;
- tree intUSI_type_node;
- tree intUDI_type_node;
-
- /* Opaque integer types for structures of vectors. */
- tree intEI_type_node;
- tree intOI_type_node;
- tree intCI_type_node;
- tree intXI_type_node;
-
- /* Pointer to vector type nodes. */
- tree V8QI_pointer_node;
- tree V4HI_pointer_node;
- tree V2SI_pointer_node;
- tree V2SF_pointer_node;
- tree V16QI_pointer_node;
- tree V8HI_pointer_node;
- tree V4SI_pointer_node;
- tree V4SF_pointer_node;
- tree V2DI_pointer_node;
- tree V2DF_pointer_node;
-
- /* Operations which return results as pairs. */
- tree void_ftype_pv8qi_v8qi_v8qi;
- tree void_ftype_pv4hi_v4hi_v4hi;
- tree void_ftype_pv2si_v2si_v2si;
- tree void_ftype_pv2sf_v2sf_v2sf;
- tree void_ftype_pdi_di_di;
- tree void_ftype_pv16qi_v16qi_v16qi;
- tree void_ftype_pv8hi_v8hi_v8hi;
- tree void_ftype_pv4si_v4si_v4si;
- tree void_ftype_pv4sf_v4sf_v4sf;
- tree void_ftype_pv2di_v2di_v2di;
- tree void_ftype_pv2df_v2df_v2df;
-
- tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
- tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
- tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
-
- /* Create distinguished type nodes for AARCH64_SIMD vector element types,
- and pointers to values of such types, so we can detect them later. */
- aarch64_simd_intQI_type_node =
- make_signed_type (GET_MODE_PRECISION (QImode));
- aarch64_simd_intHI_type_node =
- make_signed_type (GET_MODE_PRECISION (HImode));
- aarch64_simd_polyQI_type_node =
+ /* In order that 'poly' types mangle correctly they must not share
+ a base tree with the other scalar types, thus we must generate them
+ as a special case. */
+ tree aarch64_simd_polyQI_type_node =
make_signed_type (GET_MODE_PRECISION (QImode));
- aarch64_simd_polyHI_type_node =
+ tree aarch64_simd_polyHI_type_node =
make_signed_type (GET_MODE_PRECISION (HImode));
- aarch64_simd_intSI_type_node =
- make_signed_type (GET_MODE_PRECISION (SImode));
- aarch64_simd_intDI_type_node =
- make_signed_type (GET_MODE_PRECISION (DImode));
- aarch64_simd_float_type_node = make_node (REAL_TYPE);
- aarch64_simd_double_type_node = make_node (REAL_TYPE);
- TYPE_PRECISION (aarch64_simd_float_type_node) = FLOAT_TYPE_SIZE;
- TYPE_PRECISION (aarch64_simd_double_type_node) = DOUBLE_TYPE_SIZE;
- layout_type (aarch64_simd_float_type_node);
- layout_type (aarch64_simd_double_type_node);
+
+ /* Scalar type nodes. */
+ tree aarch64_simd_intQI_type_node = aarch64_build_type (QImode, false);
+ tree aarch64_simd_intHI_type_node = aarch64_build_type (HImode, false);
+ tree aarch64_simd_intSI_type_node = aarch64_build_type (SImode, false);
+ tree aarch64_simd_intDI_type_node = aarch64_build_type (DImode, false);
+ tree aarch64_simd_intTI_type_node = aarch64_build_type (TImode, false);
+ tree aarch64_simd_intEI_type_node = aarch64_build_type (EImode, false);
+ tree aarch64_simd_intOI_type_node = aarch64_build_type (OImode, false);
+ tree aarch64_simd_intCI_type_node = aarch64_build_type (CImode, false);
+ tree aarch64_simd_intXI_type_node = aarch64_build_type (XImode, false);
+ tree aarch64_simd_intUQI_type_node = aarch64_build_type (QImode, true);
+ tree aarch64_simd_intUHI_type_node = aarch64_build_type (HImode, true);
+ tree aarch64_simd_intUSI_type_node = aarch64_build_type (SImode, true);
+ tree aarch64_simd_intUDI_type_node = aarch64_build_type (DImode, true);
+
+ /* Float type nodes. */
+ tree aarch64_simd_float_type_node = aarch64_build_type (SFmode, false);
+ tree aarch64_simd_double_type_node = aarch64_build_type (DFmode, false);
/* Define typedefs which exactly correspond to the modes we are basing vector
types on. If you change these names you'll need to change
@@ -411,518 +515,129 @@ aarch64_init_simd_builtins (void)
"__builtin_aarch64_simd_poly8");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
"__builtin_aarch64_simd_poly16");
-
- intQI_pointer_node = build_pointer_type (aarch64_simd_intQI_type_node);
- intHI_pointer_node = build_pointer_type (aarch64_simd_intHI_type_node);
- intSI_pointer_node = build_pointer_type (aarch64_simd_intSI_type_node);
- intDI_pointer_node = build_pointer_type (aarch64_simd_intDI_type_node);
- float_pointer_node = build_pointer_type (aarch64_simd_float_type_node);
- double_pointer_node = build_pointer_type (aarch64_simd_double_type_node);
-
- /* Next create constant-qualified versions of the above types. */
- const_intQI_node = build_qualified_type (aarch64_simd_intQI_type_node,
- TYPE_QUAL_CONST);
- const_intHI_node = build_qualified_type (aarch64_simd_intHI_type_node,
- TYPE_QUAL_CONST);
- const_intSI_node = build_qualified_type (aarch64_simd_intSI_type_node,
- TYPE_QUAL_CONST);
- const_intDI_node = build_qualified_type (aarch64_simd_intDI_type_node,
- TYPE_QUAL_CONST);
- const_float_node = build_qualified_type (aarch64_simd_float_type_node,
- TYPE_QUAL_CONST);
- const_double_node = build_qualified_type (aarch64_simd_double_type_node,
- TYPE_QUAL_CONST);
-
- const_intQI_pointer_node = build_pointer_type (const_intQI_node);
- const_intHI_pointer_node = build_pointer_type (const_intHI_node);
- const_intSI_pointer_node = build_pointer_type (const_intSI_node);
- const_intDI_pointer_node = build_pointer_type (const_intDI_node);
- const_float_pointer_node = build_pointer_type (const_float_node);
- const_double_pointer_node = build_pointer_type (const_double_node);
-
- /* Now create vector types based on our AARCH64 SIMD element types. */
- /* 64-bit vectors. */
- V8QI_type_node =
- build_vector_type_for_mode (aarch64_simd_intQI_type_node, V8QImode);
- V4HI_type_node =
- build_vector_type_for_mode (aarch64_simd_intHI_type_node, V4HImode);
- V2SI_type_node =
- build_vector_type_for_mode (aarch64_simd_intSI_type_node, V2SImode);
- V2SF_type_node =
- build_vector_type_for_mode (aarch64_simd_float_type_node, V2SFmode);
- /* 128-bit vectors. */
- V16QI_type_node =
- build_vector_type_for_mode (aarch64_simd_intQI_type_node, V16QImode);
- V8HI_type_node =
- build_vector_type_for_mode (aarch64_simd_intHI_type_node, V8HImode);
- V4SI_type_node =
- build_vector_type_for_mode (aarch64_simd_intSI_type_node, V4SImode);
- V4SF_type_node =
- build_vector_type_for_mode (aarch64_simd_float_type_node, V4SFmode);
- V2DI_type_node =
- build_vector_type_for_mode (aarch64_simd_intDI_type_node, V2DImode);
- V2DF_type_node =
- build_vector_type_for_mode (aarch64_simd_double_type_node, V2DFmode);
-
- /* Unsigned integer types for various mode sizes. */
- intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
- intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
- intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
- intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
-
- (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
- "__builtin_aarch64_simd_uqi");
- (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
- "__builtin_aarch64_simd_uhi");
- (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
- "__builtin_aarch64_simd_usi");
- (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
- "__builtin_aarch64_simd_udi");
-
- /* Opaque integer types for structures of vectors. */
- intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
- intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
- intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
- intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
-
- (*lang_hooks.types.register_builtin_type) (intTI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intTI_type_node,
"__builtin_aarch64_simd_ti");
- (*lang_hooks.types.register_builtin_type) (intEI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intEI_type_node,
"__builtin_aarch64_simd_ei");
- (*lang_hooks.types.register_builtin_type) (intOI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intOI_type_node,
"__builtin_aarch64_simd_oi");
- (*lang_hooks.types.register_builtin_type) (intCI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intCI_type_node,
"__builtin_aarch64_simd_ci");
- (*lang_hooks.types.register_builtin_type) (intXI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intXI_type_node,
"__builtin_aarch64_simd_xi");
- /* Pointers to vector types. */
- V8QI_pointer_node = build_pointer_type (V8QI_type_node);
- V4HI_pointer_node = build_pointer_type (V4HI_type_node);
- V2SI_pointer_node = build_pointer_type (V2SI_type_node);
- V2SF_pointer_node = build_pointer_type (V2SF_type_node);
- V16QI_pointer_node = build_pointer_type (V16QI_type_node);
- V8HI_pointer_node = build_pointer_type (V8HI_type_node);
- V4SI_pointer_node = build_pointer_type (V4SI_type_node);
- V4SF_pointer_node = build_pointer_type (V4SF_type_node);
- V2DI_pointer_node = build_pointer_type (V2DI_type_node);
- V2DF_pointer_node = build_pointer_type (V2DF_type_node);
-
- /* Operations which return results as pairs. */
- void_ftype_pv8qi_v8qi_v8qi =
- build_function_type_list (void_type_node, V8QI_pointer_node,
- V8QI_type_node, V8QI_type_node, NULL);
- void_ftype_pv4hi_v4hi_v4hi =
- build_function_type_list (void_type_node, V4HI_pointer_node,
- V4HI_type_node, V4HI_type_node, NULL);
- void_ftype_pv2si_v2si_v2si =
- build_function_type_list (void_type_node, V2SI_pointer_node,
- V2SI_type_node, V2SI_type_node, NULL);
- void_ftype_pv2sf_v2sf_v2sf =
- build_function_type_list (void_type_node, V2SF_pointer_node,
- V2SF_type_node, V2SF_type_node, NULL);
- void_ftype_pdi_di_di =
- build_function_type_list (void_type_node, intDI_pointer_node,
- aarch64_simd_intDI_type_node,
- aarch64_simd_intDI_type_node, NULL);
- void_ftype_pv16qi_v16qi_v16qi =
- build_function_type_list (void_type_node, V16QI_pointer_node,
- V16QI_type_node, V16QI_type_node, NULL);
- void_ftype_pv8hi_v8hi_v8hi =
- build_function_type_list (void_type_node, V8HI_pointer_node,
- V8HI_type_node, V8HI_type_node, NULL);
- void_ftype_pv4si_v4si_v4si =
- build_function_type_list (void_type_node, V4SI_pointer_node,
- V4SI_type_node, V4SI_type_node, NULL);
- void_ftype_pv4sf_v4sf_v4sf =
- build_function_type_list (void_type_node, V4SF_pointer_node,
- V4SF_type_node, V4SF_type_node, NULL);
- void_ftype_pv2di_v2di_v2di =
- build_function_type_list (void_type_node, V2DI_pointer_node,
- V2DI_type_node, V2DI_type_node, NULL);
- void_ftype_pv2df_v2df_v2df =
- build_function_type_list (void_type_node, V2DF_pointer_node,
- V2DF_type_node, V2DF_type_node, NULL);
-
- dreg_types[0] = V8QI_type_node;
- dreg_types[1] = V4HI_type_node;
- dreg_types[2] = V2SI_type_node;
- dreg_types[3] = V2SF_type_node;
- dreg_types[4] = aarch64_simd_intDI_type_node;
- dreg_types[5] = aarch64_simd_double_type_node;
-
- qreg_types[0] = V16QI_type_node;
- qreg_types[1] = V8HI_type_node;
- qreg_types[2] = V4SI_type_node;
- qreg_types[3] = V4SF_type_node;
- qreg_types[4] = V2DI_type_node;
- qreg_types[5] = V2DF_type_node;
-
- /* If NUM_DREG_TYPES != NUM_QREG_TYPES, we will need separate nested loops
- for qreg and dreg reinterp inits. */
- for (i = 0; i < NUM_DREG_TYPES; i++)
- {
- int j;
- for (j = 0; j < NUM_DREG_TYPES; j++)
- {
- reinterp_ftype_dreg[i][j]
- = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
- reinterp_ftype_qreg[i][j]
- = build_function_type_list (qreg_types[i], qreg_types[j], NULL);
- }
- }
+ /* Unsigned integer types for various mode sizes. */
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUQI_type_node,
+ "__builtin_aarch64_simd_uqi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUHI_type_node,
+ "__builtin_aarch64_simd_uhi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUSI_type_node,
+ "__builtin_aarch64_simd_usi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUDI_type_node,
+ "__builtin_aarch64_simd_udi");
for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
{
+ bool print_type_signature_p = false;
+ char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 };
aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
const char *const modenames[] =
- {
- "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
- "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
- "ti", "ei", "oi", "xi", "si", "sf", "hi", "qi"
- };
+ {
+ "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
+ "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
+ "ti", "ei", "oi", "xi", "si", "sf", "hi", "qi"
+ };
+ const enum machine_mode modes[] =
+ {
+ V8QImode, V4HImode, V2SImode, V2SFmode, DImode, DFmode,
+ V16QImode, V8HImode, V4SImode, V4SFmode, V2DImode,
+ V2DFmode, TImode, EImode, OImode, XImode, SImode,
+ SFmode, HImode, QImode
+ };
char namebuf[60];
tree ftype = NULL;
tree fndecl = NULL;
- int is_load = 0;
- int is_store = 0;
gcc_assert (ARRAY_SIZE (modenames) == T_MAX);
d->fcode = fcode;
- switch (d->itype)
+ /* We must track two variables here. op_num is
+ the operand number as in the RTL pattern. This is
+ required to access the mode (e.g. V4SF mode) of the
+ argument, from which the base type can be derived.
+ arg_num is an index into the qualifiers data, which
+ gives qualifiers to the type (e.g. const unsigned).
+ The reason these two variables may differ by one is the
+ void return type. While all return types take the 0th entry
+ in the qualifiers array, there is no operand for them in the
+ RTL pattern. */
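+ /* A concrete illustration of this bookkeeping: a builtin whose
+ pattern has two operands and a non-void return starts with
+ op_num == arg_num == 1, so the final iteration (0, 0) of the
+ argument-building loop below fills in the return type; a void
+ builtin with the same pattern starts at op_num == 1, arg_num == 2,
+ never reaches arg_num == 0 inside the loop, and so keeps
+ void_type_node as its return. */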
+ int op_num = insn_data[d->code].n_operands - 1;
+ int arg_num = d->qualifiers[0] & qualifier_void
+ ? op_num + 1
+ : op_num;
+ tree return_type = void_type_node, args = void_list_node;
+ tree eltype;
+
+ /* Build a function type directly from the insn_data for this
+ builtin. The build_function_type () function takes care of
+ removing duplicates for us. */
+ for (; op_num >= 0; arg_num--, op_num--)
{
- case AARCH64_SIMD_LOAD1:
- case AARCH64_SIMD_LOAD1LANE:
- case AARCH64_SIMD_LOADSTRUCT:
- case AARCH64_SIMD_LOADSTRUCTLANE:
- is_load = 1;
- /* Fall through. */
- case AARCH64_SIMD_STORE1:
- case AARCH64_SIMD_STORE1LANE:
- case AARCH64_SIMD_STORESTRUCT:
- case AARCH64_SIMD_STORESTRUCTLANE:
- if (!is_load)
- is_store = 1;
- /* Fall through. */
- case AARCH64_SIMD_UNOP:
- case AARCH64_SIMD_BINOP:
- case AARCH64_SIMD_TERNOP:
- case AARCH64_SIMD_QUADOP:
- case AARCH64_SIMD_COMBINE:
- case AARCH64_SIMD_CONVERT:
- case AARCH64_SIMD_CREATE:
- case AARCH64_SIMD_DUP:
- case AARCH64_SIMD_DUPLANE:
- case AARCH64_SIMD_FIXCONV:
- case AARCH64_SIMD_GETLANE:
- case AARCH64_SIMD_LANEMAC:
- case AARCH64_SIMD_LANEMUL:
- case AARCH64_SIMD_LANEMULH:
- case AARCH64_SIMD_LANEMULL:
- case AARCH64_SIMD_LOGICBINOP:
- case AARCH64_SIMD_SCALARMAC:
- case AARCH64_SIMD_SCALARMUL:
- case AARCH64_SIMD_SCALARMULH:
- case AARCH64_SIMD_SCALARMULL:
- case AARCH64_SIMD_SELECT:
- case AARCH64_SIMD_SETLANE:
- case AARCH64_SIMD_SHIFTACC:
- case AARCH64_SIMD_SHIFTIMM:
- case AARCH64_SIMD_SHIFTINSERT:
- case AARCH64_SIMD_SPLIT:
- case AARCH64_SIMD_VTBL:
- case AARCH64_SIMD_VTBX:
- {
- int k;
- tree return_type = void_type_node, args = void_list_node;
- tree eltype;
- /* Build a function type directly from the insn_data for this
- builtin. The build_function_type () function takes care of
- removing duplicates for us. */
-
- for (k = insn_data[d->code].n_operands -1; k >= 0; k--)
- {
- /* Skip an internal operand for vget_{low, high}. */
- if (k == 2 && d->itype == AARCH64_SIMD_SPLIT)
- continue;
-
- if (is_load && k == 1)
- {
- /* AdvSIMD load patterns always have the memory operand
- (a DImode pointer) in the operand 1 position. We
- want a const pointer to the element type in that
- position. */
- gcc_assert (insn_data[d->code].operand[k].mode == DImode);
-
- switch (d->mode)
- {
- case T_V8QI:
- case T_V16QI:
- eltype = const_intQI_pointer_node;
- break;
-
- case T_V4HI:
- case T_V8HI:
- eltype = const_intHI_pointer_node;
- break;
-
- case T_V2SI:
- case T_V4SI:
- eltype = const_intSI_pointer_node;
- break;
-
- case T_V2SF:
- case T_V4SF:
- eltype = const_float_pointer_node;
- break;
-
- case T_DI:
- case T_V2DI:
- eltype = const_intDI_pointer_node;
- break;
-
- case T_DF:
- case T_V2DF:
- eltype = const_double_pointer_node;
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else if (is_store && k == 0)
- {
- /* Similarly, AdvSIMD store patterns use operand 0 as
- the memory location to store to (a DImode pointer).
- Use a pointer to the element type of the store in
- that position. */
- gcc_assert (insn_data[d->code].operand[k].mode == DImode);
-
- switch (d->mode)
- {
- case T_V8QI:
- case T_V16QI:
- eltype = intQI_pointer_node;
- break;
-
- case T_V4HI:
- case T_V8HI:
- eltype = intHI_pointer_node;
- break;
-
- case T_V2SI:
- case T_V4SI:
- eltype = intSI_pointer_node;
- break;
-
- case T_V2SF:
- case T_V4SF:
- eltype = float_pointer_node;
- break;
-
- case T_DI:
- case T_V2DI:
- eltype = intDI_pointer_node;
- break;
-
- case T_DF:
- case T_V2DF:
- eltype = double_pointer_node;
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else
- {
- switch (insn_data[d->code].operand[k].mode)
- {
- case VOIDmode:
- eltype = void_type_node;
- break;
- /* Scalars. */
- case QImode:
- eltype = aarch64_simd_intQI_type_node;
- break;
- case HImode:
- eltype = aarch64_simd_intHI_type_node;
- break;
- case SImode:
- eltype = aarch64_simd_intSI_type_node;
- break;
- case SFmode:
- eltype = aarch64_simd_float_type_node;
- break;
- case DFmode:
- eltype = aarch64_simd_double_type_node;
- break;
- case DImode:
- eltype = aarch64_simd_intDI_type_node;
- break;
- case TImode:
- eltype = intTI_type_node;
- break;
- case EImode:
- eltype = intEI_type_node;
- break;
- case OImode:
- eltype = intOI_type_node;
- break;
- case CImode:
- eltype = intCI_type_node;
- break;
- case XImode:
- eltype = intXI_type_node;
- break;
- /* 64-bit vectors. */
- case V8QImode:
- eltype = V8QI_type_node;
- break;
- case V4HImode:
- eltype = V4HI_type_node;
- break;
- case V2SImode:
- eltype = V2SI_type_node;
- break;
- case V2SFmode:
- eltype = V2SF_type_node;
- break;
- /* 128-bit vectors. */
- case V16QImode:
- eltype = V16QI_type_node;
- break;
- case V8HImode:
- eltype = V8HI_type_node;
- break;
- case V4SImode:
- eltype = V4SI_type_node;
- break;
- case V4SFmode:
- eltype = V4SF_type_node;
- break;
- case V2DImode:
- eltype = V2DI_type_node;
- break;
- case V2DFmode:
- eltype = V2DF_type_node;
- break;
- default:
- gcc_unreachable ();
- }
- }
-
- if (k == 0 && !is_store)
- return_type = eltype;
- else
- args = tree_cons (NULL_TREE, eltype, args);
- }
- ftype = build_function_type (return_type, args);
- }
- break;
+ enum machine_mode op_mode = insn_data[d->code].operand[op_num].mode;
+ enum aarch64_type_qualifiers qualifiers = d->qualifiers[arg_num];
- case AARCH64_SIMD_RESULTPAIR:
- {
- switch (insn_data[d->code].operand[1].mode)
- {
- case V8QImode:
- ftype = void_ftype_pv8qi_v8qi_v8qi;
- break;
- case V4HImode:
- ftype = void_ftype_pv4hi_v4hi_v4hi;
- break;
- case V2SImode:
- ftype = void_ftype_pv2si_v2si_v2si;
- break;
- case V2SFmode:
- ftype = void_ftype_pv2sf_v2sf_v2sf;
- break;
- case DImode:
- ftype = void_ftype_pdi_di_di;
- break;
- case V16QImode:
- ftype = void_ftype_pv16qi_v16qi_v16qi;
- break;
- case V8HImode:
- ftype = void_ftype_pv8hi_v8hi_v8hi;
- break;
- case V4SImode:
- ftype = void_ftype_pv4si_v4si_v4si;
- break;
- case V4SFmode:
- ftype = void_ftype_pv4sf_v4sf_v4sf;
- break;
- case V2DImode:
- ftype = void_ftype_pv2di_v2di_v2di;
- break;
- case V2DFmode:
- ftype = void_ftype_pv2df_v2df_v2df;
- break;
- default:
- gcc_unreachable ();
- }
- }
- break;
+ if (qualifiers & qualifier_unsigned)
+ {
+ type_signature[arg_num] = 'u';
+ print_type_signature_p = true;
+ }
+ else
+ type_signature[arg_num] = 's';
+
+ /* Skip an internal operand for vget_{low, high}. */
+ if (qualifiers & qualifier_internal)
+ continue;
+
+ /* Some builtins have different user-facing types
+ for certain arguments, encoded in d->mode. */
+ if (qualifiers & qualifier_map_mode)
+ op_mode = modes[d->mode];
+
+ /* For pointers, we want a pointer to the basic type
+ of the vector. */
+ if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
+ op_mode = GET_MODE_INNER (op_mode);
+
+ eltype = aarch64_build_type (op_mode,
+ qualifiers & qualifier_unsigned);
+
+ /* Add qualifiers. */
+ if (qualifiers & qualifier_const)
+ eltype = build_qualified_type (eltype, TYPE_QUAL_CONST);
+
+ if (qualifiers & qualifier_pointer)
+ eltype = build_pointer_type (eltype);
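+ /* Taken together, these transformations rebuild the pointer types the
+ old switch constructed by hand: if, say, an argument combines
+ qualifier_const, qualifier_pointer and qualifier_map_mode and
+ d->mode selects V4SI, op_mode is remapped to V4SImode, narrowed to
+ SImode by GET_MODE_INNER, and the result is a const-qualified
+ pointer to the 32-bit element type. */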
+
+ /* If we have reached arg_num == 0, we are at a non-void
+ return type. Otherwise, we are still processing
+ arguments. */
+ if (arg_num == 0)
+ return_type = eltype;
+ else
+ args = tree_cons (NULL_TREE, eltype, args);
+ }
- case AARCH64_SIMD_REINTERP:
- {
- /* We iterate over 6 doubleword types, then 6 quadword
- types. */
- int rhs_d = d->mode % NUM_DREG_TYPES;
- int rhs_q = (d->mode - NUM_DREG_TYPES) % NUM_QREG_TYPES;
- switch (insn_data[d->code].operand[0].mode)
- {
- case V8QImode:
- ftype = reinterp_ftype_dreg[0][rhs_d];
- break;
- case V4HImode:
- ftype = reinterp_ftype_dreg[1][rhs_d];
- break;
- case V2SImode:
- ftype = reinterp_ftype_dreg[2][rhs_d];
- break;
- case V2SFmode:
- ftype = reinterp_ftype_dreg[3][rhs_d];
- break;
- case DImode:
- ftype = reinterp_ftype_dreg[4][rhs_d];
- break;
- case DFmode:
- ftype = reinterp_ftype_dreg[5][rhs_d];
- break;
- case V16QImode:
- ftype = reinterp_ftype_qreg[0][rhs_q];
- break;
- case V8HImode:
- ftype = reinterp_ftype_qreg[1][rhs_q];
- break;
- case V4SImode:
- ftype = reinterp_ftype_qreg[2][rhs_q];
- break;
- case V4SFmode:
- ftype = reinterp_ftype_qreg[3][rhs_q];
- break;
- case V2DImode:
- ftype = reinterp_ftype_qreg[4][rhs_q];
- break;
- case V2DFmode:
- ftype = reinterp_ftype_qreg[5][rhs_q];
- break;
- default:
- gcc_unreachable ();
- }
- }
- break;
+ ftype = build_function_type (return_type, args);
- default:
- gcc_unreachable ();
- }
gcc_assert (ftype != NULL);
- snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
- d->name, modenames[d->mode]);
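+ /* When any argument or the return value is unsigned, the per-position
+ signedness string is appended to the name: a hypothetical "foo"
+ builtin with an unsigned V4SI return and two unsigned V4SI
+ arguments would be registered as "__builtin_aarch64_foov4si_uuu",
+ whereas an all-signed builtin keeps the shorter
+ "__builtin_aarch64_foov4si" spelling. */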
+ if (print_type_signature_p)
+ snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s_%s",
+ d->name, modenames[d->mode], type_signature);
+ else
+ snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
+ d->name, modenames[d->mode]);
fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
NULL, NULL_TREE);
@@ -953,8 +668,6 @@ typedef enum
SIMD_ARG_STOP
} builtin_simd_arg;
-#define SIMD_MAX_BUILTIN_ARGS 5
-
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
tree exp, ...)
@@ -1082,99 +795,58 @@ aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
{
aarch64_simd_builtin_datum *d =
&aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
- aarch64_simd_itype itype = d->itype;
enum insn_code icode = d->code;
+ builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS];
+ int num_args = insn_data[d->code].n_operands;
+ int is_void = 0;
+ int k;
- switch (itype)
- {
- case AARCH64_SIMD_UNOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
+ is_void = !!(d->qualifiers[0] & qualifier_void);
- case AARCH64_SIMD_BINOP:
- {
- rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
- /* Handle constants only if the predicate allows it. */
- bool op1_const_int_p =
- (CONST_INT_P (arg2)
- && (*insn_data[icode].operand[2].predicate)
- (arg2, insn_data[icode].operand[2].mode));
- return aarch64_simd_expand_args
- (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- op1_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
- }
+ num_args += is_void;
+
+ for (k = 1; k < num_args; k++)
+ {
+ /* We have four arrays of data, each indexed in a different fashion.
+ qualifiers - element 0 always describes the function return type.
+ operands - element 0 is either the operand for return value (if
+ the function has a non-void return type) or the operand for the
+ first argument.
+ expr_args - element 0 always holds the first argument.
+ args - element 0 is always used for the return type. */
+ int qualifiers_k = k;
+ int operands_k = k - is_void;
+ int expr_args_k = k - 1;
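+ /* For example, the second user-visible argument (k == 2) of a void
+ builtin is described by qualifiers[2], fetched with
+ CALL_EXPR_ARG (exp, 1) and validated against RTL operand 1;
+ for a value-returning builtin the same argument still uses
+ qualifiers[2] and CALL_EXPR_ARG (exp, 1) but maps to operand 2. */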
+
+ if (d->qualifiers[qualifiers_k] & qualifier_immediate)
+ args[k] = SIMD_ARG_CONSTANT;
+ else if (d->qualifiers[qualifiers_k] & qualifier_maybe_immediate)
+ {
+ rtx arg
+ = expand_normal (CALL_EXPR_ARG (exp,
+ (expr_args_k)));
+ /* Handle constants only if the predicate allows it. */
+ bool op_const_int_p =
+ (CONST_INT_P (arg)
+ && (*insn_data[icode].operand[operands_k].predicate)
+ (arg, insn_data[icode].operand[operands_k].mode));
+ args[k] = op_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG;
+ }
+ else
+ args[k] = SIMD_ARG_COPY_TO_REG;
- case AARCH64_SIMD_TERNOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_QUADOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
- case AARCH64_SIMD_LOAD1:
- case AARCH64_SIMD_LOADSTRUCT:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_STORE1:
- case AARCH64_SIMD_STORESTRUCT:
- return aarch64_simd_expand_args (target, icode, 0, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_REINTERP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_CREATE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_COMBINE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_GETLANE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SETLANE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SHIFTIMM:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SHIFTACC:
- case AARCH64_SIMD_SHIFTINSERT:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- default:
- gcc_unreachable ();
}
+ args[k] = SIMD_ARG_STOP;
+
+ /* The interface to aarch64_simd_expand_args expects a 0 if
+ the function is void, and a 1 if it is not. */
+ return aarch64_simd_expand_args
+ (target, icode, !is_void, exp,
+ args[1],
+ args[2],
+ args[3],
+ args[4],
+ SIMD_ARG_STOP);
}
/* Expand an expression EXP that calls a built-in function,
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index f9eb975a559..b175e6c4d9d 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -26,6 +26,10 @@
#include "rtl.h"
#include "insn-attr.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "regs.h"
#include "df.h"
#include "hard-reg-set.h"
@@ -1796,7 +1800,8 @@ aarch64_save_or_restore_fprs (int start_offset, int increment,
unsigned regno;
unsigned regno2;
rtx insn;
- rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
+ rtx (*gen_mem_ref)(enum machine_mode, rtx)
+ = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
@@ -1839,16 +1844,17 @@ aarch64_save_or_restore_fprs (int start_offset, int increment,
( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem,
gen_rtx_REG (DFmode, regno2), mem2));
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno));
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno2));
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DFmode, regno));
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DFmode, regno2));
}
/* The first part of a frame-related parallel insn
is always assumed to be relevant to the frame
calculations; subsequent parts, are only
frame-related if explicitly marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
- 1)) = 1;
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
regno = regno2;
start_offset += increment * 2;
}
@@ -1859,7 +1865,8 @@ aarch64_save_or_restore_fprs (int start_offset, int increment,
else
{
insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem);
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DImode, regno));
}
start_offset += increment;
}
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 8b55a7bb7b5..228115f50fc 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -789,13 +789,13 @@ do { \
/* Emit rtl for profiling. Output assembler code to FILE
to call "_mcount" for profiling a function entry. */
-#define PROFILE_HOOK(LABEL) \
-{ \
- rtx fun,lr; \
- lr = get_hard_reg_initial_val (Pmode, LR_REGNUM); \
- fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \
- emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lr, Pmode); \
-}
+#define PROFILE_HOOK(LABEL) \
+ { \
+ rtx fun, lr; \
+ lr = get_hard_reg_initial_val (Pmode, LR_REGNUM); \
+ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \
+ emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lr, Pmode); \
+ }
/* All the work done in PROFILE_HOOK, but still required. */
#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 47f3eb3f653..22051ec27e6 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -125,155 +125,6 @@
(define_attr "mode2" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
(const_string "unknown"))
-; The "v8type" attribute is used to for fine grained classification of
-; AArch64 instructions. This table briefly explains the meaning of each type.
-
-; adc add/subtract with carry.
-; adcs add/subtract with carry (setting condition flags).
-; adr calculate address.
-; alu simple alu instruction (no memory or fp regs access).
-; alu_ext simple alu instruction (sign/zero-extended register).
-; alu_shift simple alu instruction, with a source operand shifted by a constant.
-; alus simple alu instruction (setting condition flags).
-; alus_ext simple alu instruction (sign/zero-extended register, setting condition flags).
-; alus_shift simple alu instruction, with a source operand shifted by a constant (setting condition flags).
-; bfm bitfield move operation.
-; branch branch.
-; call subroutine call.
-; ccmp conditional compare.
-; clz count leading zeros/sign bits.
-; csel conditional select.
-; dmb data memory barrier.
-; extend sign/zero-extend (specialised bitfield move).
-; extr extract register-sized bitfield encoding.
-; fpsimd_load load single floating point / simd scalar register from memory.
-; fpsimd_load2 load pair of floating point / simd scalar registers from memory.
-; fpsimd_store store single floating point / simd scalar register to memory.
-; fpsimd_store2 store pair floating point / simd scalar registers to memory.
-; fadd floating point add/sub.
-; fccmp floating point conditional compare.
-; fcmp floating point comparison.
-; fconst floating point load immediate.
-; fcsel floating point conditional select.
-; fcvt floating point convert (float to float).
-; fcvtf2i floating point convert (float to integer).
-; fcvti2f floating point convert (integer to float).
-; fdiv floating point division operation.
-; ffarith floating point abs, neg or cpy.
-; fmadd floating point multiply-add/sub.
-; fminmax floating point min/max.
-; fmov floating point move (float to float).
-; fmovf2i floating point move (float to integer).
-; fmovi2f floating point move (integer to float).
-; fmul floating point multiply.
-; frint floating point round to integral.
-; fsqrt floating point square root.
-; load_acq load-acquire.
-; load load single general register from memory
-; load2 load pair of general registers from memory
-; logic logical operation (register).
-; logic_imm and/or/xor operation (immediate).
-; logic_shift logical operation with shift.
-; logics logical operation (register, setting condition flags).
-; logics_imm and/or/xor operation (immediate, setting condition flags).
-; logics_shift logical operation with shift (setting condition flags).
-; madd integer multiply-add/sub.
-; maddl widening integer multiply-add/sub.
-; misc miscellaneous - any type that doesn't fit into the rest.
-; move integer move operation.
-; move2 double integer move operation.
-; movk move 16-bit immediate with keep.
-; movz move 16-bit immmediate with zero/one.
-; mrs system/special register move.
-; mulh 64x64 to 128-bit multiply (high part).
-; mull widening multiply.
-; mult integer multiply instruction.
-; prefetch memory prefetch.
-; rbit reverse bits.
-; rev reverse bytes.
-; sdiv integer division operation (signed).
-; shift variable shift operation.
-; shift_imm immediate shift operation (specialised bitfield move).
-; store_rel store-release.
-; store store single general register to memory.
-; store2 store pair of general registers to memory.
-; udiv integer division operation (unsigned).
-
-(define_attr "v8type"
- "adc,\
- adcs,\
- adr,\
- alu,\
- alu_ext,\
- alu_shift,\
- alus,\
- alus_ext,\
- alus_shift,\
- bfm,\
- branch,\
- call,\
- ccmp,\
- clz,\
- csel,\
- dmb,\
- div,\
- div64,\
- extend,\
- extr,\
- fpsimd_load,\
- fpsimd_load2,\
- fpsimd_store2,\
- fpsimd_store,\
- fadd,\
- fccmp,\
- fcvt,\
- fcvtf2i,\
- fcvti2f,\
- fcmp,\
- fconst,\
- fcsel,\
- fdiv,\
- ffarith,\
- fmadd,\
- fminmax,\
- fmov,\
- fmovf2i,\
- fmovi2f,\
- fmul,\
- frint,\
- fsqrt,\
- load_acq,\
- load1,\
- load2,\
- logic,\
- logic_imm,\
- logic_shift,\
- logics,\
- logics_imm,\
- logics_shift,\
- madd,\
- maddl,\
- misc,\
- move,\
- move2,\
- movk,\
- movz,\
- mrs,\
- mulh,\
- mull,\
- mult,\
- prefetch,\
- rbit,\
- rev,\
- sdiv,\
- shift,\
- shift_imm,\
- store_rel,\
- store1,\
- store2,\
- udiv"
- (const_string "alu"))
-
; The "type" attribute is included here from the AArch32 backend to be able
; to share pipeline descriptions.
(include "../arm/types.md")
@@ -328,16 +179,14 @@
[(set (pc) (match_operand:DI 0 "register_operand" "r"))]
""
"br\\t%0"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
(define_insn "jump"
[(set (pc) (label_ref (match_operand 0 "" "")))]
""
"b\\t%l0"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
(define_expand "cbranch<mode>4"
@@ -375,8 +224,7 @@
(pc)))]
""
"b%m0\\t%l2"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
(define_expand "casesi"
@@ -440,7 +288,6 @@
return aarch64_output_casesi (operands);
"
[(set_attr "length" "16")
- (set_attr "v8type" "branch")
(set_attr "type" "branch")]
)
@@ -448,7 +295,7 @@
[(unspec[(const_int 0)] UNSPEC_NOP)]
""
"nop"
- [(set_attr "v8type" "misc")]
+ [(set_attr "type" "no_insn")]
)
(define_expand "prologue"
@@ -482,8 +329,7 @@
[(return)]
""
"ret"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
(define_insn "eh_return"
@@ -491,8 +337,7 @@
UNSPECV_EH_RETURN)]
""
"#"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
@@ -513,8 +358,7 @@
(pc)))]
""
"<cbz>\\t%<w>0, %l1"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
@@ -533,8 +377,7 @@
return \"ubfx\\t%<w>3, %<w>0, %1, #1\;<cbz>\\t%<w>3, %l2\";
return \"<tbz>\\t%<w>0, %1, %l2\";
"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")
+ [(set_attr "type" "branch")
(set_attr "mode" "<MODE>")
(set (attr "length")
(if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
@@ -555,8 +398,7 @@
return \"ubfx\\t%<w>2, %<w>0, <sizem1>, #1\;<cbz>\\t%<w>2, %l1\";
return \"<tbz>\\t%<w>0, <sizem1>, %l1\";
"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")
+ [(set_attr "type" "branch")
(set_attr "mode" "<MODE>")
(set (attr "length")
(if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
@@ -601,8 +443,7 @@
(clobber (reg:DI LR_REGNUM))]
""
"blr\\t%0"
- [(set_attr "v8type" "call")
- (set_attr "type" "call")]
+ [(set_attr "type" "call")]
)
(define_insn "*call_symbol"
@@ -613,8 +454,7 @@
"GET_CODE (operands[0]) == SYMBOL_REF
&& !aarch64_is_long_call_p (operands[0])"
"bl\\t%a0"
- [(set_attr "v8type" "call")
- (set_attr "type" "call")]
+ [(set_attr "type" "call")]
)
(define_expand "call_value"
@@ -651,8 +491,7 @@
(clobber (reg:DI LR_REGNUM))]
""
"blr\\t%1"
- [(set_attr "v8type" "call")
- (set_attr "type" "call")]
+ [(set_attr "type" "call")]
)
@@ -665,8 +504,7 @@
"GET_CODE (operands[1]) == SYMBOL_REF
&& !aarch64_is_long_call_p (operands[1])"
"bl\\t%a1"
- [(set_attr "v8type" "call")
- (set_attr "type" "call")]
+ [(set_attr "type" "call")]
)
(define_expand "sibcall"
@@ -701,8 +539,7 @@
(use (match_operand 2 "" ""))]
"GET_CODE (operands[0]) == SYMBOL_REF"
"b\\t%a0"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
@@ -714,8 +551,7 @@
(use (match_operand 3 "" ""))]
"GET_CODE (operands[1]) == SYMBOL_REF"
"b\\t%a1"
- [(set_attr "v8type" "branch")
- (set_attr "type" "branch")]
+ [(set_attr "type" "branch")]
)
;; Call subroutine returning any type.
@@ -792,8 +628,7 @@
gcc_unreachable ();
}
}
- [(set_attr "v8type" "move,alu,alu,load1,load1,store1,store1,*,*,*")
- (set_attr "type" "mov_reg,mov_imm,mov_imm,load1,load1,store1,store1,\
+ [(set_attr "type" "mov_reg,mov_imm,mov_imm,load1,load1,store1,store1,\
neon_from_gp<q>,neon_from_gp<q>, neon_dup")
(set_attr "simd" "*,*,yes,*,*,*,*,yes,yes,yes")
(set_attr "mode" "<MODE>")]
@@ -834,8 +669,7 @@
fmov\\t%s0, %w1
fmov\\t%w0, %s1
fmov\\t%s0, %s1"
- [(set_attr "v8type" "move,move,move,alu,load1,load1,store1,store1,adr,adr,fmov,fmov,fmov")
- (set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\
+ [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\
adr,adr,fmov,fmov,fmov")
(set_attr "mode" "SI")
(set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes")]
@@ -861,8 +695,7 @@
fmov\\t%x0, %d1
fmov\\t%d0, %d1
movi\\t%d0, %1"
- [(set_attr "v8type" "move,move,move,alu,load1,load1,store1,store1,adr,adr,fmov,fmov,fmov,fmov")
- (set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\
+ [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\
adr,adr,fmov,fmov,fmov,fmov")
(set_attr "mode" "DI")
(set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes,*")
@@ -877,8 +710,7 @@
"UINTVAL (operands[1]) < GET_MODE_BITSIZE (<MODE>mode)
&& UINTVAL (operands[1]) % 16 == 0"
"movk\\t%<w>0, %X2, lsl %1"
- [(set_attr "v8type" "movk")
- (set_attr "type" "mov_imm")
+ [(set_attr "type" "mov_imm")
(set_attr "mode" "<MODE>")]
)
@@ -909,9 +741,7 @@
stp\\txzr, xzr, %0
ldr\\t%q0, %1
str\\t%q1, %0"
- [(set_attr "v8type" "move2,fmovi2f,fmovf2i,*, \
- load2,store2,store2,fpsimd_load,fpsimd_store")
- (set_attr "type" "multiple,f_mcr,f_mrc,neon_logic_q, \
+ [(set_attr "type" "multiple,f_mcr,f_mrc,neon_logic_q, \
load2,store2,store2,f_loadd,f_stored")
(set_attr "mode" "DI,DI,DI,TI,DI,DI,DI,TI,TI")
(set_attr "length" "8,8,8,4,4,4,4,4,4")
@@ -963,10 +793,7 @@
ldr\\t%w0, %1
str\\t%w1, %0
mov\\t%w0, %w1"
- [(set_attr "v8type" "fmovi2f,fmovf2i,\
- fmov,fconst,fpsimd_load,\
- fpsimd_store,fpsimd_load,fpsimd_store,fmov")
- (set_attr "type" "f_mcr,f_mrc,fmov,fconsts,\
+ [(set_attr "type" "f_mcr,f_mrc,fmov,fconsts,\
f_loads,f_stores,f_loads,f_stores,fmov")
(set_attr "mode" "SF")]
)
@@ -986,10 +813,7 @@
ldr\\t%x0, %1
str\\t%x1, %0
mov\\t%x0, %x1"
- [(set_attr "v8type" "fmovi2f,fmovf2i,\
- fmov,fconst,fpsimd_load,\
- fpsimd_store,fpsimd_load,fpsimd_store,move")
- (set_attr "type" "f_mcr,f_mrc,fmov,fconstd,\
+ [(set_attr "type" "f_mcr,f_mrc,fmov,fconstd,\
f_loadd,f_stored,f_loadd,f_stored,mov_reg")
(set_attr "mode" "DF")]
)
@@ -1028,8 +852,7 @@
str\\t%q1, %0
ldp\\t%0, %H0, %1
stp\\t%1, %H1, %0"
- [(set_attr "v8type" "logic,move2,fmovi2f,fmovf2i,fconst,fconst,fpsimd_load,fpsimd_store,fpsimd_load2,fpsimd_store2")
- (set_attr "type" "logic_reg,multiple,f_mcr,f_mrc,fconstd,fconstd,\
+ [(set_attr "type" "logic_reg,multiple,f_mcr,f_mrc,fconstd,fconstd,\
f_loadd,f_stored,neon_load1_2reg,neon_store1_2reg")
(set_attr "mode" "DF,DF,DF,DF,DF,DF,TF,TF,DF,DF")
(set_attr "length" "4,8,8,8,4,4,4,4,4,4")
@@ -1060,8 +883,7 @@
XEXP (operands[1], 0),
GET_MODE_SIZE (<MODE>mode)))"
"ldp\\t%<w>0, %<w>2, %1"
- [(set_attr "v8type" "load2")
- (set_attr "type" "load2")
+ [(set_attr "type" "load2")
(set_attr "mode" "<MODE>")]
)
@@ -1077,8 +899,7 @@
XEXP (operands[0], 0),
GET_MODE_SIZE (<MODE>mode)))"
"stp\\t%<w>1, %<w>3, %0"
- [(set_attr "v8type" "store2")
- (set_attr "type" "store2")
+ [(set_attr "type" "store2")
(set_attr "mode" "<MODE>")]
)
@@ -1094,8 +915,7 @@
XEXP (operands[1], 0),
GET_MODE_SIZE (<MODE>mode)))"
"ldp\\t%<w>0, %<w>2, %1"
- [(set_attr "v8type" "fpsimd_load2")
- (set_attr "type" "neon_load1_2reg<q>")
+ [(set_attr "type" "neon_load1_2reg<q>")
(set_attr "mode" "<MODE>")]
)
@@ -1111,8 +931,7 @@
XEXP (operands[0], 0),
GET_MODE_SIZE (<MODE>mode)))"
"stp\\t%<w>1, %<w>3, %0"
- [(set_attr "v8type" "fpsimd_store2")
- (set_attr "type" "neon_store1_2reg<q>")
+ [(set_attr "type" "neon_store1_2reg<q>")
(set_attr "mode" "<MODE>")]
)
@@ -1131,8 +950,7 @@
(match_operand:P 5 "const_int_operand" "n"))))])]
"INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
"ldp\\t%<w>2, %<w>3, [%1], %4"
- [(set_attr "v8type" "load2")
- (set_attr "type" "load2")
+ [(set_attr "type" "load2")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1151,8 +969,7 @@
(match_operand:GPI 3 "register_operand" "r"))])]
"INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
"stp\\t%<w>2, %<w>3, [%0, %4]!"
- [(set_attr "v8type" "store2")
- (set_attr "type" "store2")
+ [(set_attr "type" "store2")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1173,8 +990,7 @@
"@
sxtw\t%0, %w1
ldrsw\t%0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "type" "extend,load1")
+ [(set_attr "type" "extend,load1")
(set_attr "mode" "DI")]
)
@@ -1185,8 +1001,7 @@
"@
uxtw\t%0, %w1
ldr\t%w0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "type" "extend,load1")
+ [(set_attr "type" "extend,load1")
(set_attr "mode" "DI")]
)
@@ -1203,8 +1018,7 @@
"@
sxt<SHORT:size>\t%<GPI:w>0, %w1
ldrs<SHORT:size>\t%<GPI:w>0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "type" "extend,load1")
+ [(set_attr "type" "extend,load1")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1216,8 +1030,7 @@
uxt<SHORT:size>\t%<GPI:w>0, %w1
ldr<SHORT:size>\t%w0, %1
ldr\t%<SHORT:size>0, %1"
- [(set_attr "v8type" "extend,load1,load1")
- (set_attr "type" "extend,load1,load1")
+ [(set_attr "type" "extend,load1,load1")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1234,8 +1047,7 @@
"@
<su>xtb\t%w0, %w1
<ldrxt>b\t%w0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "type" "extend,load1")
+ [(set_attr "type" "extend,load1")
(set_attr "mode" "HI")]
)
@@ -1279,8 +1091,7 @@
add\\t%w0, %w1, %2
add\\t%w0, %w1, %w2
sub\\t%w0, %w1, #%n2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_imm,alu_reg,alu_imm")
+ [(set_attr "type" "alu_imm,alu_reg,alu_imm")
(set_attr "mode" "SI")]
)
@@ -1296,8 +1107,7 @@
add\\t%w0, %w1, %2
add\\t%w0, %w1, %w2
sub\\t%w0, %w1, #%n2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_imm,alu_reg,alu_imm")
+ [(set_attr "type" "alu_imm,alu_reg,alu_imm")
(set_attr "mode" "SI")]
)
@@ -1313,8 +1123,7 @@
add\\t%x0, %x1, %x2
sub\\t%x0, %x1, #%n2
add\\t%d0, %d1, %d2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_imm,alu_reg,alu_imm,alu_reg")
+ [(set_attr "type" "alu_imm,alu_reg,alu_imm,alu_reg")
(set_attr "mode" "DI")
(set_attr "simd" "*,*,*,yes")]
)
@@ -1332,8 +1141,7 @@
adds\\t%<w>0, %<w>1, %<w>2
adds\\t%<w>0, %<w>1, %<w>2
subs\\t%<w>0, %<w>1, #%n2"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg,alus_imm,alus_imm")
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1351,8 +1159,7 @@
adds\\t%w0, %w1, %w2
adds\\t%w0, %w1, %w2
subs\\t%w0, %w1, #%n2"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg,alus_imm,alus_imm")
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")
(set_attr "mode" "SI")]
)
@@ -1369,8 +1176,7 @@
(match_dup 3)))]
""
"adds\\t%<w>0, %<w>3, %<w>1, lsl %p2"
- [(set_attr "v8type" "alus_shift")
- (set_attr "type" "alus_shift_imm")
+ [(set_attr "type" "alus_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1387,8 +1193,7 @@
(mult:GPI (match_dup 2) (match_dup 3))))]
""
"subs\\t%<w>0, %<w>1, %<w>2, lsl %p3"
- [(set_attr "v8type" "alus_shift")
- (set_attr "type" "alus_shift_imm")
+ [(set_attr "type" "alus_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1403,8 +1208,7 @@
(plus:GPI (ANY_EXTEND:GPI (match_dup 1)) (match_dup 2)))]
""
"adds\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alus_ext")
- (set_attr "type" "alus_ext")
+ [(set_attr "type" "alus_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1419,8 +1223,7 @@
(minus:GPI (match_dup 1) (ANY_EXTEND:GPI (match_dup 2))))]
""
"subs\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alus_ext")
- (set_attr "type" "alus_ext")
+ [(set_attr "type" "alus_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1441,8 +1244,7 @@
(match_dup 4)))]
"aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
"adds\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alus_ext")
- (set_attr "type" "alus_ext")
+ [(set_attr "type" "alus_ext")
(set_attr "mode" "<MODE>")]
)
@@ -1463,8 +1265,7 @@
(const_int 0))))]
"aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
"subs\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alus_ext")
- (set_attr "type" "alus_ext")
+ [(set_attr "type" "alus_ext")
(set_attr "mode" "<MODE>")]
)
@@ -1479,8 +1280,7 @@
cmn\\t%<w>0, %<w>1
cmn\\t%<w>0, %<w>1
cmp\\t%<w>0, #%n1"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg,alus_imm,alus_imm")
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1491,8 +1291,7 @@
(match_operand:GPI 1 "register_operand" "r")))]
""
"cmn\\t%<w>1, %<w>0"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg")
+ [(set_attr "type" "alus_reg")
(set_attr "mode" "<MODE>")]
)
@@ -1503,8 +1302,7 @@
(match_operand:GPI 3 "register_operand" "r")))]
""
"add\\t%<w>0, %<w>3, %<w>1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1513,12 +1311,11 @@
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI
(plus:SI (ASHIFT:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
- (match_operand:SI 3 "register_operand" "r"))))]
+ (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
+ (match_operand:SI 3 "register_operand" "r"))))]
""
"add\\t%w0, %w3, %w1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "SI")]
)
@@ -1529,8 +1326,7 @@
(match_operand:GPI 3 "register_operand" "r")))]
""
"add\\t%<w>0, %<w>3, %<w>1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1540,8 +1336,7 @@
(match_operand:GPI 2 "register_operand" "r")))]
""
"add\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1553,8 +1348,7 @@
(match_operand:GPI 2 "register_operand" "r"))))]
""
"add\\t%w0, %w2, %w1, <su>xt<SHORT:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -1566,8 +1360,7 @@
(match_operand:GPI 3 "register_operand" "r")))]
""
"add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1581,8 +1374,7 @@
(match_operand:SI 3 "register_operand" "r"))))]
""
"add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -1594,8 +1386,7 @@
(match_operand:GPI 3 "register_operand" "r")))]
""
"add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1608,8 +1399,7 @@
(match_operand:SI 3 "register_operand" "r"))))]
""
"add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -1623,8 +1413,7 @@
(match_operand:GPI 4 "register_operand" "r")))]
"aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
"add\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<MODE>")]
)
@@ -1640,8 +1429,7 @@
(match_operand:SI 4 "register_operand" "r"))))]
"aarch64_is_extend_from_extract (SImode, operands[2], operands[3])"
"add\\t%w0, %w4, %w1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -1654,8 +1442,7 @@
(match_operand:GPI 2 "register_operand" "r"))))]
""
"adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "<MODE>")]
)
@@ -1670,8 +1457,7 @@
(match_operand:SI 2 "register_operand" "r")))))]
""
"adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "SI")]
)
@@ -1684,8 +1470,7 @@
(geu:GPI (reg:CC CC_REGNUM) (const_int 0))))]
""
"adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "<MODE>")]
)
@@ -1700,8 +1485,7 @@
(geu:SI (reg:CC CC_REGNUM) (const_int 0)))))]
""
"adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "SI")]
)
@@ -1714,8 +1498,7 @@
(match_operand:GPI 2 "register_operand" "r")))]
""
"adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "<MODE>")]
)
@@ -1730,8 +1513,7 @@
(match_operand:SI 2 "register_operand" "r"))))]
""
"adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "SI")]
)
@@ -1744,8 +1526,7 @@
(match_operand:GPI 1 "register_operand" "r")))]
""
"adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "<MODE>")]
)
@@ -1760,8 +1541,7 @@
(match_operand:SI 1 "register_operand" "r"))))]
""
"adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "SI")]
)
@@ -1777,8 +1557,7 @@
operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
INTVAL (operands[3])));
return \"add\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<MODE>")]
)
@@ -1796,8 +1575,7 @@
operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
INTVAL (operands[3])));
return \"add\t%w0, %w4, %w1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -1807,8 +1585,7 @@
(match_operand:SI 2 "register_operand" "r")))]
""
"sub\\t%w0, %w1, %w2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg")
+ [(set_attr "type" "alu_reg")
(set_attr "mode" "SI")]
)
@@ -1820,8 +1597,7 @@
(match_operand:SI 2 "register_operand" "r"))))]
""
"sub\\t%w0, %w1, %w2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg")
+ [(set_attr "type" "alu_reg")
(set_attr "mode" "SI")]
)
@@ -1833,8 +1609,7 @@
"@
sub\\t%x0, %x1, %x2
sub\\t%d0, %d1, %d2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg")
+ [(set_attr "type" "alu_reg, neon_sub")
(set_attr "mode" "DI")
(set_attr "simd" "*,yes")]
)
@@ -1849,8 +1624,7 @@
(minus:GPI (match_dup 1) (match_dup 2)))]
""
"subs\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg")
+ [(set_attr "type" "alus_reg")
(set_attr "mode" "<MODE>")]
)
@@ -1864,8 +1638,7 @@
(zero_extend:DI (minus:SI (match_dup 1) (match_dup 2))))]
""
"subs\\t%w0, %w1, %w2"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg")
+ [(set_attr "type" "alus_reg")
(set_attr "mode" "SI")]
)
@@ -1877,8 +1650,7 @@
(match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
""
"sub\\t%<w>0, %<w>3, %<w>1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1892,8 +1664,7 @@
(match_operand:QI 2 "aarch64_shift_imm_si" "n")))))]
""
"sub\\t%w0, %w3, %w1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "SI")]
)
@@ -1905,8 +1676,7 @@
(match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
""
"sub\\t%<w>0, %<w>3, %<w>1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -1920,8 +1690,7 @@
(match_operand:QI 2 "aarch64_pwr_2_si" "n")))))]
""
"sub\\t%w0, %w3, %w1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "SI")]
)
@@ -1932,8 +1701,7 @@
(match_operand:ALLX 2 "register_operand" "r"))))]
""
"sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1946,8 +1714,7 @@
(match_operand:SHORT 2 "register_operand" "r")))))]
""
"sub\\t%w0, %w1, %w2, <su>xt<SHORT:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -1959,8 +1726,7 @@
(match_operand 3 "aarch64_imm3" "Ui3"))))]
""
"sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size> %3"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -1974,8 +1740,7 @@
(match_operand 3 "aarch64_imm3" "Ui3")))))]
""
"sub\\t%w0, %w1, %w2, <su>xt<SHORT:size> %3"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -1989,8 +1754,7 @@
(const_int 0))))]
"aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
"sub\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<MODE>")]
)
@@ -2006,8 +1770,7 @@
(const_int 0)))))]
"aarch64_is_extend_from_extract (SImode, operands[2], operands[3])"
"sub\\t%w0, %w4, %w1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -2020,8 +1783,7 @@
(match_operand:GPI 2 "register_operand" "r")))]
""
"sbc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "<MODE>")]
)
@@ -2036,8 +1798,7 @@
(match_operand:SI 2 "register_operand" "r"))))]
""
"sbc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "SI")]
)
@@ -2053,8 +1814,7 @@
operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
INTVAL (operands[3])));
return \"sub\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "<MODE>")]
)
@@ -2072,8 +1832,7 @@
operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
INTVAL (operands[3])));
return \"sub\t%w0, %w4, %w1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "type" "alu_ext")
+ [(set_attr "type" "alu_ext")
(set_attr "mode" "SI")]
)
@@ -2105,8 +1864,7 @@
GEN_INT (63)))));
DONE;
}
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg")
+ [(set_attr "type" "alu_reg")
(set_attr "mode" "DI")]
)
@@ -2117,8 +1875,7 @@
"@
neg\\t%<w>0, %<w>1
neg\\t%<rtn>0<vas>, %<rtn>1<vas>"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg, neon_neg<q>")
+ [(set_attr "type" "alu_reg, neon_neg<q>")
(set_attr "simd" "*,yes")
(set_attr "mode" "<MODE>")]
)
@@ -2129,8 +1886,7 @@
(zero_extend:DI (neg:SI (match_operand:SI 1 "register_operand" "r"))))]
""
"neg\\t%w0, %w1"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg")
+ [(set_attr "type" "alu_reg")
(set_attr "mode" "SI")]
)
@@ -2140,8 +1896,7 @@
(match_operand:GPI 1 "register_operand" "r")))]
""
"ngc\\t%<w>0, %<w>1"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "<MODE>")]
)
@@ -2152,8 +1907,7 @@
(match_operand:SI 1 "register_operand" "r"))))]
""
"ngc\\t%w0, %w1"
- [(set_attr "v8type" "adc")
- (set_attr "type" "adc_reg")
+ [(set_attr "type" "adc_reg")
(set_attr "mode" "SI")]
)
@@ -2165,8 +1919,7 @@
(neg:GPI (match_dup 1)))]
""
"negs\\t%<w>0, %<w>1"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg")
+ [(set_attr "type" "alus_reg")
(set_attr "mode" "<MODE>")]
)
@@ -2179,8 +1932,7 @@
(zero_extend:DI (neg:SI (match_dup 1))))]
""
"negs\\t%w0, %w1"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg")
+ [(set_attr "type" "alus_reg")
(set_attr "mode" "SI")]
)
@@ -2195,8 +1947,7 @@
(neg:GPI (ASHIFT:GPI (match_dup 1) (match_dup 2))))]
""
"negs\\t%<w>0, %<w>1, <shift> %2"
- [(set_attr "v8type" "alus_shift")
- (set_attr "type" "alus_shift_imm")
+ [(set_attr "type" "alus_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -2207,8 +1958,7 @@
(match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
""
"neg\\t%<w>0, %<w>1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -2221,8 +1971,7 @@
(match_operand:QI 2 "aarch64_shift_imm_si" "n")))))]
""
"neg\\t%w0, %w1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "SI")]
)
@@ -2233,8 +1982,7 @@
(match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
""
"neg\\t%<w>0, %<w>1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -2247,8 +1995,7 @@
(match_operand:QI 2 "aarch64_pwr_2_si" "n")))))]
""
"neg\\t%w0, %w1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "type" "alu_shift_imm")
+ [(set_attr "type" "alu_shift_imm")
(set_attr "mode" "SI")]
)
@@ -2258,8 +2005,7 @@
(match_operand:GPI 2 "register_operand" "r")))]
""
"mul\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "mult")
- (set_attr "type" "mul")
+ [(set_attr "type" "mul")
(set_attr "mode" "<MODE>")]
)
@@ -2271,8 +2017,7 @@
(match_operand:SI 2 "register_operand" "r"))))]
""
"mul\\t%w0, %w1, %w2"
- [(set_attr "v8type" "mult")
- (set_attr "type" "mul")
+ [(set_attr "type" "mul")
(set_attr "mode" "SI")]
)
@@ -2283,8 +2028,7 @@
(match_operand:GPI 3 "register_operand" "r")))]
""
"madd\\t%<w>0, %<w>1, %<w>2, %<w>3"
- [(set_attr "v8type" "madd")
- (set_attr "type" "mla")
+ [(set_attr "type" "mla")
(set_attr "mode" "<MODE>")]
)
@@ -2297,8 +2041,7 @@
(match_operand:SI 3 "register_operand" "r"))))]
""
"madd\\t%w0, %w1, %w2, %w3"
- [(set_attr "v8type" "madd")
- (set_attr "type" "mla")
+ [(set_attr "type" "mla")
(set_attr "mode" "SI")]
)
@@ -2310,8 +2053,7 @@
""
"msub\\t%<w>0, %<w>1, %<w>2, %<w>3"
- [(set_attr "v8type" "madd")
- (set_attr "type" "mla")
+ [(set_attr "type" "mla")
(set_attr "mode" "<MODE>")]
)
@@ -2325,8 +2067,7 @@
""
"msub\\t%w0, %w1, %w2, %w3"
- [(set_attr "v8type" "madd")
- (set_attr "type" "mla")
+ [(set_attr "type" "mla")
(set_attr "mode" "SI")]
)
@@ -2337,8 +2078,7 @@
""
"mneg\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "mult")
- (set_attr "type" "mul")
+ [(set_attr "type" "mul")
(set_attr "mode" "<MODE>")]
)
@@ -2351,8 +2091,7 @@
""
"mneg\\t%w0, %w1, %w2"
- [(set_attr "v8type" "mult")
- (set_attr "type" "mul")
+ [(set_attr "type" "mul")
(set_attr "mode" "SI")]
)
@@ -2362,8 +2101,7 @@
(ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
""
"<su>mull\\t%0, %w1, %w2"
- [(set_attr "v8type" "mull")
- (set_attr "type" "<su>mull")
+ [(set_attr "type" "<su>mull")
(set_attr "mode" "DI")]
)
@@ -2375,8 +2113,7 @@
(match_operand:DI 3 "register_operand" "r")))]
""
"<su>maddl\\t%0, %w1, %w2, %3"
- [(set_attr "v8type" "maddl")
- (set_attr "type" "<su>mlal")
+ [(set_attr "type" "<su>mlal")
(set_attr "mode" "DI")]
)
@@ -2389,8 +2126,7 @@
(match_operand:SI 2 "register_operand" "r")))))]
""
"<su>msubl\\t%0, %w1, %w2, %3"
- [(set_attr "v8type" "maddl")
- (set_attr "type" "<su>mlal")
+ [(set_attr "type" "<su>mlal")
(set_attr "mode" "DI")]
)
@@ -2401,8 +2137,7 @@
(ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
""
"<su>mnegl\\t%0, %w1, %w2"
- [(set_attr "v8type" "mull")
- (set_attr "type" "<su>mull")
+ [(set_attr "type" "<su>mull")
(set_attr "mode" "DI")]
)
@@ -2416,8 +2151,7 @@
(const_int 64))))]
""
"<su>mulh\\t%0, %1, %2"
- [(set_attr "v8type" "mulh")
- (set_attr "type" "<su>mull")
+ [(set_attr "type" "<su>mull")
(set_attr "mode" "DI")]
)
@@ -2427,8 +2161,7 @@
(match_operand:GPI 2 "register_operand" "r")))]
""
"<su>div\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "<su>div")
- (set_attr "type" "<su>div")
+ [(set_attr "type" "<su>div")
(set_attr "mode" "<MODE>")]
)
@@ -2440,8 +2173,7 @@
(match_operand:SI 2 "register_operand" "r"))))]
""
"<su>div\\t%w0, %w1, %w2"
- [(set_attr "v8type" "<su>div")
- (set_attr "type" "<su>div")
+ [(set_attr "type" "<su>div")
(set_attr "mode" "SI")]
)
@@ -2458,8 +2190,7 @@
cmp\\t%<w>0, %<w>1
cmp\\t%<w>0, %<w>1
cmn\\t%<w>0, #%n1"
- [(set_attr "v8type" "alus")
- (set_attr "type" "alus_reg,alus_imm,alus_imm")
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")
(set_attr "mode" "<MODE>")]
)
@@ -2471,8 +2202,7 @@
"@
fcmp\\t%<s>0, #0.0
fcmp\\t%<s>0, %<s>1"
- [(set_attr "v8type" "fcmp")
- (set_attr "type" "fcmp<s>")
+ [(set_attr "type" "fcmp<s>")
(set_attr "mode" "<MODE>")]
)
@@ -2484,8 +2214,7 @@
"@
fcmpe\\t%<s>0, #0.0
fcmpe\\t%<s>0, %<s>1"
- [(set_attr "v8type" "fcmp")
- (set_attr "type" "fcmp<s>")
+ [(set_attr "type" "fcmp<s>")
(set_attr "mode" "<MODE>")]
)
@@ -2497,8 +2226,7 @@
(match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")))]
""
"cmp\\t%<w>2, %<w>0, <shift> %1"
- [(set_attr "v8type" "alus_shift")
- (set_attr "type" "alus_shift_imm")
+ [(set_attr "type" "alus_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -2509,8 +2237,7 @@
(match_operand:GPI 1 "register_operand" "r")))]
""
"cmp\\t%<GPI:w>1, %<GPI:w>0, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alus_ext")
- (set_attr "type" "alus_ext")
+ [(set_attr "type" "alus_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -2523,8 +2250,7 @@
(match_operand:GPI 2 "register_operand" "r")))]
""
"cmp\\t%<GPI:w>2, %<GPI:w>0, <su>xt<ALLX:size> %1"
- [(set_attr "v8type" "alus_ext")
- (set_attr "type" "alus_ext")
+ [(set_attr "type" "alus_ext")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -2564,8 +2290,7 @@
[(match_operand 2 "cc_register" "") (const_int 0)]))]
""
"cset\\t%<w>0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "<MODE>")]
)
@@ -2577,8 +2302,7 @@
[(match_operand 2 "cc_register" "") (const_int 0)])))]
""
"cset\\t%w0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "SI")]
)
@@ -2588,8 +2312,7 @@
[(match_operand 2 "cc_register" "") (const_int 0)])))]
""
"csetm\\t%<w>0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "<MODE>")]
)
@@ -2601,8 +2324,7 @@
[(match_operand 2 "cc_register" "") (const_int 0)]))))]
""
"csetm\\t%w0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "SI")]
)
@@ -2656,8 +2378,7 @@
csinc\\t%<w>0, %<w>4, <w>zr, %M1
mov\\t%<w>0, -1
mov\\t%<w>0, 1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "<MODE>")]
)
@@ -2681,8 +2402,7 @@
csinc\\t%w0, %w4, wzr, %M1
mov\\t%w0, -1
mov\\t%w0, 1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "SI")]
)
@@ -2695,8 +2415,7 @@
(match_operand:GPF 4 "register_operand" "w")))]
"TARGET_FLOAT"
"fcsel\\t%<s>0, %<s>3, %<s>4, %m1"
- [(set_attr "v8type" "fcsel")
- (set_attr "type" "fcsel")
+ [(set_attr "type" "fcsel")
(set_attr "mode" "<MODE>")]
)
@@ -2745,8 +2464,7 @@
(match_operand:GPI 1 "register_operand" "r")))]
""
"csinc\\t%<w>0, %<w>1, %<w>1, %M2"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "<MODE>")])
(define_insn "csinc3<mode>_insn"
@@ -2759,8 +2477,7 @@
(match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
""
"csinc\\t%<w>0, %<w>4, %<w>3, %M1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "<MODE>")]
)
@@ -2773,8 +2490,7 @@
(match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
""
"csinv\\t%<w>0, %<w>4, %<w>3, %M1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "<MODE>")])
(define_insn "*csneg3<mode>_insn"
@@ -2786,8 +2502,7 @@
(match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
""
"csneg\\t%<w>0, %<w>4, %<w>3, %M1"
- [(set_attr "v8type" "csel")
- (set_attr "type" "csel")
+ [(set_attr "type" "csel")
(set_attr "mode" "<MODE>")])
;; -------------------------------------------------------------------
@@ -2800,8 +2515,7 @@
(match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>")))]
""
"<logical>\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "logic,logic_imm")
- (set_attr "type" "logic_reg,logic_imm")
+ [(set_attr "type" "logic_reg,logic_imm")
(set_attr "mode" "<MODE>")])
;; zero_extend version of above
@@ -2812,8 +2526,7 @@
(match_operand:SI 2 "aarch64_logical_operand" "r,K"))))]
""
"<logical>\\t%w0, %w1, %w2"
- [(set_attr "v8type" "logic,logic_imm")
- (set_attr "type" "logic_reg,logic_imm")
+ [(set_attr "type" "logic_reg,logic_imm")
(set_attr "mode" "SI")])
(define_insn "*and<mode>3_compare0"
@@ -2826,8 +2539,7 @@
(and:GPI (match_dup 1) (match_dup 2)))]
""
"ands\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "logics,logics_imm")
- (set_attr "type" "logics_reg,logics_imm")
+ [(set_attr "type" "logics_reg,logics_imm")
(set_attr "mode" "<MODE>")]
)
@@ -2842,8 +2554,7 @@
(zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))]
""
"ands\\t%w0, %w1, %w2"
- [(set_attr "v8type" "logics,logics_imm")
- (set_attr "type" "logics_reg,logics_imm")
+ [(set_attr "type" "logics_reg,logics_imm")
(set_attr "mode" "SI")]
)
@@ -2859,8 +2570,7 @@
(and:GPI (SHIFT:GPI (match_dup 1) (match_dup 2)) (match_dup 3)))]
""
"ands\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logics_shift")
- (set_attr "type" "logics_shift_imm")
+ [(set_attr "type" "logics_shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -2878,8 +2588,7 @@
(match_dup 3))))]
""
"ands\\t%w0, %w3, %w1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logics_shift")
- (set_attr "type" "logics_shift_imm")
+ [(set_attr "type" "logics_shift_imm")
(set_attr "mode" "SI")]
)
@@ -2891,8 +2600,7 @@
(match_operand:GPI 3 "register_operand" "r")))]
""
"<LOGICAL:logical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "type" "logic_shift_imm")
+ [(set_attr "type" "logic_shift_imm")
(set_attr "mode" "<MODE>")])
;; zero_extend version of above
@@ -2905,8 +2613,7 @@
(match_operand:SI 3 "register_operand" "r"))))]
""
"<LOGICAL:logical>\\t%w0, %w3, %w1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "type" "logic_shift_imm")
+ [(set_attr "type" "logic_shift_imm")
(set_attr "mode" "SI")])
(define_insn "one_cmpl<mode>2"
@@ -2914,8 +2621,7 @@
(not:GPI (match_operand:GPI 1 "register_operand" "r")))]
""
"mvn\\t%<w>0, %<w>1"
- [(set_attr "v8type" "logic")
- (set_attr "type" "logic_reg")
+ [(set_attr "type" "logic_reg")
(set_attr "mode" "<MODE>")])
(define_insn "*one_cmpl_<optab><mode>2"
@@ -2924,8 +2630,7 @@
(match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
""
"mvn\\t%<w>0, %<w>1, <shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "type" "logic_shift_imm")
+ [(set_attr "type" "logic_shift_imm")
(set_attr "mode" "<MODE>")])
(define_insn "*<LOGICAL:optab>_one_cmpl<mode>3"
@@ -2935,8 +2640,7 @@
(match_operand:GPI 2 "register_operand" "r")))]
""
"<LOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1"
- [(set_attr "v8type" "logic")
- (set_attr "type" "logic_reg")
+ [(set_attr "type" "logic_reg")
(set_attr "mode" "<MODE>")])
(define_insn "*and_one_cmpl<mode>3_compare0"
@@ -2950,8 +2654,7 @@
(and:GPI (not:GPI (match_dup 1)) (match_dup 2)))]
""
"bics\\t%<w>0, %<w>2, %<w>1"
- [(set_attr "v8type" "logics")
- (set_attr "type" "logics_reg")
+ [(set_attr "type" "logics_reg")
(set_attr "mode" "<MODE>")])
;; zero_extend version of above
@@ -2966,8 +2669,7 @@
(zero_extend:DI (and:SI (not:SI (match_dup 1)) (match_dup 2))))]
""
"bics\\t%w0, %w2, %w1"
- [(set_attr "v8type" "logics")
- (set_attr "type" "logics_reg")
+ [(set_attr "type" "logics_reg")
(set_attr "mode" "SI")])
(define_insn "*<LOGICAL:optab>_one_cmpl_<SHIFT:optab><mode>3"
@@ -2979,8 +2681,7 @@
(match_operand:GPI 3 "register_operand" "r")))]
""
"<LOGICAL:nlogical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "type" "logics_shift_imm")
+ [(set_attr "type" "logics_shift_imm")
(set_attr "mode" "<MODE>")])
(define_insn "*and_one_cmpl_<SHIFT:optab><mode>3_compare0"
@@ -2998,8 +2699,7 @@
(match_dup 1) (match_dup 2))) (match_dup 3)))]
""
"bics\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logics_shift")
- (set_attr "type" "logics_shift_imm")
+ [(set_attr "type" "logics_shift_imm")
(set_attr "mode" "<MODE>")])
;; zero_extend version of above
@@ -3018,8 +2718,7 @@
(SHIFT:SI (match_dup 1) (match_dup 2))) (match_dup 3))))]
""
"bics\\t%w0, %w3, %w1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logics_shift")
- (set_attr "type" "logics_shift_imm")
+ [(set_attr "type" "logics_shift_imm")
(set_attr "mode" "SI")])
(define_insn "clz<mode>2"
@@ -3027,8 +2726,7 @@
(clz:GPI (match_operand:GPI 1 "register_operand" "r")))]
""
"clz\\t%<w>0, %<w>1"
- [(set_attr "v8type" "clz")
- (set_attr "type" "clz")
+ [(set_attr "type" "clz")
(set_attr "mode" "<MODE>")])
(define_expand "ffs<mode>2"
@@ -3051,8 +2749,7 @@
(unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_CLS))]
""
"cls\\t%<w>0, %<w>1"
- [(set_attr "v8type" "clz")
- (set_attr "type" "clz")
+ [(set_attr "type" "clz")
(set_attr "mode" "<MODE>")])
(define_insn "rbit<mode>2"
@@ -3060,8 +2757,7 @@
(unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_RBIT))]
""
"rbit\\t%<w>0, %<w>1"
- [(set_attr "v8type" "rbit")
- (set_attr "type" "rbit")
+ [(set_attr "type" "rbit")
(set_attr "mode" "<MODE>")])
(define_expand "ctz<mode>2"
@@ -3083,8 +2779,7 @@
(const_int 0)))]
""
"tst\\t%<w>0, %<w>1"
- [(set_attr "v8type" "logics")
- (set_attr "type" "logics_reg")
+ [(set_attr "type" "logics_reg")
(set_attr "mode" "<MODE>")])
(define_insn "*and_<SHIFT:optab><mode>3nr_compare0"
@@ -3097,8 +2792,7 @@
(const_int 0)))]
""
"tst\\t%<w>2, %<w>0, <SHIFT:shift> %1"
- [(set_attr "v8type" "logics_shift")
- (set_attr "type" "logics_shift_imm")
+ [(set_attr "type" "logics_shift_imm")
(set_attr "mode" "<MODE>")])
;; -------------------------------------------------------------------
@@ -3200,7 +2894,6 @@
ushl\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
lsl\t%<w>0, %<w>1, %<w>2"
[(set_attr "simd" "yes,yes,no")
- (set_attr "v8type" "*,*,shift")
(set_attr "type" "neon_shift_imm<q>, neon_shift_reg<q>,shift_reg")
(set_attr "mode" "*,*,<MODE>")]
)
@@ -3217,7 +2910,6 @@
#
lsr\t%<w>0, %<w>1, %<w>2"
[(set_attr "simd" "yes,yes,no")
- (set_attr "v8type" "*,*,shift")
(set_attr "type" "neon_shift_imm<q>,neon_shift_reg<q>,shift_reg")
(set_attr "mode" "*,*,<MODE>")]
)
@@ -3260,7 +2952,6 @@
#
asr\t%<w>0, %<w>1, %<w>2"
[(set_attr "simd" "yes,yes,no")
- (set_attr "v8type" "*,*,shift")
(set_attr "type" "neon_shift_imm<q>,neon_shift_reg<q>,shift_reg")
(set_attr "mode" "*,*,<MODE>")]
)
@@ -3353,8 +3044,7 @@
(match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "rUs<cmode>")))]
""
"ror\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "shift")
- (set_attr "type" "shift_reg")
+ [(set_attr "type" "shift_reg")
(set_attr "mode" "<MODE>")]
)
@@ -3366,8 +3056,7 @@
(match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss"))))]
""
"<shift>\\t%w0, %w1, %w2"
- [(set_attr "v8type" "shift")
- (set_attr "type" "shift_reg")
+ [(set_attr "type" "shift_reg")
(set_attr "mode" "SI")]
)
@@ -3377,8 +3066,7 @@
(match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss")))]
""
"lsl\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "shift")
- (set_attr "type" "shift_reg")
+ [(set_attr "type" "shift_reg")
(set_attr "mode" "<MODE>")]
)
@@ -3391,8 +3079,7 @@
operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
return "<bfshift>\t%w0, %w1, %2, %3";
}
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<MODE>")]
)
@@ -3405,8 +3092,7 @@
"UINTVAL (operands[3]) < GET_MODE_BITSIZE (<MODE>mode) &&
(UINTVAL (operands[3]) + UINTVAL (operands[4]) == GET_MODE_BITSIZE (<MODE>mode))"
"extr\\t%<w>0, %<w>1, %<w>2, %4"
- [(set_attr "v8type" "shift")
- (set_attr "type" "shift_imm")
+ [(set_attr "type" "shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -3421,8 +3107,7 @@
"UINTVAL (operands[3]) < 32 &&
(UINTVAL (operands[3]) + UINTVAL (operands[4]) == 32)"
"extr\\t%w0, %w1, %w2, %4"
- [(set_attr "v8type" "shift")
- (set_attr "type" "shift_imm")
+ [(set_attr "type" "shift_imm")
(set_attr "mode" "SI")]
)
@@ -3435,8 +3120,7 @@
operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
return "ror\\t%<w>0, %<w>1, %3";
}
- [(set_attr "v8type" "shift")
- (set_attr "type" "shift_imm")
+ [(set_attr "type" "shift_imm")
(set_attr "mode" "<MODE>")]
)
@@ -3451,8 +3135,7 @@
operands[3] = GEN_INT (32 - UINTVAL (operands[2]));
return "ror\\t%w0, %w1, %3";
}
- [(set_attr "v8type" "shift")
- (set_attr "type" "shift_imm")
+ [(set_attr "type" "shift_imm")
(set_attr "mode" "SI")]
)
@@ -3466,8 +3149,7 @@
operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
}
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -3481,8 +3163,7 @@
operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
}
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -3496,8 +3177,7 @@
operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
}
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -3521,8 +3201,7 @@
(match_operand 3 "const_int_operand" "n")))]
""
"<su>bfx\\t%<w>0, %<w>1, %3, %2"
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<MODE>")]
)
@@ -3566,8 +3245,7 @@
|| (UINTVAL (operands[2]) + UINTVAL (operands[1])
> GET_MODE_BITSIZE (<MODE>mode)))"
"bfi\\t%<w>0, %<w>3, %2, %1"
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<MODE>")]
)
@@ -3582,8 +3260,7 @@
|| (UINTVAL (operands[3]) + UINTVAL (operands[1])
> GET_MODE_BITSIZE (<MODE>mode)))"
"bfxil\\t%<w>0, %<w>2, %3, %1"
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<MODE>")]
)
@@ -3599,8 +3276,7 @@
: GEN_INT (<GPI:sizen> - UINTVAL (operands[2]));
return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
}
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<GPI:MODE>")]
)
@@ -3614,8 +3290,7 @@
"exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0
&& (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0"
"ubfiz\\t%<w>0, %<w>1, %2, %P3"
- [(set_attr "v8type" "bfm")
- (set_attr "type" "bfm")
+ [(set_attr "type" "bfm")
(set_attr "mode" "<MODE>")]
)
@@ -3624,8 +3299,7 @@
(bswap:GPI (match_operand:GPI 1 "register_operand" "r")))]
""
"rev\\t%<w>0, %<w>1"
- [(set_attr "v8type" "rev")
- (set_attr "type" "rev")
+ [(set_attr "type" "rev")
(set_attr "mode" "<MODE>")]
)
@@ -3634,8 +3308,7 @@
(bswap:HI (match_operand:HI 1 "register_operand" "r")))]
""
"rev16\\t%w0, %w1"
- [(set_attr "v8type" "rev")
- (set_attr "type" "rev")
+ [(set_attr "type" "rev")
(set_attr "mode" "HI")]
)
@@ -3645,8 +3318,7 @@
(zero_extend:DI (bswap:SI (match_operand:SI 1 "register_operand" "r"))))]
""
"rev\\t%w0, %w1"
- [(set_attr "v8type" "rev")
- (set_attr "type" "rev")
+ [(set_attr "type" "rev")
(set_attr "mode" "SI")]
)
@@ -3663,8 +3335,7 @@
FRINT))]
"TARGET_FLOAT"
"frint<frint_suffix>\\t%<s>0, %<s>1"
- [(set_attr "v8type" "frint")
- (set_attr "type" "f_rint<s>")
+ [(set_attr "type" "f_rint<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3676,8 +3347,7 @@
FCVT)))]
"TARGET_FLOAT"
"fcvt<frint_suffix><su>\\t%<GPI:w>0, %<GPF:s>1"
- [(set_attr "v8type" "fcvtf2i")
- (set_attr "type" "f_cvtf2i")
+ [(set_attr "type" "f_cvtf2i")
(set_attr "mode" "<GPF:MODE>")
(set_attr "mode2" "<GPI:MODE>")]
)
@@ -3691,8 +3361,7 @@
(match_operand:GPF 3 "register_operand" "w")))]
"TARGET_FLOAT"
"fmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "type" "fmac<s>")
+ [(set_attr "type" "fmac<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3703,8 +3372,7 @@
(match_operand:GPF 3 "register_operand" "w")))]
"TARGET_FLOAT"
"fmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "type" "fmac<s>")
+ [(set_attr "type" "fmac<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3715,8 +3383,7 @@
(neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
"TARGET_FLOAT"
"fnmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "type" "fmac<s>")
+ [(set_attr "type" "fmac<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3727,8 +3394,7 @@
(neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
"TARGET_FLOAT"
"fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "type" "fmac<s>")
+ [(set_attr "type" "fmac<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3740,8 +3406,7 @@
(match_operand:GPF 3 "register_operand" "w"))))]
"!HONOR_SIGNED_ZEROS (<MODE>mode) && TARGET_FLOAT"
"fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "type" "fmac<s>")
+ [(set_attr "type" "fmac<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3754,8 +3419,7 @@
(float_extend:DF (match_operand:SF 1 "register_operand" "w")))]
"TARGET_FLOAT"
"fcvt\\t%d0, %s1"
- [(set_attr "v8type" "fcvt")
- (set_attr "type" "f_cvt")
+ [(set_attr "type" "f_cvt")
(set_attr "mode" "DF")
(set_attr "mode2" "SF")]
)
@@ -3765,8 +3429,7 @@
(float_truncate:SF (match_operand:DF 1 "register_operand" "w")))]
"TARGET_FLOAT"
"fcvt\\t%s0, %d1"
- [(set_attr "v8type" "fcvt")
- (set_attr "type" "f_cvt")
+ [(set_attr "type" "f_cvt")
(set_attr "mode" "SF")
(set_attr "mode2" "DF")]
)
@@ -3776,8 +3439,7 @@
(fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
"TARGET_FLOAT"
"fcvtzs\\t%<GPI:w>0, %<GPF:s>1"
- [(set_attr "v8type" "fcvtf2i")
- (set_attr "type" "f_cvtf2i")
+ [(set_attr "type" "f_cvtf2i")
(set_attr "mode" "<GPF:MODE>")
(set_attr "mode2" "<GPI:MODE>")]
)
@@ -3787,8 +3449,7 @@
(unsigned_fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
"TARGET_FLOAT"
"fcvtzu\\t%<GPI:w>0, %<GPF:s>1"
- [(set_attr "v8type" "fcvtf2i")
- (set_attr "type" "f_cvtf2i")
+ [(set_attr "type" "f_cvtf2i")
(set_attr "mode" "<GPF:MODE>")
(set_attr "mode2" "<GPI:MODE>")]
)
@@ -3798,8 +3459,7 @@
(float:GPF (match_operand:GPI 1 "register_operand" "r")))]
"TARGET_FLOAT"
"scvtf\\t%<GPF:s>0, %<GPI:w>1"
- [(set_attr "v8type" "fcvti2f")
- (set_attr "type" "f_cvti2f")
+ [(set_attr "type" "f_cvti2f")
(set_attr "mode" "<GPF:MODE>")
(set_attr "mode2" "<GPI:MODE>")]
)
@@ -3809,8 +3469,7 @@
(unsigned_float:GPF (match_operand:GPI 1 "register_operand" "r")))]
"TARGET_FLOAT"
"ucvtf\\t%<GPF:s>0, %<GPI:w>1"
- [(set_attr "v8type" "fcvt")
- (set_attr "type" "f_cvt")
+ [(set_attr "type" "f_cvt")
(set_attr "mode" "<GPF:MODE>")
(set_attr "mode2" "<GPI:MODE>")]
)
@@ -3826,8 +3485,7 @@
(match_operand:GPF 2 "register_operand" "w")))]
"TARGET_FLOAT"
"fadd\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fadd")
- (set_attr "type" "fadd<s>")
+ [(set_attr "type" "fadd<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3838,8 +3496,7 @@
(match_operand:GPF 2 "register_operand" "w")))]
"TARGET_FLOAT"
"fsub\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fadd")
- (set_attr "type" "fadd<s>")
+ [(set_attr "type" "fadd<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3850,8 +3507,7 @@
(match_operand:GPF 2 "register_operand" "w")))]
"TARGET_FLOAT"
"fmul\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fmul")
- (set_attr "type" "fmul<s>")
+ [(set_attr "type" "fmul<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3862,8 +3518,7 @@
(match_operand:GPF 2 "register_operand" "w")))]
"TARGET_FLOAT"
"fnmul\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fmul")
- (set_attr "type" "fmul<s>")
+ [(set_attr "type" "fmul<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3874,8 +3529,7 @@
(match_operand:GPF 2 "register_operand" "w")))]
"TARGET_FLOAT"
"fdiv\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fdiv")
- (set_attr "type" "fdiv<s>")
+ [(set_attr "type" "fdiv<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3884,8 +3538,7 @@
(neg:GPF (match_operand:GPF 1 "register_operand" "w")))]
"TARGET_FLOAT"
"fneg\\t%<s>0, %<s>1"
- [(set_attr "v8type" "ffarith")
- (set_attr "type" "ffarith<s>")
+ [(set_attr "type" "ffarith<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3894,8 +3547,7 @@
(sqrt:GPF (match_operand:GPF 1 "register_operand" "w")))]
"TARGET_FLOAT"
"fsqrt\\t%<s>0, %<s>1"
- [(set_attr "v8type" "fsqrt")
- (set_attr "type" "fsqrt<s>")
+ [(set_attr "type" "fsqrt<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3904,8 +3556,7 @@
(abs:GPF (match_operand:GPF 1 "register_operand" "w")))]
"TARGET_FLOAT"
"fabs\\t%<s>0, %<s>1"
- [(set_attr "v8type" "ffarith")
- (set_attr "type" "ffarith<s>")
+ [(set_attr "type" "ffarith<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3919,8 +3570,7 @@
(match_operand:GPF 2 "register_operand" "w")))]
"TARGET_FLOAT"
"fmaxnm\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fminmax")
- (set_attr "type" "f_minmax<s>")
+ [(set_attr "type" "f_minmax<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3930,8 +3580,7 @@
(match_operand:GPF 2 "register_operand" "w")))]
"TARGET_FLOAT"
"fminnm\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fminmax")
- (set_attr "type" "f_minmax<s>")
+ [(set_attr "type" "f_minmax<s>")
(set_attr "mode" "<MODE>")]
)
@@ -3958,14 +3607,13 @@
;; The following secondary reload helpers patterns are invoked
;; after or during reload as we don't want these patterns to start
;; kicking in during the combiner.
-
+
(define_insn "aarch64_movdi_<mode>low"
[(set (match_operand:DI 0 "register_operand" "=r")
(truncate:DI (match_operand:TX 1 "register_operand" "w")))]
"reload_completed || reload_in_progress"
"fmov\\t%x0, %d1"
- [(set_attr "v8type" "fmovf2i")
- (set_attr "type" "f_mrc")
+ [(set_attr "type" "f_mrc")
(set_attr "mode" "DI")
(set_attr "length" "4")
])
@@ -3977,8 +3625,7 @@
(const_int 64))))]
"reload_completed || reload_in_progress"
"fmov\\t%x0, %1.d[1]"
- [(set_attr "v8type" "fmovf2i")
- (set_attr "type" "f_mrc")
+ [(set_attr "type" "f_mrc")
(set_attr "mode" "DI")
(set_attr "length" "4")
])
@@ -3989,8 +3636,7 @@
(zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
"reload_completed || reload_in_progress"
"fmov\\t%0.d[1], %x1"
- [(set_attr "v8type" "fmovi2f")
- (set_attr "type" "f_mcr")
+ [(set_attr "type" "f_mcr")
(set_attr "mode" "DI")
(set_attr "length" "4")
])
@@ -4000,8 +3646,7 @@
(zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
"reload_completed || reload_in_progress"
"fmov\\t%d0, %x1"
- [(set_attr "v8type" "fmovi2f")
- (set_attr "type" "f_mcr")
+ [(set_attr "type" "f_mcr")
(set_attr "mode" "DI")
(set_attr "length" "4")
])
@@ -4012,8 +3657,7 @@
(truncate:DI (match_operand:TI 1 "register_operand" "w"))))]
"reload_completed || reload_in_progress"
"fmov\\t%d0, %d1"
- [(set_attr "v8type" "fmovi2f")
- (set_attr "type" "f_mcr")
+ [(set_attr "type" "f_mcr")
(set_attr "mode" "DI")
(set_attr "length" "4")
])
@@ -4045,8 +3689,7 @@
(match_operand 2 "aarch64_valid_symref" "S")))]
""
"add\\t%<w>0, %<w>1, :lo12:%a2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg")
+ [(set_attr "type" "alu_reg")
(set_attr "mode" "<MODE>")]
)
@@ -4058,8 +3701,7 @@
UNSPEC_GOTSMALLPIC))]
""
"ldr\\t%<w>0, [%1, #:got_lo12:%a2]"
- [(set_attr "v8type" "load1")
- (set_attr "type" "load1")
+ [(set_attr "type" "load1")
(set_attr "mode" "<MODE>")]
)
@@ -4072,8 +3714,7 @@
UNSPEC_GOTSMALLPIC)))]
"TARGET_ILP32"
"ldr\\t%w0, [%1, #:got_lo12:%a2]"
- [(set_attr "v8type" "load1")
- (set_attr "type" "load1")
+ [(set_attr "type" "load1")
(set_attr "mode" "DI")]
)
@@ -4083,8 +3724,7 @@
UNSPEC_GOTTINYPIC))]
""
"ldr\\t%0, %L1"
- [(set_attr "v8type" "load1")
- (set_attr "type" "load1")
+ [(set_attr "type" "load1")
(set_attr "mode" "DI")]
)
@@ -4093,8 +3733,7 @@
(unspec:DI [(const_int 0)] UNSPEC_TLS))]
""
"mrs\\t%0, tpidr_el0"
- [(set_attr "v8type" "mrs")
- (set_attr "type" "mrs")
+ [(set_attr "type" "mrs")
(set_attr "mode" "DI")]
)
@@ -4119,8 +3758,7 @@
]
""
"adrp\\tx0, %A1\;add\\tx0, x0, %L1\;bl\\t%2\;nop"
- [(set_attr "v8type" "call")
- (set_attr "type" "call")
+ [(set_attr "type" "call")
(set_attr "length" "16")])
(define_insn "tlsie_small"
@@ -4129,8 +3767,7 @@
UNSPEC_GOTSMALLTLS))]
""
"adrp\\t%0, %A1\;ldr\\t%0, [%0, #%L1]"
- [(set_attr "v8type" "load1")
- (set_attr "type" "load1")
+ [(set_attr "type" "load1")
(set_attr "mode" "DI")
(set_attr "length" "8")]
)
@@ -4142,8 +3779,7 @@
UNSPEC_GOTSMALLTLS))]
""
"add\\t%0, %1, #%G2\;add\\t%0, %0, #%L2"
- [(set_attr "v8type" "alu")
- (set_attr "type" "alu_reg")
+ [(set_attr "type" "alu_reg")
(set_attr "mode" "DI")
(set_attr "length" "8")]
)
@@ -4156,8 +3792,7 @@
(clobber (match_scratch:DI 1 "=r"))]
"TARGET_TLS_DESC"
"adrp\\tx0, %A0\;ldr\\t%1, [x0, #%L0]\;add\\tx0, x0, %L0\;.tlsdesccall\\t%0\;blr\\t%1"
- [(set_attr "v8type" "call")
- (set_attr "type" "call")
+ [(set_attr "type" "call")
(set_attr "length" "16")])
(define_insn "stack_tie"
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index e710b0c3717..c55835e7fe1 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -25,6 +25,9 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
@@ -51,6 +54,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "gimplify.h"
#include "gimple-ssa.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
@@ -4831,7 +4835,8 @@ alpha_gp_save_rtx (void)
label. Emit the sequence properly on the edge. We are only
invoked from dw2_build_landing_pads and finish_eh_generation
will call commit_edge_insertions thanks to a kludge. */
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
+ insert_insn_on_edge (seq,
+ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
cfun->machine->gp_save_rtx = m;
}
diff --git a/gcc/config/arc/arc.c b/gcc/config/arc/arc.c
index e9a0d24b973..5ad807e996e 100644
--- a/gcc/config/arc/arc.c
+++ b/gcc/config/arc/arc.c
@@ -34,6 +34,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "calls.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/arm/aarch-common-protos.h b/gcc/config/arm/aarch-common-protos.h
index 841f544e83d..c3652a72c81 100644
--- a/gcc/config/arm/aarch-common-protos.h
+++ b/gcc/config/arm/aarch-common-protos.h
@@ -31,6 +31,7 @@ extern int arm_no_early_alu_shift_dep (rtx, rtx);
extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
extern int arm_no_early_mul_dep (rtx, rtx);
extern int arm_no_early_store_addr_dep (rtx, rtx);
+extern bool arm_rtx_shift_left_p (rtx);
/* RTX cost table definitions. These are used when tuning for speed rather
than for size and should reflect the _additional_ cost over the cost
diff --git a/gcc/config/arm/aarch-common.c b/gcc/config/arm/aarch-common.c
index 201e581a4a6..a46e6751a7b 100644
--- a/gcc/config/arm/aarch-common.c
+++ b/gcc/config/arm/aarch-common.c
@@ -40,7 +40,7 @@ typedef struct
/* Return TRUE if X is either an arithmetic shift left, or
is a multiplication by a power of two. */
-static bool
+bool
arm_rtx_shift_left_p (rtx x)
{
enum rtx_code code = GET_CODE (x);
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 8957309ad1d..12934233a48 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -27,6 +27,10 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -2449,6 +2453,10 @@ arm_option_override (void)
arm_pic_register = pic_register;
}
+ if (TARGET_VXWORKS_RTP
+ && !global_options_set.x_arm_pic_data_is_text_relative)
+ arm_pic_data_is_text_relative = 0;
+
/* Enable -mfix-cortex-m3-ldrd by default for Cortex-M3 cores. */
if (fix_cm3_ldrd == 2)
{
@@ -5929,7 +5937,8 @@ require_pic_register (void)
we can't yet emit instructions directly in the final
insn stream. Queue the insns on the entry edge, they will
be committed after everything else is expanded. */
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
+ insert_insn_on_edge (seq,
+ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
}
}
}
@@ -5959,7 +5968,7 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
|| (GET_CODE (orig) == SYMBOL_REF &&
SYMBOL_REF_LOCAL_P (orig)))
&& NEED_GOT_RELOC
- && !TARGET_VXWORKS_RTP)
+ && arm_pic_data_is_text_relative)
insn = arm_pic_static_addr (orig, reg);
else
{
@@ -8752,6 +8761,30 @@ arm_unspec_cost (rtx x, enum rtx_code /* outer_code */, bool speed_p, int *cost)
call (one insn for -Os) and then one for processing the result. */
#define LIBCALL_COST(N) COSTS_N_INSNS (N + (speed_p ? 18 : 2))
+#define HANDLE_NARROW_SHIFT_ARITH(OP, IDX) \
+ do \
+ { \
+ shift_op = shifter_op_p (XEXP (x, IDX), &shift_reg); \
+ if (shift_op != NULL \
+ && arm_rtx_shift_left_p (XEXP (x, IDX))) \
+ { \
+ if (shift_reg) \
+ { \
+ if (speed_p) \
+ *cost += extra_cost->alu.arith_shift_reg; \
+ *cost += rtx_cost (shift_reg, ASHIFT, 1, speed_p); \
+ } \
+ else if (speed_p) \
+ *cost += extra_cost->alu.arith_shift; \
+ \
+ *cost += (rtx_cost (shift_op, ASHIFT, 0, speed_p) \
+ + rtx_cost (XEXP (x, 1 - IDX), \
+ OP, 1, speed_p)); \
+ return true; \
+ } \
+ } \
+ while (0);
+
/* RTX costs. Make an estimate of the cost of executing the operation
X, which is contained with an operation with code OUTER_CODE.
SPEED_P indicates whether the cost desired is the performance cost,
@@ -9108,6 +9141,15 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) < 4)
{
+ rtx shift_op, shift_reg;
+ shift_reg = NULL;
+
+ /* We check both sides of the MINUS for shifter operands since,
+ unlike PLUS, it's not commutative. */
+
+ HANDLE_NARROW_SHIFT_ARITH (MINUS, 0)
+ HANDLE_NARROW_SHIFT_ARITH (MINUS, 1)
+
/* Slightly disparage, as we might need to widen the result. */
*cost = 1 + COSTS_N_INSNS (1);
if (speed_p)
@@ -9207,11 +9249,18 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
return false;
}
+ /* Narrow modes can be synthesized in SImode, but the range
+ of useful sub-operations is limited. Check for shift operations
+ on one of the operands. Only left shifts can be used in the
+ narrow modes. */
if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) < 4)
{
- /* Narrow modes can be synthesized in SImode, but the range
- of useful sub-operations is limited. */
+ rtx shift_op, shift_reg;
+ shift_reg = NULL;
+
+ HANDLE_NARROW_SHIFT_ARITH (PLUS, 0)
+
if (CONST_INT_P (XEXP (x, 1)))
{
int insns = arm_gen_constant (PLUS, SImode, NULL_RTX,
@@ -10330,6 +10379,8 @@ arm_new_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
}
}
+#undef HANDLE_NARROW_SHIFT_ARITH
+
/* RTX costs when optimizing for size. */
static bool
arm_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
@@ -18330,7 +18381,8 @@ arm_r3_live_at_start_p (void)
/* Just look at cfg info, which is still close enough to correct at this
point. This gives false positives for broken functions that might use
uninitialized data that happens to be allocated in r3, but who cares? */
- return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 3);
+ return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ 3);
}
/* Compute the number of bytes used to store the static chain register on the
@@ -19863,7 +19915,7 @@ any_sibcall_could_use_r3 (void)
if (!crtl->tail_call_emit)
return false;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e->flags & EDGE_SIBCALL)
{
rtx call = BB_END (e->src);
@@ -21452,7 +21504,7 @@ arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
/* See legitimize_pic_address for an explanation of the
TARGET_VXWORKS_RTP check. */
- if (TARGET_VXWORKS_RTP
+ if (!arm_pic_data_is_text_relative
|| (GET_CODE (x) == SYMBOL_REF && !SYMBOL_REF_LOCAL_P (x)))
fputs ("(GOT)", asm_out_file);
else
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 1781b75b34b..dbd841ec842 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -568,6 +568,10 @@ extern int prefer_neon_for_64bits;
#define NEED_PLT_RELOC 0
#endif
+#ifndef TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE
+#define TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE 1
+#endif
+
/* Nonzero if we need to refer to the GOT with a PC-relative
offset. In other words, generate
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 3726201dd4f..a26550a476a 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -5170,7 +5170,8 @@
[(set_attr "length" "8,4,8,8")
(set_attr "arch" "neon_for_64bits,*,*,avoid_neon_for_64bits")
(set_attr "ce_count" "2")
- (set_attr "predicable" "yes")]
+ (set_attr "predicable" "yes")
+ (set_attr "type" "multiple,mov_reg,multiple,multiple")]
)
(define_insn "extend<mode>di2"
diff --git a/gcc/config/arm/arm.opt b/gcc/config/arm/arm.opt
index 9b740386ca3..fa0839a9e12 100644
--- a/gcc/config/arm/arm.opt
+++ b/gcc/config/arm/arm.opt
@@ -158,6 +158,10 @@ mlong-calls
Target Report Mask(LONG_CALLS)
Generate call insns as indirect calls, if necessary
+mpic-data-is-text-relative
+Target Report Var(arm_pic_data_is_text_relative) Init(TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE)
+Assume data segments are relative to text segment.
+
mpic-register=
Target RejectNegative Joined Var(arm_pic_register_string)
Specify the register to be used for PIC addressing
diff --git a/gcc/config/avr/avr-c.c b/gcc/config/avr/avr-c.c
index 4e64405a351..2cfb264ad20 100644
--- a/gcc/config/avr/avr-c.c
+++ b/gcc/config/avr/avr-c.c
@@ -26,6 +26,7 @@
#include "tm_p.h"
#include "cpplib.h"
#include "tree.h"
+#include "stor-layout.h"
#include "target.h"
#include "c-family/c-common.h"
#include "langhooks.h"
diff --git a/gcc/config/avr/avr-log.c b/gcc/config/avr/avr-log.c
index 87fa14d7006..3d2f54d9707 100644
--- a/gcc/config/avr/avr-log.c
+++ b/gcc/config/avr/avr-log.c
@@ -24,6 +24,7 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "print-tree.h"
#include "output.h"
#include "input.h"
#include "function.h"
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index f0383a7b314..f1241f4e0f2 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -32,6 +32,10 @@
#include "flags.h"
#include "reload.h"
#include "tree.h"
+#include "print-tree.h"
+#include "calls.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "output.h"
#include "expr.h"
#include "c-family/c-common.h"
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index 5a9f27dc4cf..88fe426dd43 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -32,6 +32,8 @@
#include "output.h"
#include "insn-attr.h"
#include "tree.h"
+#include "varasm.h"
+#include "calls.h"
#include "flags.h"
#include "except.h"
#include "function.h"
@@ -3597,7 +3599,7 @@ hwloop_optimize (hwloop_info loop)
if (single_pred_p (bb)
&& single_pred_edge (bb)->flags & EDGE_FALLTHRU
- && single_pred (bb) != ENTRY_BLOCK_PTR)
+ && single_pred (bb) != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
bb = single_pred (bb);
last_insn = BB_END (bb);
diff --git a/gcc/config/c6x/c6x.c b/gcc/config/c6x/c6x.c
index a37e02ff834..af310bac8dc 100644
--- a/gcc/config/c6x/c6x.c
+++ b/gcc/config/c6x/c6x.c
@@ -25,6 +25,10 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
+#include "stringpool.h"
#include "insn-flags.h"
#include "output.h"
#include "insn-attr.h"
diff --git a/gcc/config/cr16/cr16.c b/gcc/config/cr16/cr16.c
index 1ac29cc800a..b3972766d5b 100644
--- a/gcc/config/cr16/cr16.c
+++ b/gcc/config/cr16/cr16.c
@@ -24,6 +24,8 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/cris/cris.c b/gcc/config/cris/cris.c
index 7432251b950..2d2a108031d 100644
--- a/gcc/config/cris/cris.c
+++ b/gcc/config/cris/cris.c
@@ -30,6 +30,10 @@ along with GCC; see the file COPYING3. If not see
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "stmt.h"
#include "expr.h"
#include "except.h"
#include "function.h"
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index 81e18dfb969..50fb3f05f1f 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -32,6 +32,9 @@ along with GCC; see the file COPYING3. If not see
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
+#include "stor-layout.h"
#include "expr.h"
#include "reload.h"
#include "function.h"
diff --git a/gcc/config/epiphany/epiphany.c b/gcc/config/epiphany/epiphany.c
index c3200250c06..c264cdaee78 100644
--- a/gcc/config/epiphany/epiphany.c
+++ b/gcc/config/epiphany/epiphany.c
@@ -23,6 +23,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
+#include "stringpool.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -2762,7 +2766,7 @@ epiphany_special_round_type_align (tree type, unsigned computed,
|| tree_to_uhwi (offset) >= try_align
|| tree_to_uhwi (size) >= try_align)
return try_align;
- total = TREE_INT_CST_LOW (offset) + TREE_INT_CST_LOW (size);
+ total = tree_to_uhwi (offset) + tree_to_uhwi (size);
if (total > max)
max = total;
}
diff --git a/gcc/config/fr30/fr30.c b/gcc/config/fr30/fr30.c
index 4a45feafd04..caa50d9e691 100644
--- a/gcc/config/fr30/fr30.c
+++ b/gcc/config/fr30/fr30.c
@@ -33,6 +33,8 @@
#include "flags.h"
#include "recog.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "output.h"
#include "expr.h"
#include "obstack.h"
diff --git a/gcc/config/frv/frv.c b/gcc/config/frv/frv.c
index bcd55111434..a5eb2c1c844 100644
--- a/gcc/config/frv/frv.c
+++ b/gcc/config/frv/frv.c
@@ -23,6 +23,9 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
@@ -8024,7 +8027,7 @@ frv_optimize_membar_global (basic_block bb, struct frv_io *first_io,
/* We need to keep the membar if there is an edge to the exit block. */
FOR_EACH_EDGE (succ, ei, bb->succs)
/* for (succ = bb->succ; succ != 0; succ = succ->succ_next) */
- if (succ->dest == EXIT_BLOCK_PTR)
+ if (succ->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
/* Work out the union of all successor blocks. */
diff --git a/gcc/config/h8300/h8300.c b/gcc/config/h8300/h8300.c
index 69f37fd02d6..f0ebca30f2c 100644
--- a/gcc/config/h8300/h8300.c
+++ b/gcc/config/h8300/h8300.c
@@ -25,6 +25,10 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
+#include "stringpool.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
index 85ed7a20034..bceb8f2ef6d 100644
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -60,7 +60,7 @@ extern int avx_vperm2f128_parallel (rtx par, enum machine_mode mode);
extern bool ix86_expand_strlen (rtx, rtx, rtx, rtx);
extern bool ix86_expand_set_or_movmem (rtx, rtx, rtx, rtx, rtx, rtx,
- rtx, rtx, rtx, bool);
+ rtx, rtx, rtx, rtx, bool);
extern bool constant_address_p (rtx);
extern bool legitimate_pic_operand_p (rtx);
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index d332b5bc80e..8e6bbd14dff 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -23,6 +23,11 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "calls.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -5589,7 +5594,7 @@ ix86_eax_live_at_start_p (void)
to correct at this point. This gives false positives for broken
functions that might use uninitialized data that happens to be
allocated in eax, but who cares? */
- return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
+ return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)), 0);
}
static bool
@@ -9297,7 +9302,7 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
Recompute the value as needed. Do not recompute when amount of registers
didn't change as reload does multiple calls to the function and does not
expect the decision to change within single iteration. */
- else if (!optimize_bb_for_size_p (ENTRY_BLOCK_PTR)
+ else if (!optimize_bb_for_size_p (ENTRY_BLOCK_PTR_FOR_FN (cfun))
&& cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
{
int count = frame->nregs;
@@ -11386,7 +11391,7 @@ ix86_expand_epilogue (int style)
/* Leave results in shorter dependency chains on CPUs that are
able to grok it fast. */
else if (TARGET_USE_LEAVE
- || optimize_bb_for_size_p (EXIT_BLOCK_PTR)
+ || optimize_bb_for_size_p (EXIT_BLOCK_PTR_FOR_FN (cfun))
|| !cfun->machine->use_fast_prologue_epilogue)
ix86_emit_leave ();
else
@@ -11870,27 +11875,6 @@ ix86_live_on_entry (bitmap regs)
}
}
-/* Determine if op is suitable SUBREG RTX for address. */
-
-static bool
-ix86_address_subreg_operand (rtx op)
-{
- enum machine_mode mode;
-
- if (!REG_P (op))
- return false;
-
- mode = GET_MODE (op);
-
- /* Don't allow SUBREGs that span more than a word. It can lead to spill
- failures when the register is one word out of a two word structure. */
- if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
- return false;
-
- /* Allow only SUBREGs of non-eliminable hard registers. */
- return register_no_elim_operand (op, mode);
-}
-
/* Extract the parts of an RTL expression that is a valid memory address
for an instruction. Return 0 if the structure of the address is
grossly off. Return -1 if the address contains ASHIFT, so it is not
@@ -11947,7 +11931,7 @@ ix86_decompose_address (rtx addr, struct ix86_address *out)
base = addr;
else if (GET_CODE (addr) == SUBREG)
{
- if (ix86_address_subreg_operand (SUBREG_REG (addr)))
+ if (REG_P (SUBREG_REG (addr)))
base = addr;
else
return 0;
@@ -12011,7 +11995,7 @@ ix86_decompose_address (rtx addr, struct ix86_address *out)
break;
case SUBREG:
- if (!ix86_address_subreg_operand (SUBREG_REG (op)))
+ if (!REG_P (SUBREG_REG (op)))
return 0;
/* FALLTHRU */
@@ -12064,18 +12048,12 @@ ix86_decompose_address (rtx addr, struct ix86_address *out)
if (REG_P (index))
;
else if (GET_CODE (index) == SUBREG
- && ix86_address_subreg_operand (SUBREG_REG (index)))
+ && REG_P (SUBREG_REG (index)))
;
else
return 0;
}
-/* Address override works only on the (%reg) part of %fs:(%reg). */
- if (seg != SEG_DEFAULT
- && ((base && GET_MODE (base) != word_mode)
- || (index && GET_MODE (index) != word_mode)))
- return 0;
-
/* Extract the integral value of scale. */
if (scale_rtx)
{
@@ -12592,6 +12570,45 @@ ix86_legitimize_reload_address (rtx x,
return false;
}
+/* Determine if op is suitable RTX for an address register.
+ Return naked register if a register or a register subreg is
+ found, otherwise return NULL_RTX. */
+
+static rtx
+ix86_validate_address_register (rtx op)
+{
+ enum machine_mode mode = GET_MODE (op);
+
+ /* Only SImode or DImode registers can form the address. */
+ if (mode != SImode && mode != DImode)
+ return NULL_RTX;
+
+ if (REG_P (op))
+ return op;
+ else if (GET_CODE (op) == SUBREG)
+ {
+ rtx reg = SUBREG_REG (op);
+
+ if (!REG_P (reg))
+ return NULL_RTX;
+
+ mode = GET_MODE (reg);
+
+ /* Don't allow SUBREGs that span more than a word. It can
+ lead to spill failures when the register is one word out
+ of a two word structure. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return NULL_RTX;
+
+ /* Allow only SUBREGs of non-eliminable hard registers. */
+ if (register_no_elim_operand (reg, mode))
+ return reg;
+ }
+
+ /* Op is not a register. */
+ return NULL_RTX;
+}
+
/* Recognizes RTL expressions that are valid memory addresses for an
instruction. The MODE argument is the machine mode for the MEM
expression that wants to use this address.
@@ -12607,6 +12624,7 @@ ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
struct ix86_address parts;
rtx base, index, disp;
HOST_WIDE_INT scale;
+ enum ix86_address_seg seg;
if (ix86_decompose_address (addr, &parts) <= 0)
/* Decomposition failed. */
@@ -12616,21 +12634,14 @@ ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
index = parts.index;
disp = parts.disp;
scale = parts.scale;
+ seg = parts.seg;
/* Validate base register. */
if (base)
{
- rtx reg;
-
- if (REG_P (base))
- reg = base;
- else if (GET_CODE (base) == SUBREG && REG_P (SUBREG_REG (base)))
- reg = SUBREG_REG (base);
- else
- /* Base is not a register. */
- return false;
+ rtx reg = ix86_validate_address_register (base);
- if (GET_MODE (base) != SImode && GET_MODE (base) != DImode)
+ if (reg == NULL_RTX)
return false;
if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
@@ -12642,17 +12653,9 @@ ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
/* Validate index register. */
if (index)
{
- rtx reg;
+ rtx reg = ix86_validate_address_register (index);
- if (REG_P (index))
- reg = index;
- else if (GET_CODE (index) == SUBREG && REG_P (SUBREG_REG (index)))
- reg = SUBREG_REG (index);
- else
- /* Index is not a register. */
- return false;
-
- if (GET_MODE (index) != SImode && GET_MODE (index) != DImode)
+ if (reg == NULL_RTX)
return false;
if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
@@ -12666,6 +12669,12 @@ ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
&& GET_MODE (base) != GET_MODE (index))
return false;
+ /* Address override works only on the (%reg) part of %fs:(%reg). */
+ if (seg != SEG_DEFAULT
+ && ((base && GET_MODE (base) != word_mode)
+ || (index && GET_MODE (index) != word_mode)))
+ return false;
+
/* Validate scale factor. */
if (scale != 1)
{
@@ -23712,7 +23721,8 @@ bool
ix86_expand_set_or_movmem (rtx dst, rtx src, rtx count_exp, rtx val_exp,
rtx align_exp, rtx expected_align_exp,
rtx expected_size_exp, rtx min_size_exp,
- rtx max_size_exp, bool issetmem)
+ rtx max_size_exp, rtx probable_max_size_exp,
+ bool issetmem)
{
rtx destreg;
rtx srcreg = NULL;
@@ -23736,6 +23746,7 @@ ix86_expand_set_or_movmem (rtx dst, rtx src, rtx count_exp, rtx val_exp,
/* TODO: Once value ranges are available, fill in proper data. */
unsigned HOST_WIDE_INT min_size = 0;
unsigned HOST_WIDE_INT max_size = -1;
+ unsigned HOST_WIDE_INT probable_max_size = -1;
bool misaligned_prologue_used = false;
if (CONST_INT_P (align_exp))
@@ -23751,13 +23762,19 @@ ix86_expand_set_or_movmem (rtx dst, rtx src, rtx count_exp, rtx val_exp,
align = MEM_ALIGN (dst) / BITS_PER_UNIT;
if (CONST_INT_P (count_exp))
- min_size = max_size = count = expected_size = INTVAL (count_exp);
- if (min_size_exp)
- min_size = INTVAL (min_size_exp);
- if (max_size_exp)
- max_size = INTVAL (max_size_exp);
- if (CONST_INT_P (expected_size_exp) && count == 0)
- expected_size = INTVAL (expected_size_exp);
+ min_size = max_size = probable_max_size = count = expected_size
+ = INTVAL (count_exp);
+ else
+ {
+ if (min_size_exp)
+ min_size = INTVAL (min_size_exp);
+ if (max_size_exp)
+ max_size = INTVAL (max_size_exp);
+ if (probable_max_size_exp)
+ probable_max_size = INTVAL (probable_max_size_exp);
+ if (CONST_INT_P (expected_size_exp) && count == 0)
+ expected_size = INTVAL (expected_size_exp);
+ }
/* Make sure we don't need to care about overflow later on. */
if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
@@ -23765,7 +23782,8 @@ ix86_expand_set_or_movmem (rtx dst, rtx src, rtx count_exp, rtx val_exp,
/* Step 0: Decide on preferred algorithm, desired alignment and
size of chunks to be copied by main loop. */
- alg = decide_alg (count, expected_size, min_size, max_size, issetmem,
+ alg = decide_alg (count, expected_size, min_size, probable_max_size,
+ issetmem,
issetmem && val_exp == const0_rtx,
&dynamic_check, &noalign);
if (alg == libcall)
@@ -29820,7 +29838,7 @@ add_condition_to_bb (tree function_decl, tree version_decl,
make_edge (bb1, bb3, EDGE_FALSE_VALUE);
remove_edge (e23);
- make_edge (bb2, EXIT_BLOCK_PTR, 0);
+ make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
pop_cfun ();
@@ -36555,7 +36573,7 @@ ix86_pad_returns (void)
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
basic_block bb = e->src;
rtx ret = BB_END (bb);
@@ -36655,14 +36673,14 @@ ix86_count_insn (basic_block bb)
edge prev_e;
edge_iterator prev_ei;
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
min_prev_count = 0;
break;
}
FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
{
- if (prev_e->src == ENTRY_BLOCK_PTR)
+ if (prev_e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
int count = ix86_count_insn_bb (e->src);
if (count < min_prev_count)
@@ -36686,7 +36704,7 @@ ix86_pad_short_function (void)
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx ret = BB_END (e->src);
if (JUMP_P (ret) && ANY_RETURN_P (PATTERN (ret)))
@@ -36726,7 +36744,7 @@ ix86_seh_fixup_eh_fallthru (void)
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx insn, next;
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 045d4ae8eb7..8178f9b9ab0 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -15506,13 +15506,15 @@
(use (match_operand:SI 4 "const_int_operand"))
(use (match_operand:SI 5 "const_int_operand"))
(use (match_operand:SI 6 ""))
- (use (match_operand:SI 7 ""))]
+ (use (match_operand:SI 7 ""))
+ (use (match_operand:SI 8 ""))]
""
{
if (ix86_expand_set_or_movmem (operands[0], operands[1],
operands[2], NULL, operands[3],
operands[4], operands[5],
- operands[6], operands[7], false))
+ operands[6], operands[7],
+ operands[8], false))
DONE;
else
FAIL;
@@ -15702,14 +15704,15 @@
(use (match_operand:SI 4 "const_int_operand"))
(use (match_operand:SI 5 "const_int_operand"))
(use (match_operand:SI 6 ""))
- (use (match_operand:SI 7 ""))]
+ (use (match_operand:SI 7 ""))
+ (use (match_operand:SI 8 ""))]
""
{
if (ix86_expand_set_or_movmem (operands[0], NULL,
operands[1], operands[2],
operands[3], operands[4],
operands[5], operands[6],
- operands[7], true))
+ operands[7], operands[8], true))
DONE;
else
FAIL;
diff --git a/gcc/config/i386/winnt-cxx.c b/gcc/config/i386/winnt-cxx.c
index 92de46abd59..d466299abed 100644
--- a/gcc/config/i386/winnt-cxx.c
+++ b/gcc/config/i386/winnt-cxx.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
#include "cp/cp-tree.h" /* This is why we're a separate module. */
#include "flags.h"
#include "tm_p.h"
diff --git a/gcc/config/i386/winnt.c b/gcc/config/i386/winnt.c
index 94155d89a7f..2c1677eec88 100644
--- a/gcc/config/i386/winnt.c
+++ b/gcc/config/i386/winnt.c
@@ -27,6 +27,8 @@ along with GCC; see the file COPYING3. If not see
#include "hard-reg-set.h"
#include "output.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "flags.h"
#include "tm_p.h"
#include "diagnostic-core.h"
diff --git a/gcc/config/ia64/ia64-c.c b/gcc/config/ia64/ia64-c.c
index 4d4dbc84369..6489668b7d9 100644
--- a/gcc/config/ia64/ia64-c.c
+++ b/gcc/config/ia64/ia64-c.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "cpplib.h"
#include "c-family/c-common.h"
#include "c-family/c-pragma.h"
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 4fde7aab43e..71bc666b685 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -25,6 +25,10 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
@@ -3488,7 +3492,7 @@ ia64_expand_prologue (void)
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if ((e->flags & EDGE_FAKE) == 0
&& (e->flags & EDGE_FALLTHRU) != 0)
break;
@@ -10183,7 +10187,8 @@ ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
if (NOTE_INSN_BASIC_BLOCK_P (insn))
{
- last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
+ last_block = NOTE_BASIC_BLOCK (insn)->next_bb
+ == EXIT_BLOCK_PTR_FOR_FN (cfun);
/* Restore unwind state from immediately before the epilogue. */
if (need_copy_state)
diff --git a/gcc/config/iq2000/iq2000.c b/gcc/config/iq2000/iq2000.c
index e65d0ccdc03..e6d1171ca85 100644
--- a/gcc/config/iq2000/iq2000.c
+++ b/gcc/config/iq2000/iq2000.c
@@ -22,6 +22,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/lm32/lm32.c b/gcc/config/lm32/lm32.c
index 6483a03e57d..6bddc488727 100644
--- a/gcc/config/lm32/lm32.c
+++ b/gcc/config/lm32/lm32.c
@@ -35,6 +35,7 @@
#include "recog.h"
#include "output.h"
#include "tree.h"
+#include "calls.h"
#include "expr.h"
#include "flags.h"
#include "reload.h"
diff --git a/gcc/config/m32c/m32c.c b/gcc/config/m32c/m32c.c
index deac40c228f..ec30b8d7f9b 100644
--- a/gcc/config/m32c/m32c.c
+++ b/gcc/config/m32c/m32c.c
@@ -36,6 +36,9 @@
#include "diagnostic-core.h"
#include "obstack.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
diff --git a/gcc/config/m32r/m32r.c b/gcc/config/m32r/m32r.c
index c94da538fcf..6cee5d728b3 100644
--- a/gcc/config/m32r/m32r.c
+++ b/gcc/config/m32r/m32r.c
@@ -22,6 +22,10 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "stringpool.h"
+#include "calls.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/m68k/m68k.c b/gcc/config/m68k/m68k.c
index 7035504bfe3..2b5bc22ecb2 100644
--- a/gcc/config/m68k/m68k.c
+++ b/gcc/config/m68k/m68k.c
@@ -22,6 +22,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "calls.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
@@ -514,7 +517,7 @@ m68k_option_override (void)
{
enum target_device dev;
dev = all_microarchs[M68K_DEFAULT_TUNE].device;
- m68k_tune_flags = all_devices[dev]->flags;
+ m68k_tune_flags = all_devices[dev].flags;
}
#endif
else
diff --git a/gcc/config/mcore/mcore.c b/gcc/config/mcore/mcore.c
index 6550b6905f0..6bd60702fa2 100644
--- a/gcc/config/mcore/mcore.c
+++ b/gcc/config/mcore/mcore.c
@@ -23,6 +23,10 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "stringpool.h"
+#include "calls.h"
#include "tm_p.h"
#include "mcore.h"
#include "regs.h"
diff --git a/gcc/config/mep/mep.c b/gcc/config/mep/mep.c
index 489bef9c2d3..6ce6c530077 100644
--- a/gcc/config/mep/mep.c
+++ b/gcc/config/mep/mep.c
@@ -24,6 +24,10 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "varasm.h"
+#include "calls.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
diff --git a/gcc/config/microblaze/microblaze.c b/gcc/config/microblaze/microblaze.c
index 3258a95ef3a..93dede4d189 100644
--- a/gcc/config/microblaze/microblaze.c
+++ b/gcc/config/microblaze/microblaze.c
@@ -33,6 +33,9 @@
#include "insn-attr.h"
#include "recog.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "function.h"
#include "expr.h"
#include "flags.h"
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 80bbb00c2c8..d06d5747081 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -34,6 +34,10 @@ along with GCC; see the file COPYING3. If not see
#include "recog.h"
#include "output.h"
#include "tree.h"
+#include "varasm.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
@@ -14838,7 +14842,7 @@ r10k_simplify_address (rtx x, rtx insn)
/* Replace the incoming value of $sp with
virtual_incoming_args_rtx. */
if (x == stack_pointer_rtx
- && DF_REF_BB (def) == ENTRY_BLOCK_PTR)
+ && DF_REF_BB (def) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
newx = virtual_incoming_args_rtx;
}
else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
@@ -16072,10 +16076,13 @@ mips_reorg_process_insns (void)
if (crtl->profile)
cfun->machine->all_noreorder_p = false;
- /* Code compiled with -mfix-vr4120 or -mfix-24k can't be all noreorder
- because we rely on the assembler to work around some errata.
- The r5900 too has several bugs. */
- if (TARGET_FIX_VR4120 || TARGET_FIX_24K || TARGET_MIPS5900)
+ /* Code compiled with -mfix-vr4120, -mfix-rm7000 or -mfix-24k can't be
+ all noreorder because we rely on the assembler to work around some
+ errata. The R5900 too has several bugs. */
+ if (TARGET_FIX_VR4120
+ || TARGET_FIX_RM7000
+ || TARGET_FIX_24K
+ || TARGET_MIPS5900)
cfun->machine->all_noreorder_p = false;
/* The same is true for -mfix-vr4130 if we might generate MFLO or
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index c4a2a4a6862..11687b8a053 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -1167,6 +1167,7 @@ struct mips_cpu_info {
%{meva} %{mno-eva} \
%{msmartmips} %{mno-smartmips} \
%{mmt} %{mno-mt} \
+%{mfix-rm7000} %{mno-fix-rm7000} \
%{mfix-vr4120} %{mfix-vr4130} \
%{mfix-24k} \
%{noasmopt:-O0; O0|fno-delayed-branch:-O1; O*:-O2; :-O1} \
diff --git a/gcc/config/mips/mips.md b/gcc/config/mips/mips.md
index 3554beb3033..6991f203df4 100644
--- a/gcc/config/mips/mips.md
+++ b/gcc/config/mips/mips.md
@@ -6776,7 +6776,7 @@
(define_insn "*mov<GPR:mode>_on_<MOVECC:mode>"
[(set (match_operand:GPR 0 "register_operand" "=d,d")
(if_then_else:GPR
- (match_operator:MOVECC 4 "equality_operator"
+ (match_operator 4 "equality_operator"
[(match_operand:MOVECC 1 "register_operand" "<MOVECC:reg>,<MOVECC:reg>")
(const_int 0)])
(match_operand:GPR 2 "reg_or_0_operand" "dJ,0")
@@ -6788,10 +6788,23 @@
[(set_attr "type" "condmove")
(set_attr "mode" "<GPR:MODE>")])
+(define_insn "*mov<GPR:mode>_on_<GPR2:mode>_ne"
+ [(set (match_operand:GPR 0 "register_operand" "=d,d")
+ (if_then_else:GPR
+ (match_operand:GPR2 1 "register_operand" "<GPR2:reg>,<GPR2:reg>")
+ (match_operand:GPR 2 "reg_or_0_operand" "dJ,0")
+ (match_operand:GPR 3 "reg_or_0_operand" "0,dJ")))]
+ "ISA_HAS_CONDMOVE"
+ "@
+ movn\t%0,%z2,%1
+ movz\t%0,%z3,%1"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "<GPR:MODE>")])
+
(define_insn "*mov<SCALARF:mode>_on_<MOVECC:mode>"
[(set (match_operand:SCALARF 0 "register_operand" "=f,f")
(if_then_else:SCALARF
- (match_operator:MOVECC 4 "equality_operator"
+ (match_operator 4 "equality_operator"
[(match_operand:MOVECC 1 "register_operand" "<MOVECC:reg>,<MOVECC:reg>")
(const_int 0)])
(match_operand:SCALARF 2 "register_operand" "f,0")
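The new *mov<GPR:mode>_on_<GPR2:mode>_ne pattern above lets the MIPS backend pick movn/movz when the if_then_else condition is a bare register (implicitly tested against zero) rather than a separate equality_operator. As a rough, hypothetical illustration (not part of the patch), source like the following is the kind of conditional select such patterns cover when optimization is enabled on a target where ISA_HAS_CONDMOVE holds:

/* Hypothetical example only: a select keyed on a register being
   nonzero.  On a MIPS target with conditional moves this is a natural
   candidate for movn/movz instead of a branch.  */
long
cond_select (long cond, long a, long b)
{
  return cond ? a : b;
}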
diff --git a/gcc/config/mips/mips.opt b/gcc/config/mips/mips.opt
index 0324041dbea..10faf4216a5 100644
--- a/gcc/config/mips/mips.opt
+++ b/gcc/config/mips/mips.opt
@@ -165,6 +165,10 @@ mfix-r4400
Target Report Mask(FIX_R4400)
Work around certain R4400 errata
+mfix-rm7000
+Target Report Var(TARGET_FIX_RM7000)
+Work around certain RM7000 errata
+
mfix-r10000
Target Report Mask(FIX_R10000)
Work around certain R10000 errata
diff --git a/gcc/config/mmix/mmix.c b/gcc/config/mmix/mmix.c
index 34b4fea7503..eb43af71d08 100644
--- a/gcc/config/mmix/mmix.c
+++ b/gcc/config/mmix/mmix.c
@@ -31,6 +31,9 @@ along with GCC; see the file COPYING3. If not see
#include "basic-block.h"
#include "flags.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
diff --git a/gcc/config/mn10300/mn10300.c b/gcc/config/mn10300/mn10300.c
index df563d03eac..7304e8638c7 100644
--- a/gcc/config/mn10300/mn10300.c
+++ b/gcc/config/mn10300/mn10300.c
@@ -24,6 +24,9 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
@@ -3226,7 +3229,6 @@ mn10300_loop_contains_call_insn (loop_p loop)
static void
mn10300_scan_for_setlb_lcc (void)
{
- loop_iterator liter;
loop_p loop;
DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
@@ -3241,7 +3243,7 @@ mn10300_scan_for_setlb_lcc (void)
if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
be the case that its parent loop is suitable. Thus we should check all
loops, but work from the innermost outwards. */
- FOR_EACH_LOOP (liter, loop, LI_ONLY_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
const char * reason = NULL;
diff --git a/gcc/config/moxie/moxie.c b/gcc/config/moxie/moxie.c
index d4f7d6d9d68..abba0aebd2d 100644
--- a/gcc/config/moxie/moxie.c
+++ b/gcc/config/moxie/moxie.c
@@ -36,6 +36,9 @@
#include "diagnostic-core.h"
#include "obstack.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
diff --git a/gcc/config/msp430/msp430.c b/gcc/config/msp430/msp430.c
index 8721f3a8229..e3f6712596a 100644
--- a/gcc/config/msp430/msp430.c
+++ b/gcc/config/msp430/msp430.c
@@ -23,6 +23,8 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/nds32/nds32.c b/gcc/config/nds32/nds32.c
index 7dfcdc7366d..80ca1f647b7 100644
--- a/gcc/config/nds32/nds32.c
+++ b/gcc/config/nds32/nds32.c
@@ -25,6 +25,9 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -4563,7 +4566,7 @@ nds32_fp_as_gp_check_available (void)
|| frame_pointer_needed
|| NDS32_REQUIRED_CALLEE_SAVED_P (FP_REGNUM)
|| (cfun->stdarg == 1)
- || (find_fallthru_edge (EXIT_BLOCK_PTR->preds) == NULL))
+ || (find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == NULL))
return 0;
/* Now we can check the possibility of using fp_as_gp optimization. */
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index 260830f00d7..2aa63c6bd0c 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -30,6 +30,10 @@ along with GCC; see the file COPYING3. If not see
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "varasm.h"
+#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
diff --git a/gcc/config/pdp11/pdp11.c b/gcc/config/pdp11/pdp11.c
index 0a310c50d79..42237b5d798 100644
--- a/gcc/config/pdp11/pdp11.c
+++ b/gcc/config/pdp11/pdp11.c
@@ -33,6 +33,9 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "recog.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "tm_p.h"
diff --git a/gcc/config/picochip/picochip.c b/gcc/config/picochip/picochip.c
index 641bccb81f6..4756cb78b72 100644
--- a/gcc/config/picochip/picochip.c
+++ b/gcc/config/picochip/picochip.c
@@ -34,6 +34,10 @@ along with GCC; see the file COPYING3. If not, see
#include "recog.h"
#include "obstack.h"
#include "tree.h"
+#include "calls.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
diff --git a/gcc/config/rl78/rl78.c b/gcc/config/rl78/rl78.c
index f071e31daf2..72aefc205a1 100644
--- a/gcc/config/rl78/rl78.c
+++ b/gcc/config/rl78/rl78.c
@@ -23,6 +23,9 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h
index 78e84531300..66b483ec116 100644
--- a/gcc/config/rs6000/linux64.h
+++ b/gcc/config/rs6000/linux64.h
@@ -71,7 +71,11 @@ extern int dot_symbols;
#undef PROCESSOR_DEFAULT
#define PROCESSOR_DEFAULT PROCESSOR_POWER7
#undef PROCESSOR_DEFAULT64
+#ifdef LINUX64_DEFAULT_ABI_ELFv2
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER8
+#else
#define PROCESSOR_DEFAULT64 PROCESSOR_POWER7
+#endif
/* We don't need to generate entries in .fixup, except when
-mrelocatable or -mrelocatable-lib is given. */
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index e2e5409d998..8cc0dc691cf 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -26,6 +26,8 @@
#include "tm.h"
#include "cpplib.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "wide-int.h"
#include "c-family/c-common.h"
#include "c-family/c-pragma.h"
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index c530ccde536..df2ca2440b3 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -32,6 +32,11 @@
#include "recog.h"
#include "obstack.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "print-tree.h"
+#include "varasm.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
@@ -3217,6 +3222,12 @@ rs6000_option_override_internal (bool global_init_p)
}
}
+ /* If little-endian, default to -mstrict-align on older processors.
+ Testing for htm matches power8 and later. */
+ if (!BYTES_BIG_ENDIAN
+ && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
+ rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
+
/* Add some warnings for VSX. */
if (TARGET_VSX)
{
@@ -7984,6 +7995,7 @@ rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
gcc_assert (!BYTES_BIG_ENDIAN
&& VECTOR_MEM_VSX_P (mode)
&& mode != TImode
+ && !gpr_or_gpr_p (dest, source)
&& (MEM_P (source) ^ MEM_P (dest)));
if (MEM_P (source))
@@ -22938,7 +22950,7 @@ rs6000_emit_prologue (void)
&& DEFAULT_ABI == ABI_V4
&& flag_pic
&& ! info->lr_save_p
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
if (save_LR_around_toc_setup)
{
rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
@@ -29801,6 +29813,8 @@ altivec_expand_vec_perm_const (rtx operands[4])
break;
if (i == 16)
{
+ if (!BYTES_BIG_ENDIAN)
+ elt = 15 - elt;
emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
return true;
}
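The altivec_expand_vec_perm_const hunk above renumbers the splat element for little-endian targets before emitting vspltb. A small sketch of that adjustment, assuming the usual 16-byte Altivec vector and big-endian byte numbering in the instruction encoding (the helper name is illustrative, not from the source):

/* Illustrative sketch: vspltb takes a big-endian byte index, so on a
   little-endian target the byte index found by the generic permute
   code must be mirrored within the 16-byte vector.  */
static unsigned int
vspltb_byte_index (unsigned int elt, int bytes_big_endian)
{
  return bytes_big_endian ? elt : 15 - elt;
}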
diff --git a/gcc/config/rs6000/sysv4.h b/gcc/config/rs6000/sysv4.h
index ba4ceb3ff2e..73c3ec16c2c 100644
--- a/gcc/config/rs6000/sysv4.h
+++ b/gcc/config/rs6000/sysv4.h
@@ -538,12 +538,7 @@ ENDIAN_SELECT(" -mbig", " -mlittle", DEFAULT_ASM_ENDIAN)
#define CC1_ENDIAN_BIG_SPEC ""
-#define CC1_ENDIAN_LITTLE_SPEC "\
-%{!mstrict-align: %{!mno-strict-align: \
- %{!mcall-i960-old: \
- -mstrict-align \
- } \
-}}"
+#define CC1_ENDIAN_LITTLE_SPEC ""
#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_big)"
diff --git a/gcc/config/rs6000/vector.md b/gcc/config/rs6000/vector.md
index 10a401813d1..650fbddc2bb 100644
--- a/gcc/config/rs6000/vector.md
+++ b/gcc/config/rs6000/vector.md
@@ -108,6 +108,7 @@
if (!BYTES_BIG_ENDIAN
&& VECTOR_MEM_VSX_P (<MODE>mode)
&& <MODE>mode != TImode
+ && !gpr_or_gpr_p (operands[0], operands[1])
&& (memory_operand (operands[0], <MODE>mode)
^ memory_operand (operands[1], <MODE>mode)))
{
diff --git a/gcc/config/rx/rx.c b/gcc/config/rx/rx.c
index 89860927a82..662ab9b72cc 100644
--- a/gcc/config/rx/rx.c
+++ b/gcc/config/rx/rx.c
@@ -27,6 +27,9 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/s390/htmxlintrin.h b/gcc/config/s390/htmxlintrin.h
index 800d5f0aa0c..d1c7ec566e1 100644
--- a/gcc/config/s390/htmxlintrin.h
+++ b/gcc/config/s390/htmxlintrin.h
@@ -33,13 +33,20 @@ extern "C" {
the IBM XL compiler. For documentation please see the "z/OS XL
C/C++ Programming Guide" publicly available on the web. */
-extern __inline long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+/* FIXME: __TM_simple_begin and __TM_begin should be marked
+ __always_inline__ as well but this currently produces an error
+ since the tbegin builtins are "returns_twice" and setjmp_call_p
+ (calls.c) therefore identifies the functions as calling setjmp.
+ The tree inliner currently refuses to inline functions calling
+ setjmp. */
+
+long
__TM_simple_begin ()
{
return __builtin_tbegin_nofloat (0);
}
-extern __inline long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+long
__TM_begin (void* const tdb)
{
return __builtin_tbegin_nofloat (tdb);
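A hedged sketch of the conflict the FIXME above describes, using a hypothetical stand-in for the tbegin builtin rather than the real one: a callee carrying the returns_twice attribute is treated like setjmp by setjmp_call_p, the tree inliner refuses to inline any caller of a setjmp-like function, and __always_inline__ turns that refusal into a hard error.

/* Hypothetical illustration, not part of this header.  */
extern int my_tbegin (void *) __attribute__ ((__returns_twice__));

static inline int __attribute__ ((__always_inline__))
begin_wrapper (void *tdb)
{
  /* The inliner sees a setjmp-like call here and refuses to inline
     begin_wrapper; with __always_inline__ this is reported as an
     error along the lines of "function can never be inlined because
     it uses setjmp".  */
  return my_tbegin (tdb);
}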
@@ -78,7 +85,7 @@ __TM_nesting_depth (void* const tdb_ptr)
if (depth != 0)
return depth;
- if (tdb->format == 0)
+ if (tdb->format != 1)
return 0;
return tdb->nesting_depth;
}
@@ -90,7 +97,7 @@ __TM_is_user_abort (void* const tdb_ptr)
{
struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
- if (tdb->format == 0)
+ if (tdb->format != 1)
return 0;
return !!(tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE);
@@ -101,7 +108,7 @@ __TM_is_named_user_abort (void* const tdb_ptr, unsigned char* code)
{
struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
- if (tdb->format == 0)
+ if (tdb->format != 1)
return 0;
if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE)
@@ -117,7 +124,7 @@ __TM_is_illegal (void* const tdb_ptr)
{
struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
- return (tdb->format == 0
+ return (tdb->format == 1
&& (tdb->abort_code == 4 /* unfiltered program interruption */
|| tdb->abort_code == 11 /* restricted instruction */));
}
@@ -127,7 +134,7 @@ __TM_is_footprint_exceeded (void* const tdb_ptr)
{
struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
- return (tdb->format == 0
+ return (tdb->format == 1
&& (tdb->abort_code == 7 /* fetch overflow */
|| tdb->abort_code == 8 /* store overflow */));
}
@@ -137,7 +144,7 @@ __TM_is_nested_too_deep (void* const tdb_ptr)
{
struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
- return tdb->format == 0 && tdb->abort_code == 13; /* depth exceeded */
+ return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */
}
extern __inline long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -145,7 +152,7 @@ __TM_is_conflict (void* const tdb_ptr)
{
struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr;
- return (tdb->format == 0
+ return (tdb->format == 1
&& (tdb->abort_code == 9 /* fetch conflict */
|| tdb->abort_code == 10 /* store conflict */));
}
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index 39453038fe7..62d162ab087 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -26,6 +26,11 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "print-tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -895,7 +900,8 @@ s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
{
/* For CCRAWmode put the required cc mask into the second
operand. */
- if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode)
+ if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
+ && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
*op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
*op0 = XVECEXP (*op0, 0, 0);
*code = new_code;
@@ -7964,10 +7970,13 @@ s390_optimize_nonescaping_tx (void)
if (!cfun->machine->tbegin_p)
return;
- for (bb_index = 0; bb_index < n_basic_blocks; bb_index++)
+ for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
{
bb = BASIC_BLOCK (bb_index);
+ if (!bb)
+ continue;
+
FOR_BB_INSNS (bb, insn)
{
rtx ite, cc, pat, target;
@@ -8081,7 +8090,10 @@ s390_optimize_nonescaping_tx (void)
if (!result)
return;
- PATTERN (tbegin_insn) = XVECEXP (PATTERN (tbegin_insn), 0, 0);
+ PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ XVECEXP (PATTERN (tbegin_insn), 0, 0),
+ XVECEXP (PATTERN (tbegin_insn), 0, 1)));
INSN_CODE (tbegin_insn) = -1;
df_insn_rescan (tbegin_insn);
@@ -9793,6 +9805,7 @@ s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
const int CC3 = 1 << 0;
rtx abort_label = gen_label_rtx ();
rtx leave_label = gen_label_rtx ();
+ rtx retry_plus_two = gen_reg_rtx (SImode);
rtx retry_reg = gen_reg_rtx (SImode);
rtx retry_label = NULL_RTX;
rtx jump;
@@ -9801,16 +9814,17 @@ s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
if (retry != NULL_RTX)
{
emit_move_insn (retry_reg, retry);
+ emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
+ emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
retry_label = gen_label_rtx ();
emit_label (retry_label);
}
if (clobber_fprs_p)
- emit_insn (gen_tbegin_1 (tdb,
- gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK)));
+ emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK), tdb));
else
- emit_insn (gen_tbegin_nofloat_1 (tdb,
- gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK)));
+ emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
+ tdb));
jump = s390_emit_jump (abort_label,
gen_rtx_NE (VOIDmode,
@@ -9831,6 +9845,10 @@ s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
/* Abort handler code. */
emit_label (abort_label);
+ emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
+ gen_rtvec (1, gen_rtx_REG (CCRAWmode,
+ CC_REGNUM)),
+ UNSPEC_CC_TO_INT));
if (retry != NULL_RTX)
{
rtx count = gen_reg_rtx (SImode);
@@ -9842,7 +9860,7 @@ s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
add_int_reg_note (jump, REG_BR_PROB, very_unlikely);
/* CC2 - transient failure. Perform retry with ppa. */
- emit_move_insn (count, retry);
+ emit_move_insn (count, retry_plus_two);
emit_insn (gen_subsi3 (count, count, retry_reg));
emit_insn (gen_tx_assist (count));
jump = emit_jump_insn (gen_doloop_si64 (retry_label,
@@ -9852,10 +9870,6 @@ s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
LABEL_NUSES (retry_label) = 1;
}
- emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
- gen_rtvec (1, gen_rtx_REG (CCRAWmode,
- CC_REGNUM)),
- UNSPEC_CC_TO_INT));
emit_label (leave_label);
}
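For readability, here is a rough C-level sketch of the retry expansion after the changes above. The names mirror the RTL temporaries, and hw_tbegin/hw_ppa are hypothetical stand-ins for the emitted instructions, so this is illustrative rather than the actual expansion:

extern int hw_tbegin (int mask, void *tdb);  /* stand-in: operands are now (mask, tdb) */
extern void hw_ppa (int count);              /* stand-in for the processor-assist insn */

static int
tbegin_retry_sketch (int retry, int mask, void *tdb)
{
  int dest = 0;
  int remaining = retry + 1;  /* retry_reg after the added addsi3 */
  int limit = retry + 2;      /* retry_plus_two */
  do
    {
      int cc = hw_tbegin (mask, tdb);
      if (cc == 0)            /* transaction started; success path not shown */
        break;
      dest = cc;              /* the CC-to-int move now runs on every abort */
      if (cc == 2)            /* CC2: transient failure */
        hw_ppa (limit - remaining);  /* the assist sees the attempt count 1, 2, ... */
    }
  while (--remaining != 0);   /* doloop on retry_reg */
  return dest;
}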
@@ -9894,6 +9908,9 @@ static void
s390_init_builtins (void)
{
tree ftype, uint64_type;
+ tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
+ NULL, NULL);
+ tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
/* void foo (void) */
ftype = build_function_type_list (void_type_node, NULL_TREE);
@@ -9904,17 +9921,17 @@ s390_init_builtins (void)
ftype = build_function_type_list (void_type_node, integer_type_node,
NULL_TREE);
add_builtin_function ("__builtin_tabort", ftype,
- S390_BUILTIN_TABORT, BUILT_IN_MD, NULL, NULL_TREE);
+ S390_BUILTIN_TABORT, BUILT_IN_MD, NULL, noreturn_attr);
add_builtin_function ("__builtin_tx_assist", ftype,
S390_BUILTIN_TX_ASSIST, BUILT_IN_MD, NULL, NULL_TREE);
/* int foo (void *) */
ftype = build_function_type_list (integer_type_node, ptr_type_node, NULL_TREE);
add_builtin_function ("__builtin_tbegin", ftype, S390_BUILTIN_TBEGIN,
- BUILT_IN_MD, NULL, NULL_TREE);
+ BUILT_IN_MD, NULL, returns_twice_attr);
add_builtin_function ("__builtin_tbegin_nofloat", ftype,
S390_BUILTIN_TBEGIN_NOFLOAT,
- BUILT_IN_MD, NULL, NULL_TREE);
+ BUILT_IN_MD, NULL, returns_twice_attr);
/* int foo (void *, int) */
ftype = build_function_type_list (integer_type_node, ptr_type_node,
@@ -9922,11 +9939,11 @@ s390_init_builtins (void)
add_builtin_function ("__builtin_tbegin_retry", ftype,
S390_BUILTIN_TBEGIN_RETRY,
BUILT_IN_MD,
- NULL, NULL_TREE);
+ NULL, returns_twice_attr);
add_builtin_function ("__builtin_tbegin_retry_nofloat", ftype,
S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
BUILT_IN_MD,
- NULL, NULL_TREE);
+ NULL, returns_twice_attr);
/* int foo (void) */
ftype = build_function_type_list (integer_type_node, NULL_TREE);
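The attribute lists built above make the transactional-execution builtins carry returns_twice and make __builtin_tabort carry noreturn. Roughly, and only as an illustration of the user-visible effect, the builtins now behave as if declared like this:

/* Illustrative prototypes; the builtins are created internally by
   s390_init_builtins, not declared in a header.  */
extern int  __builtin_tbegin (void *) __attribute__ ((returns_twice));
extern int  __builtin_tbegin_nofloat (void *) __attribute__ ((returns_twice));
extern int  __builtin_tbegin_retry (void *, int) __attribute__ ((returns_twice));
extern int  __builtin_tbegin_retry_nofloat (void *, int) __attribute__ ((returns_twice));
extern void __builtin_tabort (int) __attribute__ ((noreturn));

This is also why the htmxlintrin.h wrappers above can no longer be marked __always_inline__.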
diff --git a/gcc/config/s390/s390.md b/gcc/config/s390/s390.md
index 8354e263892..d537d29d24f 100644
--- a/gcc/config/s390/s390.md
+++ b/gcc/config/s390/s390.md
@@ -155,6 +155,7 @@
; Transactional Execution support
UNSPECV_TBEGIN
+ UNSPECV_TBEGIN_TDB
UNSPECV_TBEGINC
UNSPECV_TEND
UNSPECV_TABORT
@@ -9997,9 +9998,10 @@
(define_insn "tbegin_1"
[(set (reg:CCRAW CC_REGNUM)
- (unspec_volatile:CCRAW [(match_operand:BLK 0 "memory_operand" "=Q")
- (match_operand 1 "const_int_operand" " D")]
+ (unspec_volatile:CCRAW [(match_operand 0 "const_int_operand" "D")]
UNSPECV_TBEGIN))
+ (set (match_operand:BLK 1 "memory_operand" "=Q")
+ (unspec_volatile:BLK [(match_dup 0)] UNSPECV_TBEGIN_TDB))
(clobber (reg:DF 16))
(clobber (reg:DF 17))
(clobber (reg:DF 18))
@@ -10018,18 +10020,19 @@
(clobber (reg:DF 31))]
; CONST_OK_FOR_CONSTRAINT_P does not work with D constraint since D is
; not supposed to be used for immediates (see genpreds.c).
- "TARGET_HTM && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 0xffff"
- "tbegin\t%0,%x1"
+ "TARGET_HTM && INTVAL (operands[0]) >= 0 && INTVAL (operands[0]) <= 0xffff"
+ "tbegin\t%1,%x0"
[(set_attr "op_type" "SIL")])
; Same as above but without the FPR clobbers
(define_insn "tbegin_nofloat_1"
[(set (reg:CCRAW CC_REGNUM)
- (unspec_volatile:CCRAW [(match_operand:BLK 0 "memory_operand" "=Q")
- (match_operand 1 "const_int_operand" " D")]
- UNSPECV_TBEGIN))]
- "TARGET_HTM && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 0xffff"
- "tbegin\t%0,%x1"
+ (unspec_volatile:CCRAW [(match_operand 0 "const_int_operand" "D")]
+ UNSPECV_TBEGIN))
+ (set (match_operand:BLK 1 "memory_operand" "=Q")
+ (unspec_volatile:BLK [(match_dup 0)] UNSPECV_TBEGIN_TDB))]
+ "TARGET_HTM && INTVAL (operands[0]) >= 0 && INTVAL (operands[0]) <= 0xffff"
+ "tbegin\t%1,%x0"
[(set_attr "op_type" "SIL")])
@@ -10113,15 +10116,12 @@
; Transaction perform processor assist
(define_expand "tx_assist"
- [(set (match_dup 1) (const_int 0))
- (unspec_volatile [(match_operand:SI 0 "register_operand" "")
- (match_dup 1)
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "")
+ (reg:SI GPR0_REGNUM)
(const_int 1)]
UNSPECV_PPA)]
"TARGET_HTM"
-{
- operands[1] = gen_reg_rtx (SImode);
-})
+ "")
(define_insn "*ppa"
[(unspec_volatile [(match_operand:SI 0 "register_operand" "d")
@@ -10129,5 +10129,5 @@
(match_operand 2 "const_int_operand" "I")]
UNSPECV_PPA)]
"TARGET_HTM && INTVAL (operands[2]) < 16"
- "ppa\t%0,%1,1"
+ "ppa\t%0,%1,%2"
[(set_attr "op_type" "RRF")])
diff --git a/gcc/config/score/score.c b/gcc/config/score/score.c
index c25aaa2da93..3fdf2ea9050 100644
--- a/gcc/config/score/score.c
+++ b/gcc/config/score/score.c
@@ -32,6 +32,10 @@
#include "diagnostic-core.h"
#include "output.h"
#include "tree.h"
+#include "stringpool.h"
+#include "calls.h"
+#include "varasm.h"
+#include "stor-layout.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
diff --git a/gcc/config/sh/sh-c.c b/gcc/config/sh/sh-c.c
index 4f3a41a46e7..0d7937f4822 100644
--- a/gcc/config/sh/sh-c.c
+++ b/gcc/config/sh/sh-c.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
#include "tm_p.h"
#include "cpplib.h"
#include "c-family/c-common.h"
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index 088ef396313..d5f7f15c1cc 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -26,6 +26,10 @@ along with GCC; see the file COPYING3. If not see
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "flags.h"
#include "expr.h"
#include "optabs.h"
diff --git a/gcc/config/sol2-c.c b/gcc/config/sol2-c.c
index 0accac40020..1a47e39e9cf 100644
--- a/gcc/config/sol2-c.c
+++ b/gcc/config/sol2-c.c
@@ -22,6 +22,8 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
#include "tm.h"
#include "tm_p.h"
diff --git a/gcc/config/sol2-cxx.c b/gcc/config/sol2-cxx.c
index e1b450d759b..d3d79554b96 100644
--- a/gcc/config/sol2-cxx.c
+++ b/gcc/config/sol2-cxx.c
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "cp/cp-tree.h"
#include "tm.h"
#include "tm_p.h"
diff --git a/gcc/config/sol2.c b/gcc/config/sol2.c
index 7c7c429db3d..4200e620e16 100644
--- a/gcc/config/sol2.c
+++ b/gcc/config/sol2.c
@@ -22,6 +22,8 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "output.h"
#include "tm.h"
#include "rtl.h"
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index ab8f501e4fa..13192c0aa21 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -25,6 +25,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/spu/spu-c.c b/gcc/config/spu/spu-c.c
index 215962fa7c9..7632ec1462c 100644
--- a/gcc/config/spu/spu-c.c
+++ b/gcc/config/spu/spu-c.c
@@ -20,6 +20,7 @@
#include "tm.h"
#include "cpplib.h"
#include "tree.h"
+#include "stringpool.h"
#include "c-family/c-common.h"
#include "c-family/c-pragma.h"
#include "tm_p.h"
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index e344b73fce6..315f1b3617f 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -28,6 +28,10 @@
#include "recog.h"
#include "obstack.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
@@ -2470,13 +2474,13 @@ spu_machine_dependent_reorg (void)
compact_blocks ();
spu_bb_info =
- (struct spu_bb_info *) xcalloc (n_basic_blocks,
+ (struct spu_bb_info *) xcalloc (n_basic_blocks_for_fn (cfun),
sizeof (struct spu_bb_info));
/* We need exact insn addresses and lengths. */
shorten_branches (get_insns ());
- for (i = n_basic_blocks - 1; i >= 0; i--)
+ for (i = n_basic_blocks_for_fn (cfun) - 1; i >= 0; i--)
{
bb = BASIC_BLOCK (i);
branch = 0;
diff --git a/gcc/config/stormy16/stormy16.c b/gcc/config/stormy16/stormy16.c
index 3a08534be51..d5a1fc6ad86 100644
--- a/gcc/config/stormy16/stormy16.c
+++ b/gcc/config/stormy16/stormy16.c
@@ -35,6 +35,10 @@
#include "diagnostic-core.h"
#include "obstack.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
diff --git a/gcc/config/tilegx/tilegx.c b/gcc/config/tilegx/tilegx.c
index bf13d11b820..809beefa305 100644
--- a/gcc/config/tilegx/tilegx.c
+++ b/gcc/config/tilegx/tilegx.c
@@ -41,6 +41,10 @@
#include "timevar.h"
#include "tree.h"
#include "gimple.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "gimplify.h"
#include "cfgloop.h"
#include "tilegx-builtins.h"
diff --git a/gcc/config/tilepro/tilepro.c b/gcc/config/tilepro/tilepro.c
index d497f64125f..9fe1b104f57 100644
--- a/gcc/config/tilepro/tilepro.c
+++ b/gcc/config/tilepro/tilepro.c
@@ -42,6 +42,10 @@
#include "timevar.h"
#include "tree.h"
#include "gimple.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "gimplify.h"
#include "cfgloop.h"
#include "tilepro-builtins.h"
diff --git a/gcc/config/v850/v850-c.c b/gcc/config/v850/v850-c.c
index 63ef368afc4..25158d50e09 100644
--- a/gcc/config/v850/v850-c.c
+++ b/gcc/config/v850/v850-c.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "cpplib.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
#include "c-family/c-pragma.h"
#include "diagnostic-core.h"
#include "ggc.h"
diff --git a/gcc/config/v850/v850.c b/gcc/config/v850/v850.c
index 006cff4bcdf..32fe73b1fa6 100644
--- a/gcc/config/v850/v850.c
+++ b/gcc/config/v850/v850.c
@@ -23,6 +23,10 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "calls.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/config/vax/vax.c b/gcc/config/vax/vax.c
index 2b6fd9aaa6e..90da3b9c2b8 100644
--- a/gcc/config/vax/vax.c
+++ b/gcc/config/vax/vax.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "rtl.h"
#include "df.h"
#include "tree.h"
+#include "calls.h"
+#include "varasm.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
diff --git a/gcc/config/vms/vms.c b/gcc/config/vms/vms.c
index ba1e2a69798..3047cfde9fe 100644
--- a/gcc/config/vms/vms.c
+++ b/gcc/config/vms/vms.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "vms-protos.h"
#include "ggc.h"
#include "target.h"
diff --git a/gcc/config/vxworks.c b/gcc/config/vxworks.c
index 2900d9785ea..2940ea11933 100644
--- a/gcc/config/vxworks.c
+++ b/gcc/config/vxworks.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "output.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
/* Like default_named_section_asm_out_constructor, except that even
constructors with DEFAULT_INIT_PRIORITY must go in a numbered
diff --git a/gcc/config/xtensa/xtensa.c b/gcc/config/xtensa/xtensa.c
index 6385c5df555..9e6bb23818f 100644
--- a/gcc/config/xtensa/xtensa.c
+++ b/gcc/config/xtensa/xtensa.c
@@ -34,6 +34,10 @@ along with GCC; see the file COPYING3. If not see
#include "recog.h"
#include "output.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "expr.h"
#include "flags.h"
#include "reload.h"
diff --git a/gcc/convert.c b/gcc/convert.c
index a2f2a334dbf..4cf500197ae 100644
--- a/gcc/convert.c
+++ b/gcc/convert.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "convert.h"
#include "diagnostic-core.h"
diff --git a/gcc/coretypes.h b/gcc/coretypes.h
index 07d0885923b..439b0cb013b 100644
--- a/gcc/coretypes.h
+++ b/gcc/coretypes.h
@@ -64,9 +64,8 @@ typedef const struct hwivec_def *const_hwivec;
union tree_node;
typedef union tree_node *tree;
typedef const union tree_node *const_tree;
-union gimple_statement_d;
-typedef union gimple_statement_d *gimple;
-typedef const union gimple_statement_d *const_gimple;
+typedef struct gimple_statement_base *gimple;
+typedef const struct gimple_statement_base *const_gimple;
typedef gimple gimple_seq;
struct gimple_stmt_iterator_d;
typedef struct gimple_stmt_iterator_d gimple_stmt_iterator;
diff --git a/gcc/coverage.c b/gcc/coverage.c
index 43f9c0cb6f1..f2ac5fcaa46 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -30,6 +30,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "flags.h"
#include "output.h"
#include "regs.h"
@@ -584,7 +586,7 @@ unsigned
coverage_compute_cfg_checksum (void)
{
basic_block bb;
- unsigned chksum = n_basic_blocks;
+ unsigned chksum = n_basic_blocks_for_fn (cfun);
FOR_EACH_BB (bb)
{
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 2906ed7a63a..1c0d9b7c121 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -83,6 +83,68 @@
* tree.c: Likewise.
* vtable-class-hierarchy.c: Likewise.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * call.c: Include stor-layout.h.
+ Include trans-mem.h.
+ Include stringpool.h.
+ * class.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include attribs.h.
+ * cp-gimplify.c: Include stor-layout.h.
+ * cvt.c: Include stor-layout.h.
+ * decl.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include attribs.h.
+ Include calls.h.
+ * decl2.c: Include stringpool.h.
+ Include varasm.h.
+ Include attribs.h.
+ Include stor-layout.h.
+ Include calls.h.
+ * error.c: Include stringpool.h.
+ * except.c: Include stringpool.h.
+ Include trans-mem.h.
+ Include attribs.h.
+ * init.c: Include stringpool.h.
+ Include varasm.h.
+ * lambda.c: Include stringpool.h.
+ * lex.c: Include stringpool.h.
+ * mangle.c: Include stor-layout.h.
+ Include stringpool.h.
+ * method.c: Include stringpool.h.
+ Include varasm.h.
+ * name-lookup.c: Include stringpool.h.
+ Include print-tree.h.
+ Include attribs.h.
+ * optimize.c: Include stringpool.h.
+ * parser.c: Include print-tree.h.
+ Include stringpool.h.
+ Include attribs.h.
+ Include trans-mem.h.
+ * pt.c: Include stringpool.h.
+ Include varasm.h.
+ Include attribs.h.
+ Include stor-layout.h.
+ * ptree.c: Include print-tree.h.
+ * repo.c: Include stringpool.h.
+ * rtti.c: Include stringpool.h.
+ Include stor-layout.h.
+ * semantics.c: Include stmt.h.
+ Include varasm.h.
+ Include stor-layout.h.
+ Include stringpool.h.
+ * tree.c: Include stor-layout.h.
+ Include print-tree.h.
+ Include tree-iterator.h.
+ * typeck.c: Include stor-layout.h.
+ Include varasm.h.
+ * typeck2.c: Include stor-layout.h.
+ Include varasm.h.
+ * vtable-class-hierarchy.c: Include stringpool.h.
+ Include stor-layout.h.
+
2013-11-12 Andrew MacLeod <amacleod@redhat.com>
* class.c: Include gimplify.h.
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index e2907badc3a..00ebed4320e 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -27,6 +27,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "trans-mem.h"
+#include "stringpool.h"
#include "cp-tree.h"
#include "flags.h"
#include "toplev.h"
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 9158d8a6665..027d235e83d 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -26,6 +26,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "attribs.h"
+#include "gimple.h"
#include "cp-tree.h"
#include "flags.h"
#include "toplev.h"
diff --git a/gcc/cp/cp-gimplify.c b/gcc/cp/cp-gimplify.c
index c464719ad42..e2629464dd9 100644
--- a/gcc/cp/cp-gimplify.c
+++ b/gcc/cp/cp-gimplify.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "cp-tree.h"
#include "c-family/c-common.h"
#include "tree-iterator.h"
diff --git a/gcc/cp/cvt.c b/gcc/cp/cvt.c
index c7201d540e1..6d0e34156ff 100644
--- a/gcc/cp/cvt.c
+++ b/gcc/cp/cvt.c
@@ -29,6 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "cp-tree.h"
#include "intl.h"
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 5e161067f57..babfc8893b8 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -31,6 +31,11 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "attribs.h"
+#include "calls.h"
#include "flags.h"
#include "cp-tree.h"
#include "tree-iterator.h"
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index 18456848492..93c75cbf4d6 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -31,6 +31,12 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
+#include "attribs.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "gimple.h"
#include "flags.h"
#include "cp-tree.h"
#include "decl.h"
diff --git a/gcc/cp/error.c b/gcc/cp/error.c
index 0ad9f0722db..5481523868d 100644
--- a/gcc/cp/error.c
+++ b/gcc/cp/error.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "cp-tree.h"
#include "flags.h"
#include "diagnostic.h"
diff --git a/gcc/cp/except.c b/gcc/cp/except.c
index ac2128d13b0..d7d009bdd69 100644
--- a/gcc/cp/except.c
+++ b/gcc/cp/except.c
@@ -26,6 +26,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "trans-mem.h"
+#include "attribs.h"
#include "cp-tree.h"
#include "flags.h"
#include "tree-inline.h"
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index 32d9f9bf45f..7b6f4e28e19 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -25,6 +25,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "cp-tree.h"
#include "flags.h"
#include "target.h"
diff --git a/gcc/cp/lambda.c b/gcc/cp/lambda.c
index f39ce1a5ea6..24aa2c55cc0 100644
--- a/gcc/cp/lambda.c
+++ b/gcc/cp/lambda.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "cgraph.h"
#include "tree-iterator.h"
#include "cp-tree.h"
diff --git a/gcc/cp/lex.c b/gcc/cp/lex.c
index d6ed809d61d..5d8a313f38b 100644
--- a/gcc/cp/lex.c
+++ b/gcc/cp/lex.c
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "input.h"
#include "tree.h"
+#include "stringpool.h"
#include "cp-tree.h"
#include "cpplib.h"
#include "flags.h"
diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c
index 69b1e312b7e..5b7120b4bcf 100644
--- a/gcc/cp/mangle.c
+++ b/gcc/cp/mangle.c
@@ -49,6 +49,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "tm_p.h"
#include "cp-tree.h"
#include "obstack.h"
diff --git a/gcc/cp/method.c b/gcc/cp/method.c
index d15d0a4d6f6..740536573cb 100644
--- a/gcc/cp/method.c
+++ b/gcc/cp/method.c
@@ -26,6 +26,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "cp-tree.h"
#include "flags.h"
#include "toplev.h"
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index ced596e310a..d0c024a120f 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -24,6 +24,9 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "flags.h"
#include "tree.h"
+#include "stringpool.h"
+#include "print-tree.h"
+#include "attribs.h"
#include "cp-tree.h"
#include "name-lookup.h"
#include "timevar.h"
diff --git a/gcc/cp/optimize.c b/gcc/cp/optimize.c
index c4ee8484bb9..b8df1347c33 100644
--- a/gcc/cp/optimize.c
+++ b/gcc/cp/optimize.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "cp-tree.h"
#include "input.h"
#include "params.h"
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index ec8350d37a2..ff45b8f3a71 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -25,6 +25,10 @@ along with GCC; see the file COPYING3. If not see
#include "timevar.h"
#include "cpplib.h"
#include "tree.h"
+#include "print-tree.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "trans-mem.h"
#include "cp-tree.h"
#include "intl.h"
#include "c-family/c-pragma.h"
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 3bc8ccb0ee3..bbf8f550f3e 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -29,6 +29,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
+#include "attribs.h"
+#include "stor-layout.h"
#include "intl.h"
#include "pointer-set.h"
#include "flags.h"
diff --git a/gcc/cp/ptree.c b/gcc/cp/ptree.c
index f4ca003be98..3c37a2aed3e 100644
--- a/gcc/cp/ptree.c
+++ b/gcc/cp/ptree.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "print-tree.h"
#include "cp-tree.h"
void
diff --git a/gcc/cp/repo.c b/gcc/cp/repo.c
index 7b6f7b8ce1b..47b91986582 100644
--- a/gcc/cp/repo.c
+++ b/gcc/cp/repo.c
@@ -29,6 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "cp-tree.h"
#include "input.h"
#include "obstack.h"
diff --git a/gcc/cp/rtti.c b/gcc/cp/rtti.c
index 5827540c9b2..0bac87909a0 100644
--- a/gcc/cp/rtti.c
+++ b/gcc/cp/rtti.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "cp-tree.h"
#include "flags.h"
#include "convert.h"
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index dce2b3749fc..11f781288e9 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -28,6 +28,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stmt.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "cp-tree.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index 4a0169c9b3f..2e3b58650d4 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -23,6 +23,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "print-tree.h"
+#include "tree-iterator.h"
#include "cp-tree.h"
#include "flags.h"
#include "tree-inline.h"
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index bff7f17780f..c9ddbe9ba24 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -29,6 +29,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "gimple.h"
#include "cp-tree.h"
#include "flags.h"
#include "diagnostic.h"
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index a6276744b2e..5040226fbe2 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -30,6 +30,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "intl.h"
#include "cp-tree.h"
#include "flags.h"
diff --git a/gcc/cp/vtable-class-hierarchy.c b/gcc/cp/vtable-class-hierarchy.c
index 2b343f1993e..00d4feee022 100644
--- a/gcc/cp/vtable-class-hierarchy.c
+++ b/gcc/cp/vtable-class-hierarchy.c
@@ -120,6 +120,8 @@ along with GCC; see the file COPYING3. If not see
#include "vtable-verify.h"
#include "gimple.h"
#include "gimplify.h"
+#include "stringpool.h"
+#include "stor-layout.h"
static int num_calls_to_regset = 0;
static int num_calls_to_regpair = 0;
diff --git a/gcc/cprop.c b/gcc/cprop.c
index 358fca9171a..9b8bd1e0c4b 100644
--- a/gcc/cprop.c
+++ b/gcc/cprop.c
@@ -967,7 +967,7 @@ cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& BB_HEAD (e->dest) == JUMP_LABEL (jump))
{
e->flags |= EDGE_FALLTHRU;
@@ -1376,7 +1376,7 @@ find_implicit_sets (void)
? BRANCH_EDGE (bb)->dest : FALLTHRU_EDGE (bb)->dest;
/* If DEST doesn't go anywhere, ignore it. */
- if (! dest || dest == EXIT_BLOCK_PTR)
+ if (! dest || dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* We have found a suitable implicit set. Try to record it now as
@@ -1612,7 +1612,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
old_dest = e->dest;
if (dest != NULL
&& dest != old_dest
- && dest != EXIT_BLOCK_PTR)
+ && dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
redirect_edge_and_branch_force (e, dest);
@@ -1664,15 +1664,15 @@ bypass_conditional_jumps (void)
rtx dest;
/* Note we start at block 1. */
- if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return 0;
bypass_last_basic_block = last_basic_block;
mark_dfs_back_edges ();
changed = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
- EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
/* Check for more than one predecessor. */
if (!single_pred_p (bb))
@@ -1729,24 +1729,25 @@ is_too_expensive (const char *pass)
which have a couple switch statements. Rather than simply
threshold the number of blocks, uses something with a more
graceful degradation. */
- if (n_edges > 20000 + n_basic_blocks * 4)
+ if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d edges/basic block",
- pass, n_basic_blocks, n_edges / n_basic_blocks);
+ pass, n_basic_blocks_for_fn (cfun),
+ n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));
return true;
}
/* If allocating memory for the cprop bitmap would take up too much
storage it's better just to disable the optimization. */
- if ((n_basic_blocks
+ if ((n_basic_blocks_for_fn (cfun)
* SBITMAP_SET_SIZE (max_reg_num ())
* sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d registers",
- pass, n_basic_blocks, max_reg_num ());
+ pass, n_basic_blocks_for_fn (cfun), max_reg_num ());
return true;
}
@@ -1763,7 +1764,7 @@ one_cprop_pass (void)
int changed = 0;
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
|| is_too_expensive (_ ("const/copy propagation disabled")))
return 0;
@@ -1835,7 +1836,8 @@ one_cprop_pass (void)
/* Allocate vars to track sets of regs. */
reg_set_bitmap = ALLOC_REG_SET (NULL);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR,
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun),
next_bb)
{
/* Reset tables used to keep track of what's still valid [since
@@ -1873,7 +1875,8 @@ one_cprop_pass (void)
if (dump_file)
{
fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
- current_function_name (), n_basic_blocks, bytes_used);
+ current_function_name (), n_basic_blocks_for_fn (cfun),
+ bytes_used);
fprintf (dump_file, "%d local const props, %d local copy props, ",
local_const_prop_count, local_copy_prop_count);
fprintf (dump_file, "%d global const props, %d global copy props\n\n",
diff --git a/gcc/cse.c b/gcc/cse.c
index 4b3226c2411..15e582cd223 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -6209,7 +6209,7 @@ cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
&& e == BRANCH_EDGE (previous_bb_in_path))
{
bb = FALLTHRU_EDGE (previous_bb_in_path)->dest;
- if (bb != EXIT_BLOCK_PTR
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_pred_p (bb)
/* We used to assert here that we would only see blocks
that we have not visited yet. But we may end up
@@ -6263,7 +6263,7 @@ cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
if (e
&& !((e->flags & EDGE_ABNORMAL_CALL) && cfun->has_nonlocal_label)
- && e->dest != EXIT_BLOCK_PTR
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_pred_p (e->dest)
/* Avoid visiting basic blocks twice. The large comment
above explains why this can happen. */
@@ -7175,7 +7175,7 @@ cse_cc_succs (basic_block bb, basic_block orig_bb, rtx cc_reg, rtx cc_src,
continue;
if (EDGE_COUNT (e->dest->preds) != 1
- || e->dest == EXIT_BLOCK_PTR
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
/* Avoid endless recursion on unreachable blocks. */
|| e->dest == orig_bb)
continue;
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index 1ef63f32c85..5fe79cf69c3 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -72,6 +72,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
#include "rtl.h"
#include "flags.h"
#include "regs.h"
@@ -2872,7 +2874,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED)
if (TREE_CODE (TREE_TYPE (decl)) == INTEGER_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
{
- HOST_WIDE_INT ival = TREE_INT_CST_LOW (DECL_INITIAL (decl));
+ HOST_WIDE_INT ival = tree_to_shwi (DECL_INITIAL (decl));
dbxout_begin_complex_stabs ();
dbxout_symbol_name (decl, NULL, 'c');
diff --git a/gcc/df-core.c b/gcc/df-core.c
index deea7551053..37876af7b01 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -1097,8 +1097,8 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
"n_basic_blocks %d n_edges %d"
" count %d (%5.2g)\n",
- n_basic_blocks, n_edges,
- dcount, dcount / (float)n_basic_blocks);
+ n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
+ dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
}
/* Worklist-based dataflow solver. It uses sbitmap as a worklist,
@@ -1606,7 +1606,7 @@ df_compact_blocks (void)
i++;
}
- gcc_assert (i == n_basic_blocks);
+ gcc_assert (i == n_basic_blocks_for_fn (cfun));
for (; i < last_basic_block; i++)
SET_BASIC_BLOCK (i, NULL);
@@ -1714,7 +1714,7 @@ static int *
df_compute_cfg_image (void)
{
basic_block bb;
- int size = 2 + (2 * n_basic_blocks);
+ int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
int i;
int * map;
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index 59fc2f64444..c6349c8b0a5 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -1007,7 +1007,7 @@ static void
df_lr_confluence_0 (basic_block bb)
{
bitmap op1 = &df_lr_get_bb_info (bb->index)->out;
- if (bb != EXIT_BLOCK_PTR)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_copy (op1, &df->hardware_regs_used);
}
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index aace96d4536..eb7e4d47e0c 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -3873,7 +3873,7 @@ df_entry_block_defs_collect (struct df_collection_rec *collection_rec,
EXECUTE_IF_SET_IN_BITMAP (entry_block_defs, 0, i, bi)
{
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
- ENTRY_BLOCK_PTR, NULL, DF_REF_REG_DEF, 0);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_DEF, 0);
}
df_canonize_collection_rec (collection_rec);
@@ -4034,17 +4034,17 @@ df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exi
EXECUTE_IF_SET_IN_BITMAP (exit_block_uses, 0, i, bi)
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
- EXIT_BLOCK_PTR, NULL, DF_REF_REG_USE, 0);
+ EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
/* It is deliberate that this is not put in the exit block uses but
I do not know why. */
if (reload_completed
&& !bitmap_bit_p (exit_block_uses, ARG_POINTER_REGNUM)
- && bb_has_eh_pred (EXIT_BLOCK_PTR)
+ && bb_has_eh_pred (EXIT_BLOCK_PTR_FOR_FN (cfun))
&& fixed_regs[ARG_POINTER_REGNUM])
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[ARG_POINTER_REGNUM], NULL,
- EXIT_BLOCK_PTR, NULL, DF_REF_REG_USE, 0);
+ EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
#endif
df_canonize_collection_rec (collection_rec);
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index a3fdbb5510d..6fc56b92aad 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -262,7 +262,8 @@ Objective-C and Objective-C++ Dialects}.
-Wpointer-arith -Wno-pointer-to-int-cast @gol
-Wredundant-decls -Wno-return-local-addr @gol
-Wreturn-type -Wsequence-point -Wshadow @gol
--Wsign-compare -Wsign-conversion -Wsizeof-pointer-memaccess @gol
+-Wsign-compare -Wsign-conversion -Wfloat-conversion @gol
+-Wsizeof-pointer-memaccess @gol
-Wstack-protector -Wstack-usage=@var{len} -Wstrict-aliasing @gol
-Wstrict-aliasing=n @gol -Wstrict-overflow -Wstrict-overflow=@var{n} @gol
-Wsuggest-attribute=@r{[}pure@r{|}const@r{|}noreturn@r{|}format@r{]} @gol
@@ -796,7 +797,8 @@ Objective-C and Objective-C++ Dialects}.
-mmad -mno-mad -mimadd -mno-imadd -mfused-madd -mno-fused-madd -nocpp @gol
-mfix-24k -mno-fix-24k @gol
-mfix-r4000 -mno-fix-r4000 -mfix-r4400 -mno-fix-r4400 @gol
--mfix-r10000 -mno-fix-r10000 -mfix-vr4120 -mno-fix-vr4120 @gol
+-mfix-r10000 -mno-fix-r10000 -mfix-rm7000 -mno-fix-rm7000 @gol
+-mfix-vr4120 -mno-fix-vr4120 @gol
-mfix-vr4130 -mno-fix-vr4130 -mfix-sb1 -mno-fix-sb1 @gol
-mflush-func=@var{func} -mno-flush-func @gol
-mbranch-cost=@var{num} -mbranch-likely -mno-branch-likely @gol
@@ -4591,6 +4593,14 @@ value, like assigning a signed integer expression to an unsigned
integer variable. An explicit cast silences the warning. In C, this
option is enabled also by @option{-Wconversion}.
+@item -Wfloat-conversion
+@opindex Wfloat-conversion
+@opindex Wno-float-conversion
+Warn for implicit conversions that reduce the precision of a real value.
+This includes conversions from real to integer, and from higher precision
+real to lower precision real values. This option is also enabled by
+@option{-Wconversion}.
+
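As a hypothetical illustration (not part of the patch), both conversions below are the kind -Wfloat-conversion is intended to flag:

/* Compile with e.g. -Wfloat-conversion (or -Wconversion).  */
void
float_conversion_examples (double d)
{
  int i = d;     /* real to integer conversion */
  float f = d;   /* higher precision real to lower precision real */
  (void) i;
  (void) f;
}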
@item -Wsizeof-pointer-memaccess
@opindex Wsizeof-pointer-memaccess
@opindex Wno-sizeof-pointer-memaccess
@@ -5260,9 +5270,44 @@ data race bugs.
See @uref{http://code.google.com/p/data-race-test/wiki/ThreadSanitizer} for more details.
@item -fsanitize=undefined
-Enable UndefinedBehaviorSanitizer, a fast undefined behavior detector
+Enable UndefinedBehaviorSanitizer, a fast undefined behavior detector.
Various computations will be instrumented to detect undefined behavior
-at runtime, e.g.@: division by zero or various overflows.
+at runtime. Current suboptions are:
+
+@itemize @bullet
+
+@item @option{-fsanitize=shift}
+
+This option enables checking that the result of a shift operation is
+not undefined. Note that what exactly is considered undefined differs
+slightly between C and C++, as well as between ISO C90 and C99, etc.
+
+@item @option{-fsanitize=integer-divide-by-zero}
+
+Detect integer division by zero as well as @code{INT_MIN / -1} division.
+
+@item @option{-fsanitize=unreachable}
+
+With this option, the compiler will turn the @code{__builtin_unreachable}
+call into a diagnostic message call instead.  When reaching the
+@code{__builtin_unreachable} call, the behavior is undefined.
+
+@item @option{-fsanitize=vla-bound}
+
+This option instructs the compiler to check that the size of a variable
+length array is positive. This option does not have any effect in
+@option{-std=c++1y} mode, as the standard requires the exception be thrown
+instead.
+
+@item @option{-fsanitize=null}
+
+This option enables pointer checking.  In particular, an application
+built with this option will issue an error message when it
+tries to dereference a NULL pointer, or when a reference (possibly an
+rvalue reference) is bound to a NULL pointer.
+
+@end itemize
+
While @option{-ftrapv} causes traps for signed overflows to be emitted,
@option{-fsanitize=undefined} gives a diagnostic message.
This currently works only for the C family of languages.
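As a hypothetical illustration of the suboptions documented above, the snippet below contains one instance of each kind of undefined behavior; building it with -fsanitize=undefined adds runtime checks that report these instead of letting them pass silently:

int
ubsan_examples (int x, int n, int *p)
{
  if (p == 0 && n == 0)
    __builtin_unreachable ();  /* -fsanitize=unreachable: reported if ever reached */
  int a = x << n;   /* -fsanitize=shift: reports n < 0 or n >= the bit width */
  int b = x / n;    /* -fsanitize=integer-divide-by-zero: reports n == 0 and INT_MIN / -1 */
  int c = *p;       /* -fsanitize=null: reports dereferencing a null p */
  int vla[n];       /* -fsanitize=vla-bound: reports a non-positive bound n */
  vla[0] = a;
  return b + c + vla[0];
}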
@@ -8137,6 +8182,8 @@ exception handling, for linkonce sections, for functions with a user-defined
section attribute and on any architecture that does not support named
sections.
+Enabled for x86 at levels @option{-O2}, @option{-O3}.
+
@item -freorder-functions
@opindex freorder-functions
Reorder functions in the object file in order to
@@ -8588,9 +8635,8 @@ need to support linker plugins to allow a full-featured build environment
@command{gcc-nm}, @command{gcc-ranlib} wrappers to pass the right options
to these tools. With non fat LTO makefiles need to be modified to use them.
-The default is @option{-ffat-lto-objects} but this default is intended to
-change in future releases when linker plugin enabled environments become more
-common.
+The default is @option{-fno-fat-lto-objects} on targets with linker plugin
+support.
@item -fcompare-elim
@opindex fcompare-elim
@@ -12153,6 +12199,12 @@ before execution begins.
Specify the register to be used for PIC addressing. The default is R10
unless stack-checking is enabled, when R9 is used.
+@item -mpic-data-is-text-relative
+@opindex mpic-data-is-text-relative
+Assume that the data segments are relative to the text segment at load
+time, so that data can be addressed using PC-relative operations.
+This option is on by default for targets other than VxWorks RTP.
+
@item -mpoke-function-name
@opindex mpoke-function-name
Write the name of each function into the text section, directly
@@ -17421,6 +17473,12 @@ branch-likely instructions. @option{-mfix-r10000} is the default when
@option{-march=r10000} is used; @option{-mno-fix-r10000} is the default
otherwise.
+@item -mfix-rm7000
+@itemx -mno-fix-rm7000
+@opindex mfix-rm7000
+Work around the RM7000 @code{dmult}/@code{dmultu} errata. The
+workarounds are implemented by the assembler rather than by GCC@.
+
@item -mfix-vr4120
@itemx -mno-fix-vr4120
@opindex mfix-vr4120
diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
index 2054295f66b..44a91830b48 100644
--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -5352,6 +5352,8 @@ all cases. This expected alignment is also in bytes, just like operand 4.
Expected size, when unknown, is set to @code{(const_int -1)}.
Operand 7 is the minimal size of the block and operand 8 is the
maximal size of the block (NULL if it can not be represented as CONST_INT).
+Operand 9 is the probable maximal size (i.e.@: it cannot be relied on for
+correctness, but it can be used to choose a proper code sequence for a given size).
The use for multiple @code{setmem@var{m}} is as for @code{movmem@var{m}}.
diff --git a/gcc/dojump.c b/gcc/dojump.c
index c3cff7dc082..51c3c8ce26a 100644
--- a/gcc/dojump.c
+++ b/gcc/dojump.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "function.h"
#include "insn-config.h"
diff --git a/gcc/dominance.c b/gcc/dominance.c
index 569f1f43b21..3d88c0d3ed1 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -146,7 +146,7 @@ static void
init_dom_info (struct dom_info *di, enum cdi_direction dir)
{
/* We need memory for n_basic_blocks nodes. */
- unsigned int num = n_basic_blocks;
+ unsigned int num = n_basic_blocks_for_fn (cfun);
init_ar (di->dfs_parent, TBB, num, 0);
init_ar (di->path_min, TBB, num, i);
init_ar (di->key, TBB, num, i);
@@ -233,21 +233,21 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, bool reverse)
/* Ending block. */
basic_block ex_block;
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Initialize our border blocks, and the first edge. */
if (reverse)
{
ei = ei_start (bb->preds);
- en_block = EXIT_BLOCK_PTR;
- ex_block = ENTRY_BLOCK_PTR;
+ en_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
+ ex_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
}
else
{
ei = ei_start (bb->succs);
- en_block = ENTRY_BLOCK_PTR;
- ex_block = EXIT_BLOCK_PTR;
+ en_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ ex_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
}
/* When the stack is empty we break out of this loop. */
@@ -333,7 +333,8 @@ static void
calc_dfs_tree (struct dom_info *di, bool reverse)
{
/* The first block is the ENTRY_BLOCK (or EXIT_BLOCK if REVERSE). */
- basic_block begin = reverse ? EXIT_BLOCK_PTR : ENTRY_BLOCK_PTR;
+ basic_block begin = (reverse
+ ? EXIT_BLOCK_PTR_FOR_FN (cfun) : ENTRY_BLOCK_PTR_FOR_FN (cfun));
di->dfs_order[last_basic_block] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = begin;
di->dfsnum++;
@@ -394,7 +395,7 @@ calc_dfs_tree (struct dom_info *di, bool reverse)
di->nodes = di->dfsnum - 1;
/* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all. */
- gcc_assert (di->nodes == (unsigned int) n_basic_blocks - 1);
+ gcc_assert (di->nodes == (unsigned int) n_basic_blocks_for_fn (cfun) - 1);
}
/* Compress the path from V to the root of its set and update path_min at the
@@ -501,9 +502,9 @@ calc_idoms (struct dom_info *di, bool reverse)
edge_iterator ei, einext;
if (reverse)
- en_block = EXIT_BLOCK_PTR;
+ en_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
else
- en_block = ENTRY_BLOCK_PTR;
+ en_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Go backwards in DFS order, to first look at the leafs. */
v = di->nodes;
@@ -652,7 +653,7 @@ calculate_dominance_info (enum cdi_direction dir)
{
b->dom[dir_index] = et_new_tree (b);
}
- n_bbs_in_dom_tree[dir_index] = n_basic_blocks;
+ n_bbs_in_dom_tree[dir_index] = n_basic_blocks_for_fn (cfun);
init_dom_info (&di, dir);
calc_dfs_tree (&di, reverse);
@@ -1097,7 +1098,7 @@ prune_bbs_to_update_dominators (vec<basic_block> bbs,
for (i = 0; bbs.iterate (i, &bb);)
{
- if (bb == ENTRY_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
goto succeed;
if (single_pred_p (bb))
@@ -1171,7 +1172,7 @@ determine_dominators_for_sons (struct graph *g, vec<basic_block> bbs,
if (son[y] == -1)
return;
if (y == (int) bbs.length ())
- ybb = ENTRY_BLOCK_PTR;
+ ybb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
else
ybb = bbs[y];
@@ -1344,7 +1345,7 @@ iterate_fix_dominators (enum cdi_direction dir, vec<basic_block> bbs,
set_immediate_dominator (CDI_DOMINATORS, bb, NULL);
*map->insert (bb) = i;
}
- *map->insert (ENTRY_BLOCK_PTR) = n;
+ *map->insert (ENTRY_BLOCK_PTR_FOR_FN (cfun)) = n;
g = new_graph (n + 1);
for (y = 0; y < g->n_vertices; y++)
diff --git a/gcc/domwalk.c b/gcc/domwalk.c
index 4816b4c8d85..3350e4bb510 100644
--- a/gcc/domwalk.c
+++ b/gcc/domwalk.c
@@ -150,13 +150,14 @@ void
dom_walker::walk (basic_block bb)
{
basic_block dest;
- basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks * 2);
+ basic_block *worklist = XNEWVEC (basic_block,
+ n_basic_blocks_for_fn (cfun) * 2);
int sp = 0;
int *postorder, postorder_num;
if (m_dom_direction == CDI_DOMINATORS)
{
- postorder = XNEWVEC (int, n_basic_blocks);
+ postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
postorder_num = inverted_post_order_compute (postorder);
bb_postorder = XNEWVEC (int, last_basic_block);
for (int i = 0; i < postorder_num; ++i)
@@ -168,8 +169,8 @@ dom_walker::walk (basic_block bb)
{
/* Don't worry about unreachable blocks. */
if (EDGE_COUNT (bb->preds) > 0
- || bb == ENTRY_BLOCK_PTR
- || bb == EXIT_BLOCK_PTR)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Callback for subclasses to do custom things before we have walked
the dominator children, but before we walk statements. */
diff --git a/gcc/dse.c b/gcc/dse.c
index b602caa291f..6584ea35bbe 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -29,6 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -2750,7 +2751,7 @@ dse_step1 (void)
if (stores_off_frame_dead_at_return
&& (EDGE_COUNT (bb->succs) == 0
|| (single_succ_p (bb)
- && single_succ (bb) == EXIT_BLOCK_PTR
+ && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& ! crtl->calls_eh_return)))
{
insn_info_t i_ptr = active_local_stores;
diff --git a/gcc/dwarf2asm.c b/gcc/dwarf2asm.c
index 69907f9fbf2..fc1b0825205 100644
--- a/gcc/dwarf2asm.c
+++ b/gcc/dwarf2asm.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "flags.h"
#include "tree.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "rtl.h"
#include "output.h"
#include "target.h"
diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c
index e0f85edc61b..19276e23a20 100644
--- a/gcc/dwarf2cfi.c
+++ b/gcc/dwarf2cfi.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
#include "function.h"
#include "basic-block.h"
#include "dwarf2.h"
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index be056603655..a02e8aa0f1b 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -59,10 +59,16 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "function.h"
+#include "emit-rtl.h"
+#include "gimple.h"
#include "version.h"
#include "flags.h"
-#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "insn-config.h"
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 89eb40874ae..7b7633a5507 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -38,9 +38,12 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
+#include "varasm.h"
+#include "gimple.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
+#include "stringpool.h"
#include "expr.h"
#include "regs.h"
#include "hard-reg-set.h"
diff --git a/gcc/emit-rtl.h b/gcc/emit-rtl.h
index 726809097e7..301c67b3545 100644
--- a/gcc/emit-rtl.h
+++ b/gcc/emit-rtl.h
@@ -113,4 +113,7 @@ get_max_uid (void)
{
return crtl->emit.x_cur_insn_uid;
}
+
+extern void set_decl_incoming_rtl (tree, rtx, bool);
+
#endif /* GCC_EMIT_RTL_H */
diff --git a/gcc/except.c b/gcc/except.c
index 6043ef293e6..f7dc193ce47 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -115,6 +115,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
@@ -1239,7 +1241,7 @@ sjlj_emit_function_enter (rtx dispatch_label)
}
if (fn_begin_outside_block)
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
+ insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
else
emit_insn_after (seq, fn_begin);
}
@@ -1507,7 +1509,7 @@ finish_eh_generation (void)
if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ
/* Kludge for Alpha (see alpha_gp_save_rtx). */
- || single_succ_edge (ENTRY_BLOCK_PTR)->insns.r)
+ || single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->insns.r)
commit_edge_insertions ();
/* Redirect all EH edges from the post_landing_pad to the landing pad. */
diff --git a/gcc/explow.c b/gcc/explow.c
index 72792cf9da0..5f98ba0d775 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "flags.h"
#include "except.h"
diff --git a/gcc/expmed.c b/gcc/expmed.c
index f66b087a6a9..cca7a0df9fb 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
diff --git a/gcc/expr.c b/gcc/expr.c
index a7ad8561e10..cdc1baff5af 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -24,6 +24,10 @@ along with GCC; see the file COPYING3. If not see
#include "machmode.h"
#include "rtl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "attribs.h"
+#include "varasm.h"
#include "flags.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -129,7 +133,8 @@ static void move_by_pieces_1 (insn_gen_fn, machine_mode,
struct move_by_pieces_d *);
static bool block_move_libcall_safe_for_call_parm (void);
static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
+ unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT);
static tree emit_block_move_libcall_fn (int);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode);
@@ -1100,7 +1105,8 @@ rtx
emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
unsigned int expected_align, HOST_WIDE_INT expected_size,
unsigned HOST_WIDE_INT min_size,
- unsigned HOST_WIDE_INT max_size)
+ unsigned HOST_WIDE_INT max_size,
+ unsigned HOST_WIDE_INT probable_max_size)
{
bool may_use_call;
rtx retval = 0;
@@ -1157,7 +1163,7 @@ emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
move_by_pieces (x, y, INTVAL (size), align, 0);
else if (emit_block_move_via_movmem (x, y, size, align,
expected_align, expected_size,
- min_size, max_size))
+ min_size, max_size, probable_max_size))
;
else if (may_use_call
&& ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
@@ -1193,7 +1199,7 @@ emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
else
max = GET_MODE_MASK (GET_MODE (size));
return emit_block_move_hints (x, y, size, method, 0, -1,
- min, max);
+ min, max, max);
}
/* A subroutine of emit_block_move. Returns true if calling the
@@ -1258,7 +1264,8 @@ static bool
emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
unsigned int expected_align, HOST_WIDE_INT expected_size,
unsigned HOST_WIDE_INT min_size,
- unsigned HOST_WIDE_INT max_size)
+ unsigned HOST_WIDE_INT max_size,
+ unsigned HOST_WIDE_INT probable_max_size)
{
int save_volatile_ok = volatile_ok;
enum machine_mode mode;
@@ -1267,8 +1274,8 @@ emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
expected_align = align;
if (expected_size != -1)
{
- if ((unsigned HOST_WIDE_INT)expected_size > max_size)
- expected_size = max_size;
+ if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
+ expected_size = probable_max_size;
if ((unsigned HOST_WIDE_INT)expected_size < min_size)
expected_size = min_size;
}
@@ -1297,7 +1304,7 @@ emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
|| max_size <= (GET_MODE_MASK (mode) >> 1)
|| GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
{
- struct expand_operand ops[8];
+ struct expand_operand ops[9];
unsigned int nops;
/* ??? When called via emit_block_move_for_call, it'd be
@@ -1305,7 +1312,7 @@ emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
that it doesn't fail the expansion because it thinks
emitting the libcall would be more efficient. */
nops = insn_data[(int) code].n_generator_args;
- gcc_assert (nops == 4 || nops == 6 || nops == 8);
+ gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
create_fixed_operand (&ops[0], x);
create_fixed_operand (&ops[1], y);
@@ -1317,7 +1324,7 @@ emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
create_integer_operand (&ops[5], expected_size);
}
- if (nops == 8)
+ if (nops >= 8)
{
create_integer_operand (&ops[6], min_size);
/* If we can not represent the maximal size,
@@ -1327,6 +1334,15 @@ emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
else
create_fixed_operand (&ops[7], NULL);
}
+ if (nops == 9)
+ {
+ /* If we can not represent the maximal size,
+ make parameter NULL. */
+ if ((HOST_WIDE_INT) probable_max_size != -1)
+ create_integer_operand (&ops[8], probable_max_size);
+ else
+ create_fixed_operand (&ops[8], NULL);
+ }
if (maybe_expand_insn (code, nops, ops))
{
volatile_ok = save_volatile_ok;
@@ -2717,7 +2733,8 @@ rtx
clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
unsigned int expected_align, HOST_WIDE_INT expected_size,
unsigned HOST_WIDE_INT min_size,
- unsigned HOST_WIDE_INT max_size)
+ unsigned HOST_WIDE_INT max_size,
+ unsigned HOST_WIDE_INT probable_max_size)
{
enum machine_mode mode = GET_MODE (object);
unsigned int align;
@@ -2759,7 +2776,7 @@ clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
clear_by_pieces (object, INTVAL (size), align);
else if (set_storage_via_setmem (object, size, const0_rtx, align,
expected_align, expected_size,
- min_size, max_size))
+ min_size, max_size, probable_max_size))
;
else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
return set_storage_via_libcall (object, size, const0_rtx,
@@ -2778,7 +2795,7 @@ clear_storage (rtx object, rtx size, enum block_op_methods method)
min = max = UINTVAL (size);
else
max = GET_MODE_MASK (GET_MODE (size));
- return clear_storage_hints (object, size, method, 0, -1, min, max);
+ return clear_storage_hints (object, size, method, 0, -1, min, max, max);
}
@@ -2877,7 +2894,8 @@ bool
set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
unsigned int expected_align, HOST_WIDE_INT expected_size,
unsigned HOST_WIDE_INT min_size,
- unsigned HOST_WIDE_INT max_size)
+ unsigned HOST_WIDE_INT max_size,
+ unsigned HOST_WIDE_INT probable_max_size)
{
/* Try the most limited insn first, because there's no point
including more than one in the machine description unless
@@ -2912,11 +2930,11 @@ set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
|| max_size <= (GET_MODE_MASK (mode) >> 1)
|| GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
{
- struct expand_operand ops[8];
+ struct expand_operand ops[9];
unsigned int nops;
nops = insn_data[(int) code].n_generator_args;
- gcc_assert (nops == 4 || nops == 6 || nops == 8);
+ gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
create_fixed_operand (&ops[0], object);
/* The check above guarantees that this size conversion is valid. */
@@ -2928,7 +2946,7 @@ set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
create_integer_operand (&ops[5], expected_size);
}
- if (nops == 8)
+ if (nops >= 8)
{
create_integer_operand (&ops[6], min_size);
/* If we can not represent the maximal size,
@@ -2938,6 +2956,15 @@ set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
else
create_fixed_operand (&ops[7], NULL);
}
+ if (nops == 9)
+ {
+ /* If we can not represent the maximal size,
+ make parameter NULL. */
+ if ((HOST_WIDE_INT) probable_max_size != -1)
+ create_integer_operand (&ops[8], probable_max_size);
+ else
+ create_fixed_operand (&ops[8], NULL);
+ }
if (maybe_expand_insn (code, nops, ops))
return true;
}
diff --git a/gcc/expr.h b/gcc/expr.h
index 8230fd54bb1..a2cd6690bfe 100644
--- a/gcc/expr.h
+++ b/gcc/expr.h
@@ -302,6 +302,7 @@ extern rtx emit_block_move_via_libcall (rtx, rtx, rtx, bool);
extern rtx emit_block_move_hints (rtx, rtx, rtx, enum block_op_methods,
unsigned int, HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT);
extern bool emit_storent_insn (rtx to, rtx from);
@@ -365,6 +366,7 @@ extern rtx clear_storage (rtx, rtx, enum block_op_methods);
extern rtx clear_storage_hints (rtx, rtx, enum block_op_methods,
unsigned int, HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT);
/* The same, but always output an library call. */
rtx set_storage_via_libcall (rtx, rtx, rtx, bool);
@@ -373,6 +375,7 @@ rtx set_storage_via_libcall (rtx, rtx, rtx, bool);
extern bool set_storage_via_setmem (rtx, rtx, rtx, unsigned int,
unsigned int, HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT);
extern unsigned HOST_WIDE_INT move_by_pieces_ninsns (unsigned HOST_WIDE_INT,
@@ -745,4 +748,23 @@ extern void expand_case (gimple);
/* Like expand_case but special-case for SJLJ exception dispatching. */
extern void expand_sjlj_dispatch_table (rtx, vec<tree> );
+/* Determine whether the LEN bytes can be moved by using several move
+ instructions. Return nonzero if a call to move_by_pieces should
+ succeed. */
+extern int can_move_by_pieces (unsigned HOST_WIDE_INT, unsigned int);
+
+extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree);
+bool array_at_struct_end_p (tree);
+
+/* Return a tree of sizetype representing the size, in bytes, of the element
+ of EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
+extern tree array_ref_element_size (tree);
+
+extern bool categorize_ctor_elements (const_tree, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, bool *);
+
+/* Return a tree representing the offset, in bytes, of the field referenced
+ by EXP. This does not include any offset in DECL_FIELD_BIT_OFFSET. */
+extern tree component_ref_field_offset (tree);
+
#endif /* GCC_EXPR_H */
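
For illustration only (this caller does not exist in the patch), a minimal sketch of how the extended emit_block_move_hints interface could be used when only a profile-based size guess is available; the function and parameter names here are hypothetical.

/* Hypothetical caller: pass a profiled size guess as probable_max_size
   while keeping max_size conservative, mirroring emit_block_move above.  */
static rtx
expand_copy_with_profile_hint (rtx dst, rtx src, rtx size_rtx,
                               unsigned HOST_WIDE_INT profiled_size)
{
  unsigned HOST_WIDE_INT min_size = 0, max_size, probable_max;
  if (CONST_INT_P (size_rtx))
    min_size = max_size = probable_max = UINTVAL (size_rtx);
  else
    {
      max_size = GET_MODE_MASK (GET_MODE (size_rtx));
      probable_max = profiled_size;
    }
  return emit_block_move_hints (dst, src, size_rtx, BLOCK_OP_NORMAL,
                                0, -1, min_size, max_size, probable_max);
}
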
diff --git a/gcc/final.c b/gcc/final.c
index 74898a31ade..e8d14ab3264 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -48,6 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
@@ -762,7 +763,7 @@ compute_alignments (void)
&& (branch_frequency > freq_threshold
|| (bb->frequency > bb->prev_bb->frequency * 10
&& (bb->prev_bb->frequency
- <= ENTRY_BLOCK_PTR->frequency / 2))))
+ <= ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency / 2))))
{
log = JUMP_ALIGN (label);
if (dump_file)
@@ -2168,6 +2169,15 @@ final_scan_insn (rtx insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED,
targetm.asm_out.function_switched_text_sections (asm_out_file,
current_function_decl,
in_cold_section_p);
+ /* Emit a label for the split cold section. Form label name by
+ suffixing "cold" to the original function's name. */
+ if (in_cold_section_p)
+ {
+ tree cold_function_name
+ = clone_function_name (current_function_decl, "cold");
+ ASM_OUTPUT_LABEL (asm_out_file,
+ IDENTIFIER_POINTER (cold_function_name));
+ }
break;
case NOTE_INSN_BASIC_BLOCK:
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index 7d0ac3582e4..528c88a370c 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -211,8 +211,9 @@ enum sanitize_code {
SANITIZE_DIVIDE = 1 << 3,
SANITIZE_UNREACHABLE = 1 << 4,
SANITIZE_VLA = 1 << 5,
+ SANITIZE_NULL = 1 << 6,
SANITIZE_UNDEFINED = SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_UNREACHABLE
- | SANITIZE_VLA
+ | SANITIZE_VLA | SANITIZE_NULL
};
/* flag_vtable_verify initialization levels. */
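
Purely as an illustration (not from the patch), a tiny translation unit of the sort the new SANITIZE_NULL bit targets: since SANITIZE_NULL is now folded into SANITIZE_UNDEFINED, building this with -fsanitize=undefined instruments the null dereference and reports it at run time.

#include <stdio.h>

int
main (void)
{
  int *p = NULL;         /* Null pointer ...  */
  printf ("%d\n", *p);   /* ... dereferenced; ubsan reports a null load here.  */
  return 0;
}
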
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index ce24f75712c..a25dfffc72e 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -46,6 +46,9 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "flags.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "tree-iterator.h"
#include "realmpfr.h"
#include "rtl.h"
#include "expr.h"
@@ -6580,7 +6583,7 @@ fold_single_bit_test (location_t loc, enum tree_code code,
&& wi::ltu_p (wi::to_widest (TREE_OPERAND (inner, 1)) + bitnum,
TYPE_PRECISION (type)))
{
- bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1));
+ bitnum += tree_to_uhwi (TREE_OPERAND (inner, 1));
inner = TREE_OPERAND (inner, 0);
}
@@ -12546,8 +12549,8 @@ fold_binary_loc (location_t loc,
&& tree_fits_uhwi_p (TREE_OPERAND (arg0, 1))
&& tree_to_uhwi (TREE_OPERAND (arg0, 1)) < prec)
{
- HOST_WIDE_INT low = (tree_to_shwi (TREE_OPERAND (arg0, 1))
- + tree_to_shwi (arg1));
+ unsigned int low = (tree_to_uhwi (TREE_OPERAND (arg0, 1))
+ + tree_to_uhwi (arg1));
/* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
being well defined. */
@@ -12571,13 +12574,13 @@ fold_binary_loc (location_t loc,
if (((code == LSHIFT_EXPR && TREE_CODE (arg0) == RSHIFT_EXPR)
|| (TYPE_UNSIGNED (type)
&& code == RSHIFT_EXPR && TREE_CODE (arg0) == LSHIFT_EXPR))
- && tree_fits_shwi_p (arg1)
- && tree_to_shwi (arg1) < prec
- && tree_fits_shwi_p (TREE_OPERAND (arg0, 1))
- && tree_to_shwi (TREE_OPERAND (arg0, 1)) < prec)
+ && tree_fits_uhwi_p (arg1)
+ && tree_to_uhwi (arg1) < prec
+ && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1))
+ && tree_to_uhwi (TREE_OPERAND (arg0, 1)) < prec)
{
- HOST_WIDE_INT low0 = tree_to_shwi (TREE_OPERAND (arg0, 1));
- HOST_WIDE_INT low1 = tree_to_shwi (arg1);
+ HOST_WIDE_INT low0 = tree_to_uhwi (TREE_OPERAND (arg0, 1));
+ HOST_WIDE_INT low1 = tree_to_uhwi (arg1);
tree lshift;
tree arg00;
diff --git a/gcc/fold-const.h b/gcc/fold-const.h
new file mode 100644
index 00000000000..d15a69df5bc
--- /dev/null
+++ b/gcc/fold-const.h
@@ -0,0 +1,172 @@
+/* Fold a constant sub-tree into a single node for C-compiler
+ Copyright (C) 1987-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FOLD_CONST_H
+#define GCC_FOLD_CONST_H
+
+/* Non-zero if we are folding constants inside an initializer; zero
+ otherwise. */
+extern int folding_initializer;
+
+/* Convert between trees and native memory representation. */
+extern int native_encode_expr (const_tree, unsigned char *, int);
+extern tree native_interpret_expr (tree, const unsigned char *, int);
+
+/* Fold constants as much as possible in an expression.
+ Returns the simplified expression.
+ Acts only on the top level of the expression;
+ if the argument itself cannot be simplified, its
+ subexpressions are not changed. */
+
+extern tree fold (tree);
+#define fold_unary(CODE,T1,T2)\
+ fold_unary_loc (UNKNOWN_LOCATION, CODE, T1, T2)
+extern tree fold_unary_loc (location_t, enum tree_code, tree, tree);
+#define fold_unary_ignore_overflow(CODE,T1,T2)\
+ fold_unary_ignore_overflow_loc (UNKNOWN_LOCATION, CODE, T1, T2)
+extern tree fold_unary_ignore_overflow_loc (location_t, enum tree_code, tree, tree);
+#define fold_binary(CODE,T1,T2,T3)\
+ fold_binary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3)
+extern tree fold_binary_loc (location_t, enum tree_code, tree, tree, tree);
+#define fold_ternary(CODE,T1,T2,T3,T4)\
+ fold_ternary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3, T4)
+extern tree fold_ternary_loc (location_t, enum tree_code, tree, tree, tree, tree);
+#define fold_build1(c,t1,t2)\
+ fold_build1_stat_loc (UNKNOWN_LOCATION, c, t1, t2 MEM_STAT_INFO)
+#define fold_build1_loc(l,c,t1,t2)\
+ fold_build1_stat_loc (l, c, t1, t2 MEM_STAT_INFO)
+extern tree fold_build1_stat_loc (location_t, enum tree_code, tree,
+ tree MEM_STAT_DECL);
+#define fold_build2(c,t1,t2,t3)\
+ fold_build2_stat_loc (UNKNOWN_LOCATION, c, t1, t2, t3 MEM_STAT_INFO)
+#define fold_build2_loc(l,c,t1,t2,t3)\
+ fold_build2_stat_loc (l, c, t1, t2, t3 MEM_STAT_INFO)
+extern tree fold_build2_stat_loc (location_t, enum tree_code, tree, tree,
+ tree MEM_STAT_DECL);
+#define fold_build3(c,t1,t2,t3,t4)\
+ fold_build3_stat_loc (UNKNOWN_LOCATION, c, t1, t2, t3, t4 MEM_STAT_INFO)
+#define fold_build3_loc(l,c,t1,t2,t3,t4)\
+ fold_build3_stat_loc (l, c, t1, t2, t3, t4 MEM_STAT_INFO)
+extern tree fold_build3_stat_loc (location_t, enum tree_code, tree, tree, tree,
+ tree MEM_STAT_DECL);
+extern tree fold_build1_initializer_loc (location_t, enum tree_code, tree, tree);
+extern tree fold_build2_initializer_loc (location_t, enum tree_code, tree, tree, tree);
+#define fold_build_call_array(T1,T2,N,T4)\
+ fold_build_call_array_loc (UNKNOWN_LOCATION, T1, T2, N, T4)
+extern tree fold_build_call_array_loc (location_t, tree, tree, int, tree *);
+#define fold_build_call_array_initializer(T1,T2,N,T4)\
+ fold_build_call_array_initializer_loc (UNKNOWN_LOCATION, T1, T2, N, T4)
+extern tree fold_build_call_array_initializer_loc (location_t, tree, tree, int, tree *);
+extern bool fold_convertible_p (const_tree, const_tree);
+#define fold_convert(T1,T2)\
+ fold_convert_loc (UNKNOWN_LOCATION, T1, T2)
+extern tree fold_convert_loc (location_t, tree, tree);
+extern tree fold_single_bit_test (location_t, enum tree_code, tree, tree, tree);
+extern tree fold_ignored_result (tree);
+extern tree fold_abs_const (tree, tree);
+extern tree fold_indirect_ref_1 (location_t, tree, tree);
+extern void fold_defer_overflow_warnings (void);
+extern void fold_undefer_overflow_warnings (bool, const_gimple, int);
+extern void fold_undefer_and_ignore_overflow_warnings (void);
+extern bool fold_deferring_overflow_warnings_p (void);
+extern tree fold_fma (location_t, tree, tree, tree, tree);
+extern int operand_equal_p (const_tree, const_tree, unsigned int);
+extern int multiple_of_p (tree, const_tree, const_tree);
+#define omit_one_operand(T1,T2,T3)\
+ omit_one_operand_loc (UNKNOWN_LOCATION, T1, T2, T3)
+extern tree omit_one_operand_loc (location_t, tree, tree, tree);
+#define omit_two_operands(T1,T2,T3,T4)\
+ omit_two_operands_loc (UNKNOWN_LOCATION, T1, T2, T3, T4)
+extern tree omit_two_operands_loc (location_t, tree, tree, tree, tree);
+#define invert_truthvalue(T)\
+ invert_truthvalue_loc (UNKNOWN_LOCATION, T)
+extern tree invert_truthvalue_loc (location_t, tree);
+extern tree fold_unary_to_constant (enum tree_code, tree, tree);
+extern tree fold_binary_to_constant (enum tree_code, tree, tree, tree);
+extern tree fold_read_from_constant_string (tree);
+extern tree int_const_binop (enum tree_code, const_tree, const_tree);
+#define build_fold_addr_expr(T)\
+ build_fold_addr_expr_loc (UNKNOWN_LOCATION, (T))
+extern tree build_fold_addr_expr_loc (location_t, tree);
+#define build_fold_addr_expr_with_type(T,TYPE)\
+ build_fold_addr_expr_with_type_loc (UNKNOWN_LOCATION, (T), TYPE)
+extern tree build_fold_addr_expr_with_type_loc (location_t, tree, tree);
+extern tree fold_build_cleanup_point_expr (tree type, tree expr);
+extern tree fold_strip_sign_ops (tree);
+#define build_fold_indirect_ref(T)\
+ build_fold_indirect_ref_loc (UNKNOWN_LOCATION, T)
+extern tree build_fold_indirect_ref_loc (location_t, tree);
+#define fold_indirect_ref(T)\
+ fold_indirect_ref_loc (UNKNOWN_LOCATION, T)
+extern tree fold_indirect_ref_loc (location_t, tree);
+extern tree build_simple_mem_ref_loc (location_t, tree);
+#define build_simple_mem_ref(T)\
+ build_simple_mem_ref_loc (UNKNOWN_LOCATION, T)
+extern offset_int mem_ref_offset (const_tree);
+extern tree build_invariant_address (tree, tree, HOST_WIDE_INT);
+extern tree constant_boolean_node (bool, tree);
+extern tree div_if_zero_remainder (const_tree, const_tree);
+
+extern bool tree_swap_operands_p (const_tree, const_tree, bool);
+extern enum tree_code swap_tree_comparison (enum tree_code);
+
+extern bool ptr_difference_const (tree, tree, HOST_WIDE_INT *);
+extern enum tree_code invert_tree_comparison (enum tree_code, bool);
+
+extern bool tree_unary_nonzero_warnv_p (enum tree_code, tree, tree, bool *);
+extern bool tree_binary_nonzero_warnv_p (enum tree_code, tree, tree, tree op1,
+ bool *);
+extern bool tree_single_nonzero_warnv_p (tree, bool *);
+extern bool tree_unary_nonnegative_warnv_p (enum tree_code, tree, tree, bool *);
+extern bool tree_binary_nonnegative_warnv_p (enum tree_code, tree, tree, tree,
+ bool *);
+extern bool tree_single_nonnegative_warnv_p (tree t, bool *strict_overflow_p);
+extern bool tree_call_nonnegative_warnv_p (tree, tree, tree, tree, bool *);
+
+extern bool fold_real_zero_addition_p (const_tree, const_tree, int);
+extern tree combine_comparisons (location_t, enum tree_code, enum tree_code,
+ enum tree_code, tree, tree, tree);
+extern void debug_fold_checksum (const_tree);
+extern bool may_negate_without_overflow_p (const_tree);
+#define round_up(T,N) round_up_loc (UNKNOWN_LOCATION, T, N)
+extern tree round_up_loc (location_t, tree, int);
+#define round_down(T,N) round_down_loc (UNKNOWN_LOCATION, T, N)
+extern tree round_down_loc (location_t, tree, int);
+extern tree size_int_kind (HOST_WIDE_INT, enum size_type_kind);
+#define size_binop(CODE,T1,T2)\
+ size_binop_loc (UNKNOWN_LOCATION, CODE, T1, T2)
+extern tree size_binop_loc (location_t, enum tree_code, tree, tree);
+#define size_diffop(T1,T2)\
+ size_diffop_loc (UNKNOWN_LOCATION, T1, T2)
+extern tree size_diffop_loc (location_t, tree, tree);
+
+/* Return an expr equal to X but certainly not valid as an lvalue. */
+#define non_lvalue(T) non_lvalue_loc (UNKNOWN_LOCATION, T)
+extern tree non_lvalue_loc (location_t, tree);
+
+extern bool tree_expr_nonnegative_p (tree);
+extern bool tree_expr_nonnegative_warnv_p (tree, bool *);
+extern tree make_range (tree, int *, tree *, tree *, bool *);
+extern tree make_range_step (location_t, enum tree_code, tree, tree, tree,
+ tree *, tree *, int *, bool *);
+extern tree build_range_check (location_t, tree, tree, int, tree, tree);
+extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int,
+ tree, tree);
+
+#endif // GCC_FOLD_CONST_H
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 16e8915c0c5..26e58bef0f0 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -7,6 +7,33 @@
* trans-expr.c: Include only gimplify.h and gimple.h as needed.
* trans-openmp.c: Likewise.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * decl.c: Include stringpool.h.
+ * iresolve.c: Include stringpool.h.
+ * match.c: Include stringpool.h.
+ * module.c: Include stringpool.h.
+ * target-memory.c: Include stor-layout.h.
+ * trans-common.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ * trans-const.c: Include stor-layout.h.
+ * trans-decl.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ Include attribs.h.
+ * trans-expr.c: Include stringpool.h.
+ * trans-intrinsic.c: Include stringpool.h.
+ Include tree-nested.h.
+ Include stor-layout.h.
+ * trans-io.c: Include stringpool.h.
+ Include stor-layout.h.
+ * trans-openmp.c: Include stringpool.h.
+ * trans-stmt.c: Include stringpool.h.
+ * trans-types.c: Include stor-layout.h.
+ Include stringpool.h.
+ * trans.c: Include stringpool.h.
+
2013-11-12 Andrew MacLeod <amacleod@redhat.com>
* f95-lang.c: Don't include gimple.h.
diff --git a/gcc/fortran/decl.c b/gcc/fortran/decl.c
index 9c9fd4ffbf0..0a0f8e0f3b5 100644
--- a/gcc/fortran/decl.c
+++ b/gcc/fortran/decl.c
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "constructor.h"
#include "tree.h"
+#include "stringpool.h"
/* Macros to access allocate memory for gfc_data_variable,
gfc_data_value and gfc_data. */
diff --git a/gcc/fortran/iresolve.c b/gcc/fortran/iresolve.c
index af452b32888..f31340f5d55 100644
--- a/gcc/fortran/iresolve.c
+++ b/gcc/fortran/iresolve.c
@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "gfortran.h"
#include "intrinsic.h"
#include "constructor.h"
diff --git a/gcc/fortran/match.c b/gcc/fortran/match.c
index 71e3862189a..539780aaa24 100644
--- a/gcc/fortran/match.c
+++ b/gcc/fortran/match.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "match.h"
#include "parse.h"
#include "tree.h"
+#include "stringpool.h"
int gfc_matching_ptr_assignment = 0;
int gfc_matching_procptr_assignment = 0;
diff --git a/gcc/fortran/module.c b/gcc/fortran/module.c
index c390a95952e..0cd48018577 100644
--- a/gcc/fortran/module.c
+++ b/gcc/fortran/module.c
@@ -74,6 +74,7 @@ along with GCC; see the file COPYING3. If not see
#include "constructor.h"
#include "cpp.h"
#include "tree.h"
+#include "stringpool.h"
#include "scanner.h"
#include <zlib.h>
diff --git a/gcc/fortran/target-memory.c b/gcc/fortran/target-memory.c
index 86bbb5de51d..8778d982d97 100644
--- a/gcc/fortran/target-memory.c
+++ b/gcc/fortran/target-memory.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "machmode.h"
#include "tree.h"
+#include "stor-layout.h"
#include "gfortran.h"
#include "arith.h"
#include "constructor.h"
diff --git a/gcc/fortran/trans-common.c b/gcc/fortran/trans-common.c
index ffe48925ace..bc54b9deea7 100644
--- a/gcc/fortran/trans-common.c
+++ b/gcc/fortran/trans-common.c
@@ -98,6 +98,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "gfortran.h"
#include "trans.h"
#include "trans-types.h"
diff --git a/gcc/fortran/trans-const.c b/gcc/fortran/trans-const.c
index ad2b4d23b39..ddab0fac513 100644
--- a/gcc/fortran/trans-const.c
+++ b/gcc/fortran/trans-const.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stor-layout.h"
#include "realmpfr.h"
#include "diagnostic-core.h" /* For fatal_error. */
#include "double-int.h"
diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c
index fa83a765338..e201d854dbd 100644
--- a/gcc/fortran/trans-decl.c
+++ b/gcc/fortran/trans-decl.c
@@ -25,6 +25,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "attribs.h"
#include "tree-dump.h"
#include "gimple-expr.h" /* For create_tmp_var_raw. */
#include "ggc.h"
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index 92be1e8e0b4..7cf9bc18d38 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "diagnostic-core.h" /* For fatal_error. */
#include "langhooks.h"
#include "flags.h"
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index cef43ea8ada..7e5feab1cf3 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -26,6 +26,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h" /* For UNITS_PER_WORD. */
#include "tree.h"
+#include "stringpool.h"
+#include "tree-nested.h"
+#include "stor-layout.h"
#include "ggc.h"
#include "diagnostic-core.h" /* For internal_error. */
#include "toplev.h" /* For rest_of_decl_compilation. */
diff --git a/gcc/fortran/trans-io.c b/gcc/fortran/trans-io.c
index 5fa1cdc091e..9b46a4eef3e 100644
--- a/gcc/fortran/trans-io.c
+++ b/gcc/fortran/trans-io.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "ggc.h"
#include "diagnostic-core.h" /* For internal_error. */
#include "gfortran.h"
diff --git a/gcc/fortran/trans-openmp.c b/gcc/fortran/trans-openmp.c
index 13c87058ddd..d23af17d81e 100644
--- a/gcc/fortran/trans-openmp.c
+++ b/gcc/fortran/trans-openmp.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree.h"
#include "gimple.h"
#include "gimplify.h" /* For create_tmp_var_raw. */
+#include "stringpool.h"
#include "diagnostic-core.h" /* For internal_error. */
#include "gfortran.h"
#include "trans.h"
diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c
index 62e690d407c..4f211975581 100644
--- a/gcc/fortran/trans-stmt.c
+++ b/gcc/fortran/trans-stmt.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "gfortran.h"
#include "flags.h"
#include "trans.h"
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index 07a3ac276f8..58b0c252336 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -34,6 +34,8 @@ along with GCC; see the file COPYING3. If not see
FLOAT_TYPE_SIZE, DOUBLE_TYPE_SIZE,
LONG_DOUBLE_TYPE_SIZE and LIBGCC2_HAS_TF_MODE. */
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "langhooks.h" /* For iso-c-bindings.def. */
#include "target.h"
#include "ggc.h"
diff --git a/gcc/fortran/trans.c b/gcc/fortran/trans.c
index ef20a20f018..204cbff3247 100644
--- a/gcc/fortran/trans.c
+++ b/gcc/fortran/trans.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tree.h"
#include "gimple-expr.h" /* For create_tmp_var_raw. */
+#include "gimple.h"
+#include "stringpool.h"
#include "tree-iterator.h"
#include "diagnostic-core.h" /* For internal_error. */
#include "flags.h"
diff --git a/gcc/function.c b/gcc/function.c
index 41382310e04..fde4a8e6d07 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -37,6 +37,9 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl-error.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "stringpool.h"
#include "flags.h"
#include "except.h"
#include "function.h"
@@ -3975,7 +3978,8 @@ regno_clobbered_at_setjmp (bitmap setjmp_crosses, int regno)
return false;
return ((REG_N_SETS (regno) > 1
- || REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), regno))
+ || REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ regno))
&& REGNO_REG_SET_P (setjmp_crosses, regno));
}
@@ -4028,7 +4032,7 @@ generate_setjmp_warnings (void)
{
bitmap setjmp_crosses = regstat_get_setjmp_crosses ();
- if (n_basic_blocks == NUM_FIXED_BLOCKS
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS
|| bitmap_empty_p (setjmp_crosses))
return;
@@ -5397,7 +5401,7 @@ next_block_for_reg (basic_block bb, int regno, int end_regno)
/* We can sometimes encounter dead code. Don't try to move it
into the exit block. */
- if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR)
+ if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL;
/* Reject targets of abnormal edges. This is needed for correctness
@@ -5722,7 +5726,7 @@ convert_jumps_to_returns (basic_block last_bb, bool simple_p,
src_bbs.create (EDGE_COUNT (last_bb->preds));
FOR_EACH_EDGE (e, ei, last_bb->preds)
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
src_bbs.quick_push (e->src);
label = BB_HEAD (last_bb);
@@ -5802,7 +5806,7 @@ convert_jumps_to_returns (basic_block last_bb, bool simple_p,
}
/* Fix up the CFG for the successful change we just made. */
- redirect_edge_succ (e, EXIT_BLOCK_PTR);
+ redirect_edge_succ (e, EXIT_BLOCK_PTR_FOR_FN (cfun));
e->flags &= ~EDGE_CROSSING;
}
src_bbs.release ();
@@ -5894,7 +5898,7 @@ thread_prologue_and_epilogue_insns (void)
df_analyze ();
- rtl_profile_for_bb (ENTRY_BLOCK_PTR);
+ rtl_profile_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
inserted = false;
seq = NULL_RTX;
@@ -5904,8 +5908,8 @@ thread_prologue_and_epilogue_insns (void)
/* Can't deal with multiple successors of the entry block at the
moment. Function should always have at least one entry
point. */
- gcc_assert (single_succ_p (ENTRY_BLOCK_PTR));
- entry_edge = single_succ_edge (ENTRY_BLOCK_PTR);
+ gcc_assert (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
orig_entry_edge = entry_edge;
split_prologue_seq = NULL_RTX;
@@ -6015,7 +6019,7 @@ thread_prologue_and_epilogue_insns (void)
/* Find the set of basic blocks that require a stack frame,
and blocks that are too big to be duplicated. */
- vec.create (n_basic_blocks);
+ vec.create (n_basic_blocks_for_fn (cfun));
CLEAR_HARD_REG_SET (set_up_by_prologue.set);
add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
@@ -6078,7 +6082,7 @@ thread_prologue_and_epilogue_insns (void)
basic_block tmp_bb = vec.pop ();
FOR_EACH_EDGE (e, ei, tmp_bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_set_bit (&bb_flags, e->dest->index))
vec.quick_push (e->dest);
}
@@ -6086,7 +6090,7 @@ thread_prologue_and_epilogue_insns (void)
/* Find the set of basic blocks that need no prologue, have a
single successor, can be duplicated, meet a max size
requirement, and go to the exit via like blocks. */
- vec.quick_push (EXIT_BLOCK_PTR);
+ vec.quick_push (EXIT_BLOCK_PTR_FOR_FN (cfun));
while (!vec.is_empty ())
{
basic_block tmp_bb = vec.pop ();
@@ -6263,7 +6267,7 @@ thread_prologue_and_epilogue_insns (void)
{
/* Otherwise put the copy at the end of the function. */
copy_bb = create_basic_block (NULL_RTX, NULL_RTX,
- EXIT_BLOCK_PTR->prev_bb);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
BB_COPY_PARTITION (copy_bb, bb);
}
@@ -6277,7 +6281,7 @@ thread_prologue_and_epilogue_insns (void)
dup_block_and_redirect (tbb, copy_bb, insert_point,
&bb_flags);
tbb = single_succ (tbb);
- if (tbb == EXIT_BLOCK_PTR)
+ if (tbb == EXIT_BLOCK_PTR_FOR_FN (cfun))
break;
e = split_block (copy_bb, PREV_INSN (insert_point));
copy_bb = e->dest;
@@ -6291,7 +6295,8 @@ thread_prologue_and_epilogue_insns (void)
if (CALL_P (PREV_INSN (insert_point))
&& SIBLING_CALL_P (PREV_INSN (insert_point)))
eflags = EDGE_SIBCALL | EDGE_ABNORMAL;
- make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR, eflags);
+ make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ eflags);
/* verify_flow_info doesn't like a note after a
sibling call. */
@@ -6322,15 +6327,15 @@ thread_prologue_and_epilogue_insns (void)
/* If the exit block has no non-fake predecessors, we don't need
an epilogue. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if ((e->flags & EDGE_FAKE) == 0)
break;
if (e == NULL)
goto epilogue_done;
- rtl_profile_for_bb (EXIT_BLOCK_PTR);
+ rtl_profile_for_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
- exit_fallthru_edge = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
+ exit_fallthru_edge = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
/* If we're allowed to generate a simple return instruction, then by
definition we don't need a full epilogue. If the last basic
@@ -6346,10 +6351,10 @@ thread_prologue_and_epilogue_insns (void)
/* convert_jumps_to_returns may add to EXIT_BLOCK_PTR->preds
(but won't remove). Stop at end of current preds. */
- last = EDGE_COUNT (EXIT_BLOCK_PTR->preds);
+ last = EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
for (i = 0; i < last; i++)
{
- e = EDGE_I (EXIT_BLOCK_PTR->preds, i);
+ e = EDGE_I (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, i);
if (LABEL_P (BB_HEAD (e->src))
&& !bitmap_bit_p (&bb_flags, e->src->index)
&& !active_insn_between (BB_HEAD (e->src), BB_END (e->src)))
@@ -6413,7 +6418,7 @@ thread_prologue_and_epilogue_insns (void)
code. In order to be able to properly annotate these with unwind
info, try to split them now. If we get a valid split, drop an
EPILOGUE_BEG note and mark the insns as epilogue insns. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx prev, last, trial;
@@ -6504,7 +6509,7 @@ epilogue_done:
/* The epilogue insns we inserted may cause the exit edge to no longer
be fallthru. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
if (((e->flags & EDGE_FALLTHRU) != 0)
&& returnjump_p (BB_END (e->src)))
@@ -6541,7 +6546,7 @@ epilogue_done:
}
/* Also check returns we might need to add to tail blocks. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (EDGE_COUNT (e->src->preds) != 0
&& (e->flags & EDGE_FAKE) != 0
&& !bitmap_bit_p (&bb_flags, e->src->index))
@@ -6556,7 +6561,7 @@ epilogue_done:
inserting new BBs at the end of the function. Do this
after the call to split_block above which may split
the original exit pred. */
- exit_pred = EXIT_BLOCK_PTR->prev_bb;
+ exit_pred = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
{
@@ -6593,7 +6598,7 @@ epilogue_done:
emit_barrier_after (start);
*pdest_bb = bb;
- make_edge (bb, EXIT_BLOCK_PTR, 0);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
}
redirect_edge_and_branch_force (e, *pdest_bb);
}
@@ -6602,7 +6607,7 @@ epilogue_done:
if (entry_edge != orig_entry_edge)
{
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (EDGE_COUNT (e->src->preds) != 0
&& (e->flags & EDGE_FAKE) != 0
&& !bitmap_bit_p (&bb_flags, e->src->index))
@@ -6615,7 +6620,9 @@ epilogue_done:
#ifdef HAVE_sibcall_epilogue
/* Emit sibling epilogues before any sibling call sites. */
- for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
+ for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); (e =
+ ei_safe_edge (ei));
+ )
{
basic_block bb = e->src;
rtx insn = BB_END (bb);
@@ -6746,7 +6753,7 @@ reposition_prologue_and_epilogue_notes (void)
edge_iterator ei;
edge e;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx insn, first = NULL, note = NULL;
basic_block bb = e->src;
diff --git a/gcc/function.h b/gcc/function.h
index 9bb6ff04c50..c00bfe2aa37 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -822,4 +822,25 @@ extern unsigned int emit_initial_value_sets (void);
extern bool optimize_function_for_size_p (struct function *);
extern bool optimize_function_for_speed_p (struct function *);
+/* In function.c */
+extern void expand_function_end (void);
+extern void expand_function_start (tree);
+extern void stack_protect_epilogue (void);
+extern void init_dummy_function_start (void);
+extern void expand_dummy_function_end (void);
+extern void allocate_struct_function (tree, bool);
+extern void push_struct_function (tree fndecl);
+extern void init_function_start (tree);
+extern bool use_register_for_decl (const_tree);
+extern void generate_setjmp_warnings (void);
+extern void init_temp_slots (void);
+extern void free_temp_slots (void);
+extern void pop_temp_slots (void);
+extern void push_temp_slots (void);
+extern void preserve_temp_slots (rtx);
+extern int aggregate_value_p (const_tree, const_tree);
+extern void push_function_context (void);
+extern void pop_function_context (void);
+extern gimple_seq gimplify_parameters (void);
+
#endif /* GCC_FUNCTION_H */
diff --git a/gcc/fwprop.c b/gcc/fwprop.c
index d08710c9614..da40a677559 100644
--- a/gcc/fwprop.c
+++ b/gcc/fwprop.c
@@ -289,7 +289,7 @@ build_single_def_use_links (void)
reg_defs.create (max_reg_num ());
reg_defs.safe_grow_cleared (max_reg_num ());
- reg_defs_stack.create (n_basic_blocks * 10);
+ reg_defs_stack.create (n_basic_blocks_for_fn (cfun) * 10);
local_md = BITMAP_ALLOC (NULL);
local_lr = BITMAP_ALLOC (NULL);
diff --git a/gcc/gcc-symtab.h b/gcc/gcc-symtab.h
new file mode 100644
index 00000000000..a5bbad265a3
--- /dev/null
+++ b/gcc/gcc-symtab.h
@@ -0,0 +1,28 @@
+/* Declarations for symtab.c.
+ FIXME - This file should be named symtab.h, but that name conflicts
+ with libcpp's symtab.h.
+
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SYMTAB_H
+#define GCC_SYMTAB_H
+
+extern void change_decl_assembler_name (tree, tree);
+
+#endif // GCC_SYMTAB_H
diff --git a/gcc/gcse.c b/gcc/gcse.c
index 571e8788c83..3012c4d1d36 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -1964,7 +1964,7 @@ prune_insertions_deletions (int n_elems)
/* Iterate over the edges counting the number of times each expression
needs to be inserted. */
- for (i = 0; i < (unsigned) n_edges; i++)
+ for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
{
EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
insertions[j]++;
@@ -1990,7 +1990,7 @@ prune_insertions_deletions (int n_elems)
/* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
{
- for (i = 0; i < (unsigned) n_edges; i++)
+ for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
bitmap_clear_bit (pre_insert_map[i], j);
for (i = 0; i < (unsigned) last_basic_block; i++)
@@ -2063,7 +2063,7 @@ pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
{
basic_block pred_bb = pred->src;
- if (pred->src == ENTRY_BLOCK_PTR
+ if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
/* Has predecessor already been visited? */
|| visited[pred_bb->index])
;/* Nothing to do. */
@@ -2662,7 +2662,7 @@ one_pre_gcse_pass (void)
gcse_create_count = 0;
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
|| is_too_expensive (_("PRE disabled")))
return 0;
@@ -2708,7 +2708,8 @@ one_pre_gcse_pass (void)
if (dump_file)
{
fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
- current_function_name (), n_basic_blocks, bytes_used);
+ current_function_name (), n_basic_blocks_for_fn (cfun),
+ bytes_used);
fprintf (dump_file, "%d substs, %d insns created\n",
gcse_subst_count, gcse_create_count);
}
@@ -2829,7 +2830,7 @@ compute_code_hoist_vbeinout (void)
the convergence. */
FOR_EACH_BB_REVERSE (bb)
{
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
bitmap_intersection_of_succs (hoist_vbeout[bb->index],
hoist_vbein, bb);
@@ -2907,7 +2908,7 @@ update_bb_reg_pressure (basic_block bb, rtx from)
FOR_EACH_EDGE (succ, ei, bb->succs)
{
succ_bb = succ->dest;
- if (succ_bb == EXIT_BLOCK_PTR)
+ if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
@@ -3040,7 +3041,7 @@ should_hoist_expr_to_dom (basic_block expr_bb, struct expr *expr,
{
basic_block pred_bb = pred->src;
- if (pred->src == ENTRY_BLOCK_PTR)
+ if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
break;
else if (pred_bb == expr_bb)
continue;
@@ -3184,16 +3185,16 @@ hoist_code (void)
bb_size[bb->index] = to_head;
}
- gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
- && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
- == ENTRY_BLOCK_PTR->next_bb));
+ gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
+ && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
+ == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
from_bbs = BITMAP_ALLOC (NULL);
if (flag_ira_hoist_pressure)
hoisted_bbs = BITMAP_ALLOC (NULL);
dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
- ENTRY_BLOCK_PTR->next_bb);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
/* Walk over each basic block looking for potentially hoistable
expressions, nothing gets hoisted from the entry block. */
@@ -3591,7 +3592,7 @@ one_code_hoisting_pass (void)
gcse_create_count = 0;
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
|| is_too_expensive (_("GCSE disabled")))
return 0;
@@ -3642,7 +3643,8 @@ one_code_hoisting_pass (void)
if (dump_file)
{
fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
- current_function_name (), n_basic_blocks, bytes_used);
+ current_function_name (), n_basic_blocks_for_fn (cfun),
+ bytes_used);
fprintf (dump_file, "%d substs, %d insns created\n",
gcse_subst_count, gcse_create_count);
}
@@ -4067,24 +4069,25 @@ is_too_expensive (const char *pass)
which have a couple switch statements. Rather than simply
threshold the number of blocks, uses something with a more
graceful degradation. */
- if (n_edges > 20000 + n_basic_blocks * 4)
+ if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d edges/basic block",
- pass, n_basic_blocks, n_edges / n_basic_blocks);
+ pass, n_basic_blocks_for_fn (cfun),
+ n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));
return true;
}
/* If allocating memory for the dataflow bitmaps would take up too much
storage it's better just to disable the optimization. */
- if ((n_basic_blocks
+ if ((n_basic_blocks_for_fn (cfun)
* SBITMAP_SET_SIZE (max_reg_num ())
* sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d registers",
- pass, n_basic_blocks, max_reg_num ());
+ pass, n_basic_blocks_for_fn (cfun), max_reg_num ());
return true;
}
diff --git a/gcc/gdbhooks.py b/gcc/gdbhooks.py
index a9d502d3d21..baccd6b783e 100644
--- a/gcc/gdbhooks.py
+++ b/gcc/gdbhooks.py
@@ -109,6 +109,26 @@ available:
1594 execute_pass_list (g->get_passes ()->all_passes);
(gdb) p node
$1 = <cgraph_node* 0x7ffff0312720 "foo">
+
+vec<> pointers are printed as the address followed by the elements in
+braces. Here's a length 2 vec:
+ (gdb) p bb->preds
+ $18 = 0x7ffff0428b68 = {<edge 0x7ffff044d380 (3 -> 5)>, <edge 0x7ffff044d3b8 (4 -> 5)>}
+
+and here's a length 1 vec:
+ (gdb) p bb->succs
+ $19 = 0x7ffff0428bb8 = {<edge 0x7ffff044d3f0 (5 -> EXIT)>}
+
+You cannot yet use array notation [] to access the elements within the
+vector: attempting to do so instead gives you the vec itself (for vec[0]),
+or a (probably) invalid cast to vec<> for the memory after the vec (for
+vec[1] onwards).
+
+Instead (for now) you must access m_vecdata:
+ (gdb) p bb->preds->m_vecdata[0]
+ $20 = <edge 0x7ffff044d380 (3 -> 5)>
+ (gdb) p bb->preds->m_vecdata[1]
+ $21 = <edge 0x7ffff044d3b8 (4 -> 5)>
"""
import re
@@ -240,7 +260,7 @@ class GimplePrinter:
def to_string (self):
if long(self.gdbval) == 0:
return '<gimple 0x0>'
- val_gimple_code = self.gdbval['gsbase']['code']
+ val_gimple_code = self.gdbval['code']
val_gimple_code_name = gdb.parse_and_eval('gimple_code_name')
val_code_name = val_gimple_code_name[long(val_gimple_code)]
result = '<%s 0x%x' % (val_code_name.string(),
@@ -349,29 +369,77 @@ class PassPrinter:
######################################################################
+class VecPrinter:
+ # -ex "up" -ex "p bb->preds"
+ def __init__(self, gdbval):
+ self.gdbval = gdbval
+
+ def display_hint (self):
+ return 'array'
+
+ def to_string (self):
+ # A trivial implementation; prettyprinting the contents is done
+ # by gdb calling the "children" method below.
+ return '0x%x' % long(self.gdbval)
+
+ def children (self):
+ m_vecpfx = self.gdbval['m_vecpfx']
+ m_num = m_vecpfx['m_num']
+ m_vecdata = self.gdbval['m_vecdata']
+ for i in range(m_num):
+ yield ('[%d]' % i, m_vecdata[i])
+
+######################################################################
+
# TODO:
-# * vec
# * hashtab
# * location_t
class GdbSubprinter(gdb.printing.SubPrettyPrinter):
- def __init__(self, name, str_type_, class_):
+ def __init__(self, name, class_):
super(GdbSubprinter, self).__init__(name)
- self.str_type_ = str_type_
self.class_ = class_
+ def handles_type(self, str_type):
+ raise NotImplementedError
+
+class GdbSubprinterTypeList(GdbSubprinter):
+ """
+ A GdbSubprinter that handles a specific set of types
+ """
+ def __init__(self, str_types, name, class_):
+ super(GdbSubprinterTypeList, self).__init__(name, class_)
+ self.str_types = frozenset(str_types)
+
+ def handles_type(self, str_type):
+ return str_type in self.str_types
+
+class GdbSubprinterRegex(GdbSubprinter):
+ """
+ A GdbSubprinter that handles types that match a regex
+ """
+ def __init__(self, regex, name, class_):
+ super(GdbSubprinterRegex, self).__init__(name, class_)
+ self.regex = re.compile(regex)
+
+ def handles_type(self, str_type):
+ return self.regex.match(str_type)
+
class GdbPrettyPrinters(gdb.printing.PrettyPrinter):
def __init__(self, name):
super(GdbPrettyPrinters, self).__init__(name, [])
- def add_printer(self, name, exp, class_):
- self.subprinters.append(GdbSubprinter(name, exp, class_))
+ def add_printer_for_types(self, name, class_, types):
+ self.subprinters.append(GdbSubprinterTypeList(name, class_, types))
+
+ def add_printer_for_regex(self, name, class_, regex):
+ self.subprinters.append(GdbSubprinterRegex(name, class_, regex))
def __call__(self, gdbval):
type_ = gdbval.type.unqualified()
- str_type_ = str(type_)
+ str_type = str(type_)
for printer in self.subprinters:
- if printer.enabled and str_type_ == printer.str_type_:
+ if printer.enabled and printer.handles_type(str_type):
return printer.class_(gdbval)
# Couldn't find a pretty printer (or it was disabled):
@@ -380,13 +448,26 @@ class GdbPrettyPrinters(gdb.printing.PrettyPrinter):
def build_pretty_printer():
pp = GdbPrettyPrinters('gcc')
- pp.add_printer('tree', 'tree', TreePrinter)
- pp.add_printer('cgraph_node', 'cgraph_node *', CGraphNodePrinter)
- pp.add_printer('gimple', 'gimple', GimplePrinter)
- pp.add_printer('basic_block', 'basic_block', BasicBlockPrinter)
- pp.add_printer('edge', 'edge', CfgEdgePrinter)
- pp.add_printer('rtx_def', 'rtx_def *', RtxPrinter)
- pp.add_printer('opt_pass', 'opt_pass *', PassPrinter)
+ pp.add_printer_for_types(['tree'],
+ 'tree', TreePrinter)
+ pp.add_printer_for_types(['cgraph_node *'],
+ 'cgraph_node', CGraphNodePrinter)
+ pp.add_printer_for_types(['gimple', 'gimple_statement_base *'],
+ 'gimple',
+ GimplePrinter)
+ pp.add_printer_for_types(['basic_block', 'basic_block_def *'],
+ 'basic_block',
+ BasicBlockPrinter)
+ pp.add_printer_for_types(['edge', 'edge_def *'],
+ 'edge',
+ CfgEdgePrinter)
+ pp.add_printer_for_types(['rtx_def *'], 'rtx_def', RtxPrinter)
+ pp.add_printer_for_types(['opt_pass *'], 'opt_pass', PassPrinter)
+
+ pp.add_printer_for_regex(r'vec<(\S+), (\S+), (\S+)> \*',
+ 'vec',
+ VecPrinter)
+
return pp
gdb.printing.register_pretty_printer(
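The VecPrinter registered above relies on just two members of the pointed-to
vec: the element count and the trailing data array. A rough C++ sketch of the
layout its children() method assumes (illustrative only; the real template
lives in gcc/vec.h and carries extra allocation parameters and prefix bits):

    /* Sketch of the embedded vec<> layout read by VecPrinter.  */
    struct vec_prefix_sketch
    {
      unsigned m_alloc;   /* capacity; not used by the printer */
      unsigned m_num;     /* live element count; loop bound in children() */
    };

    template <typename T>
    struct vec_sketch
    {
      vec_prefix_sketch m_vecpfx;
      T m_vecdata[1];     /* elements yielded as ('[i]', value) pairs */
    };

This is also why the workaround documented in the new docstring goes through
m_vecdata: array indexing on the vec pointer itself steps in units of whole
vec<> objects rather than of the elements stored after the prefix.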
diff --git a/gcc/gdbinit.in b/gcc/gdbinit.in
index 104d98e520f..aa0bf9beda6 100644
--- a/gcc/gdbinit.in
+++ b/gcc/gdbinit.in
@@ -99,7 +99,7 @@ set mpz_out_str(stderr, 10, $)
end
document pmz
-Print the mpz value that is $
+Print the mpz value that is $
Works only when an inferior is executing.
end
diff --git a/gcc/genattrtab.c b/gcc/genattrtab.c
index f79380d6e46..c0125d103b7 100644
--- a/gcc/genattrtab.c
+++ b/gcc/genattrtab.c
@@ -5101,6 +5101,9 @@ write_header (FILE *outf)
fprintf (outf, "#include \"coretypes.h\"\n");
fprintf (outf, "#include \"tm.h\"\n");
fprintf (outf, "#include \"tree.h\"\n");
+ fprintf (outf, "#include \"varasm.h\"\n");
+ fprintf (outf, "#include \"stor-layout.h\"\n");
+ fprintf (outf, "#include \"calls.h\"\n");
fprintf (outf, "#include \"rtl.h\"\n");
fprintf (outf, "#include \"insn-attr.h\"\n");
fprintf (outf, "#include \"tm_p.h\"\n");
diff --git a/gcc/genautomata.c b/gcc/genautomata.c
index f6c4b91c42e..3ab480428aa 100644
--- a/gcc/genautomata.c
+++ b/gcc/genautomata.c
@@ -9666,6 +9666,9 @@ main (int argc, char **argv)
"#include \"coretypes.h\"\n"
"#include \"tm.h\"\n"
"#include \"tree.h\"\n"
+ "#include \"varasm.h\"\n"
+ "#include \"stor-layout.h\"\n"
+ "#include \"calls.h\"\n"
"#include \"rtl.h\"\n"
"#include \"tm_p.h\"\n"
"#include \"insn-config.h\"\n"
diff --git a/gcc/genemit.c b/gcc/genemit.c
index 45d3936ddba..174aa30ff22 100644
--- a/gcc/genemit.c
+++ b/gcc/genemit.c
@@ -792,6 +792,9 @@ from the machine description file `md'. */\n\n");
printf ("#include \"coretypes.h\"\n");
printf ("#include \"tm.h\"\n");
printf ("#include \"tree.h\"\n");
+ printf ("#include \"varasm.h\"\n");
+ printf ("#include \"stor-layout.h\"\n");
+ printf ("#include \"calls.h\"\n");
printf ("#include \"rtl.h\"\n");
printf ("#include \"tm_p.h\"\n");
printf ("#include \"function.h\"\n");
diff --git a/gcc/gengtype.c b/gcc/gengtype.c
index ab2f336851d..c2172c928fb 100644
--- a/gcc/gengtype.c
+++ b/gcc/gengtype.c
@@ -1769,7 +1769,7 @@ open_base_files (void)
"hard-reg-set.h", "basic-block.h", "cselib.h", "insn-addr.h",
"optabs.h", "libfuncs.h", "debug.h", "ggc.h", "cgraph.h",
"gimple.h", "gimple-iterator.h", "gimple-ssa.h", "tree-cfg.h",
- "tree-phinodes.h", "ssa-iterators.h", "tree-ssanames.h",
+ "tree-phinodes.h", "ssa-iterators.h", "stringpool.h", "tree-ssanames.h",
"tree-ssa-loop.h", "tree-ssa-loop-ivopts.h", "tree-ssa-loop-manip.h",
"tree-ssa-loop-niter.h", "tree-into-ssa.h", "tree-dfa.h",
"tree-ssa.h", "reload.h", "cpp-id-data.h", "tree-chrec.h",
diff --git a/gcc/genopinit.c b/gcc/genopinit.c
index 3efb71e249e..2e736ce687b 100644
--- a/gcc/genopinit.c
+++ b/gcc/genopinit.c
@@ -405,6 +405,9 @@ main (int argc, char **argv)
"#include \"coretypes.h\"\n"
"#include \"tm.h\"\n"
"#include \"tree.h\"\n"
+ "#include \"varasm.h\"\n"
+ "#include \"stor-layout.h\"\n"
+ "#include \"calls.h\"\n"
"#include \"rtl.h\"\n"
"#include \"tm_p.h\"\n"
"#include \"flags.h\"\n"
diff --git a/gcc/genoutput.c b/gcc/genoutput.c
index 2a7ee2391db..de8979bc726 100644
--- a/gcc/genoutput.c
+++ b/gcc/genoutput.c
@@ -239,6 +239,9 @@ output_prologue (void)
printf ("#include \"flags.h\"\n");
printf ("#include \"ggc.h\"\n");
printf ("#include \"tree.h\"\n");
+ printf ("#include \"varasm.h\"\n");
+ printf ("#include \"stor-layout.h\"\n");
+ printf ("#include \"calls.h\"\n");
printf ("#include \"rtl.h\"\n");
printf ("#include \"expr.h\"\n");
printf ("#include \"insn-codes.h\"\n");
diff --git a/gcc/genpeep.c b/gcc/genpeep.c
index 877fde3ec90..8d9d25dd772 100644
--- a/gcc/genpeep.c
+++ b/gcc/genpeep.c
@@ -360,6 +360,9 @@ from the machine description file `md'. */\n\n");
printf ("#include \"tm.h\"\n");
printf ("#include \"insn-config.h\"\n");
printf ("#include \"tree.h\"\n");
+ printf ("#include \"varasm.h\"\n");
+ printf ("#include \"stor-layout.h\"\n");
+ printf ("#include \"calls.h\"\n");
printf ("#include \"rtl.h\"\n");
printf ("#include \"tm_p.h\"\n");
printf ("#include \"regs.h\"\n");
diff --git a/gcc/genpreds.c b/gcc/genpreds.c
index 9114d2d60d9..976eb113a6b 100644
--- a/gcc/genpreds.c
+++ b/gcc/genpreds.c
@@ -1306,6 +1306,9 @@ write_insn_preds_c (void)
#include \"tm.h\"\n\
#include \"rtl.h\"\n\
#include \"tree.h\"\n\
+#include \"varasm.h\"\n\
+#include \"stor-layout.h\"\n\
+#include \"calls.h\"\n\
#include \"tm_p.h\"\n\
#include \"function.h\"\n\
#include \"insn-config.h\"\n\
diff --git a/gcc/ggc.h b/gcc/ggc.h
index b31bc80489f..bb8f939513d 100644
--- a/gcc/ggc.h
+++ b/gcc/ggc.h
@@ -269,10 +269,10 @@ ggc_alloc_cleared_tree_node_stat (size_t s MEM_STAT_DECL)
return (union tree_node *) ggc_internal_cleared_alloc_stat (s PASS_MEM_STAT);
}
-static inline union gimple_statement_d *
-ggc_alloc_cleared_gimple_statement_d_stat (size_t s MEM_STAT_DECL)
+static inline struct gimple_statement_base *
+ggc_alloc_cleared_gimple_statement_stat (size_t s MEM_STAT_DECL)
{
- return (union gimple_statement_d *)
+ return (struct gimple_statement_base *)
ggc_internal_cleared_alloc_stat (s PASS_MEM_STAT);
}
diff --git a/gcc/gimple-builder.c b/gcc/gimple-builder.c
index 6b53221b6b3..17cd2e1b887 100644
--- a/gcc/gimple-builder.c
+++ b/gcc/gimple-builder.c
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "gimple.h"
#include "tree-ssanames.h"
diff --git a/gcc/gimple-expr.c b/gcc/gimple-expr.c
index 9156f952784..77660a34c6d 100644
--- a/gcc/gimple-expr.c
+++ b/gcc/gimple-expr.c
@@ -25,7 +25,9 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "tree.h"
#include "gimple.h"
+#include "stringpool.h"
#include "gimplify.h"
+#include "stor-layout.h"
#include "demangle.h"
#include "gimple-ssa.h"
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 80c23be230d..5d904029769 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -23,6 +23,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "expr.h"
+#include "stmt.h"
+#include "stor-layout.h"
#include "flags.h"
#include "function.h"
#include "dumpfile.h"
@@ -3064,7 +3068,7 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
offset = woffset.to_shwi ();
/* TODO: This code seems wrong, multiply then check
to see if it fits. */
- offset *= TREE_INT_CST_LOW (unit_size);
+ offset *= tree_to_uhwi (unit_size);
offset *= BITS_PER_UNIT;
base = TREE_OPERAND (t, 0);
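tree_to_uhwi, used above in place of TREE_INT_CST_LOW, is only meaningful for
INTEGER_CSTs whose value fits in an unsigned HOST_WIDE_INT; its companion
predicate is tree_fits_uhwi_p. A hypothetical guarded use (the helper name is
made up; only the two accessors are from GCC):

    static unsigned HOST_WIDE_INT
    unit_size_or_zero_sketch (tree unit_size)
    {
      /* Check that the constant fits before extracting it.  */
      if (tree_fits_uhwi_p (unit_size))
        return tree_to_uhwi (unit_size);
      return 0;
    }

The hunk above only swaps the accessor; the pre-existing TODO about
multiplying before checking for overflow still applies.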
diff --git a/gcc/gimple-iterator.c b/gcc/gimple-iterator.c
index b9453892891..a3e74fe5012 100644
--- a/gcc/gimple-iterator.c
+++ b/gcc/gimple-iterator.c
@@ -68,7 +68,7 @@ update_bb_for_stmts (gimple_seq_node first, gimple_seq_node last,
{
gimple_seq_node n;
- for (n = first; n; n = n->gsbase.next)
+ for (n = first; n; n = n->next)
{
gimple_set_bb (n, bb);
if (n == last)
@@ -86,7 +86,7 @@ update_call_edge_frequencies (gimple_seq_node first, basic_block bb)
int bb_freq = 0;
gimple_seq_node n;
- for (n = first; n ; n = n->gsbase.next)
+ for (n = first; n ; n = n->next)
if (is_gimple_call (n))
{
struct cgraph_edge *e;
@@ -124,7 +124,7 @@ gsi_insert_seq_nodes_before (gimple_stmt_iterator *i,
basic_block bb;
gimple_seq_node cur = i->ptr;
- gcc_assert (!cur || cur->gsbase.prev);
+ gcc_assert (!cur || cur->prev);
if ((bb = gsi_bb (*i)) != NULL)
update_bb_for_stmts (first, last, bb);
@@ -132,13 +132,13 @@ gsi_insert_seq_nodes_before (gimple_stmt_iterator *i,
/* Link SEQ before CUR in the sequence. */
if (cur)
{
- first->gsbase.prev = cur->gsbase.prev;
- if (first->gsbase.prev->gsbase.next)
- first->gsbase.prev->gsbase.next = first;
+ first->prev = cur->prev;
+ if (first->prev->next)
+ first->prev->next = first;
else
gimple_seq_set_first (i->seq, first);
- last->gsbase.next = cur;
- cur->gsbase.prev = last;
+ last->next = cur;
+ cur->prev = last;
}
else
{
@@ -149,11 +149,11 @@ gsi_insert_seq_nodes_before (gimple_stmt_iterator *i,
labels, so it returns an iterator after the end of the block, and
we need to insert before it; it might be cleaner to add a flag to the
iterator saying whether we are at the start or end of the list). */
- last->gsbase.next = NULL;
+ last->next = NULL;
if (itlast)
{
- first->gsbase.prev = itlast;
- itlast->gsbase.next = first;
+ first->prev = itlast;
+ itlast->next = first;
}
else
gimple_seq_set_first (i->seq, first);
@@ -242,7 +242,7 @@ gsi_insert_seq_nodes_after (gimple_stmt_iterator *i,
basic_block bb;
gimple_seq_node cur = i->ptr;
- gcc_assert (!cur || cur->gsbase.prev);
+ gcc_assert (!cur || cur->prev);
/* If the iterator is inside a basic block, we need to update the
basic block information for all the nodes between FIRST and LAST. */
@@ -252,20 +252,20 @@ gsi_insert_seq_nodes_after (gimple_stmt_iterator *i,
/* Link SEQ after CUR. */
if (cur)
{
- last->gsbase.next = cur->gsbase.next;
- if (last->gsbase.next)
+ last->next = cur->next;
+ if (last->next)
{
- last->gsbase.next->gsbase.prev = last;
+ last->next->prev = last;
}
else
gimple_seq_set_last (i->seq, last);
- first->gsbase.prev = cur;
- cur->gsbase.next = first;
+ first->prev = cur;
+ cur->next = first;
}
else
{
gcc_assert (!gimple_seq_last (*i->seq));
- last->gsbase.next = NULL;
+ last->next = NULL;
gimple_seq_set_first (i->seq, first);
gimple_seq_set_last (i->seq, last);
}
@@ -347,15 +347,15 @@ gsi_split_seq_after (gimple_stmt_iterator i)
cur = i.ptr;
/* How can we possibly split after the end, or before the beginning? */
- gcc_assert (cur && cur->gsbase.next);
- next = cur->gsbase.next;
+ gcc_assert (cur && cur->next);
+ next = cur->next;
pold_seq = i.seq;
gimple_seq_set_first (&new_seq, next);
gimple_seq_set_last (&new_seq, gimple_seq_last (*pold_seq));
gimple_seq_set_last (pold_seq, cur);
- cur->gsbase.next = NULL;
+ cur->next = NULL;
return new_seq;
}
@@ -371,17 +371,17 @@ gsi_set_stmt (gimple_stmt_iterator *gsi, gimple stmt)
gimple orig_stmt = gsi_stmt (*gsi);
gimple prev, next;
- stmt->gsbase.next = next = orig_stmt->gsbase.next;
- stmt->gsbase.prev = prev = orig_stmt->gsbase.prev;
+ stmt->next = next = orig_stmt->next;
+ stmt->prev = prev = orig_stmt->prev;
/* Note how we don't clear next/prev of orig_stmt. This is so that
copies of *GSI our callers might still hold (to orig_stmt)
can be advanced as if they too were replaced. */
- if (prev->gsbase.next)
- prev->gsbase.next = stmt;
+ if (prev->next)
+ prev->next = stmt;
else
gimple_seq_set_first (gsi->seq, stmt);
if (next)
- next->gsbase.prev = stmt;
+ next->prev = stmt;
else
gimple_seq_set_last (gsi->seq, stmt);
@@ -402,10 +402,10 @@ gsi_split_seq_before (gimple_stmt_iterator *i, gimple_seq *pnew_seq)
/* How can we possibly split after the end? */
gcc_assert (cur);
- prev = cur->gsbase.prev;
+ prev = cur->prev;
old_seq = *i->seq;
- if (!prev->gsbase.next)
+ if (!prev->next)
*i->seq = NULL;
i->seq = pnew_seq;
@@ -415,8 +415,8 @@ gsi_split_seq_before (gimple_stmt_iterator *i, gimple_seq *pnew_seq)
/* Cut OLD_SEQ before I. */
gimple_seq_set_last (&old_seq, prev);
- if (prev->gsbase.next)
- prev->gsbase.next = NULL;
+ if (prev->next)
+ prev->next = NULL;
}
@@ -576,20 +576,20 @@ gsi_remove (gimple_stmt_iterator *i, bool remove_permanently)
/* Update the iterator and re-wire the links in I->SEQ. */
cur = i->ptr;
- next = cur->gsbase.next;
- prev = cur->gsbase.prev;
+ next = cur->next;
+ prev = cur->prev;
/* See gsi_set_stmt for why we don't reset prev/next of STMT. */
if (next)
/* Cur is not last. */
- next->gsbase.prev = prev;
- else if (prev->gsbase.next)
+ next->prev = prev;
+ else if (prev->next)
/* Cur is last but not first. */
gimple_seq_set_last (i->seq, prev);
- if (prev->gsbase.next)
+ if (prev->next)
/* Cur is not first. */
- prev->gsbase.next = next;
+ prev->next = next;
else
/* Cur is first. */
*i->seq = next;
@@ -713,7 +713,7 @@ gimple_find_edge_insert_loc (edge e, gimple_stmt_iterator *gsi,
restart:
if (single_pred_p (dest)
&& gimple_seq_empty_p (phi_nodes (dest))
- && dest != EXIT_BLOCK_PTR)
+ && dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
*gsi = gsi_start_bb (dest);
if (gsi_end_p (*gsi))
@@ -744,7 +744,7 @@ gimple_find_edge_insert_loc (edge e, gimple_stmt_iterator *gsi,
src = e->src;
if ((e->flags & EDGE_ABNORMAL) == 0
&& single_succ_p (src)
- && src != ENTRY_BLOCK_PTR)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
*gsi = gsi_last_bb (src);
if (gsi_end_p (*gsi))
@@ -830,7 +830,8 @@ gsi_commit_edge_inserts (void)
edge e;
edge_iterator ei;
- gsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR), NULL);
+ gsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ NULL);
FOR_EACH_BB (bb)
FOR_EACH_EDGE (e, ei, bb->succs)
diff --git a/gcc/gimple-iterator.h b/gcc/gimple-iterator.h
index 24045f52487..11b12763505 100644
--- a/gcc/gimple-iterator.h
+++ b/gcc/gimple-iterator.h
@@ -168,7 +168,7 @@ gsi_end_p (gimple_stmt_iterator i)
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
- return i.ptr != NULL && i.ptr->gsbase.next == NULL;
+ return i.ptr != NULL && i.ptr->next == NULL;
}
/* Advance the iterator to the next gimple statement. */
@@ -176,7 +176,7 @@ gsi_one_before_end_p (gimple_stmt_iterator i)
static inline void
gsi_next (gimple_stmt_iterator *i)
{
- i->ptr = i->ptr->gsbase.next;
+ i->ptr = i->ptr->next;
}
/* Advance the iterator to the previous gimple statement. */
@@ -184,8 +184,8 @@ gsi_next (gimple_stmt_iterator *i)
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
- gimple prev = i->ptr->gsbase.prev;
- if (prev->gsbase.next)
+ gimple prev = i->ptr->prev;
+ if (prev->next)
i->ptr = prev;
else
i->ptr = NULL;
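For reference, the sequence links these accessors walk keep a simple shape
once the gsbase wrapper is gone: the head's prev points at the tail (that is
how gimple_seq_last, further down in gimple.h, finds it), while the tail's
next is NULL, which is exactly what gsi_one_before_end_p tests. A
hypothetical checker, not part of the patch, spelling that out:

    /* Sketch of the gimple_seq link invariants relied on above.  */
    static bool
    gimple_seq_links_ok_sketch (gimple_seq seq)
    {
      if (!seq)
        return true;
      gimple_seq_node first = gimple_seq_first (seq);
      gimple_seq_node last = gimple_seq_last (seq);
      return first->prev == last && last->next == NULL;
    }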
diff --git a/gcc/gimple-low.c b/gcc/gimple-low.c
index 3f9ac575e64..71f8dfec3f8 100644
--- a/gcc/gimple-low.c
+++ b/gcc/gimple-low.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "tree-nested.h"
+#include "calls.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "tree-iterator.h"
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index 928426c2462..e0786342f69 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "hashtab.h"
@@ -87,13 +88,13 @@ print_gimple_stmt (FILE *file, gimple g, int spc, int flags)
}
DEBUG_FUNCTION void
-debug (gimple_statement_d &ref)
+debug (gimple_statement_base &ref)
{
print_gimple_stmt (stderr, &ref, 0, 0);
}
DEBUG_FUNCTION void
-debug (gimple_statement_d *ptr)
+debug (gimple_statement_base *ptr)
{
if (ptr)
debug (*ptr);
@@ -1072,7 +1073,7 @@ dump_gimple_eh_dispatch (pretty_printer *buffer, gimple gs, int spc, int flags)
static void
dump_gimple_debug (pretty_printer *buffer, gimple gs, int spc, int flags)
{
- switch (gs->gsbase.subcode)
+ switch (gs->subcode)
{
case GIMPLE_DEBUG_BIND:
if (flags & TDF_RAW)
diff --git a/gcc/gimple-pretty-print.h b/gcc/gimple-pretty-print.h
index 5227eb8d584..edb23e61c71 100644
--- a/gcc/gimple-pretty-print.h
+++ b/gcc/gimple-pretty-print.h
@@ -29,8 +29,8 @@ extern void debug_gimple_stmt (gimple);
extern void debug_gimple_seq (gimple_seq);
extern void print_gimple_seq (FILE *, gimple_seq, int, int);
extern void print_gimple_stmt (FILE *, gimple, int, int);
-extern void debug (gimple_statement_d &ref);
-extern void debug (gimple_statement_d *ptr);
+extern void debug (gimple_statement_base &ref);
+extern void debug (gimple_statement_base *ptr);
extern void print_gimple_expr (FILE *, gimple, int, int);
extern void pp_gimple_stmt_1 (pretty_printer *, gimple, int, int);
extern void gimple_dump_bb (FILE *, basic_block, int, int);
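These overloads back the debugger-facing debug() convention, so a statement
can be dumped from gdb without naming a format-specific routine; a
hypothetical snippet showing both forms (assumes gimple-pretty-print.h):

    static void
    debug_stmt_both_ways_sketch (gimple stmt)
    {
      debug (stmt);     /* pointer overload; handles NULL */
      debug (*stmt);    /* reference overload */
    }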
diff --git a/gcc/gimple-ssa-isolate-paths.c b/gcc/gimple-ssa-isolate-paths.c
index c42f112da8b..1b7e3ff7d76 100644
--- a/gcc/gimple-ssa-isolate-paths.c
+++ b/gcc/gimple-ssa-isolate-paths.c
@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "gimple-ssa.h"
#include "tree-ssa-operands.h"
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index 639e037fea5..268fda26fe0 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -40,6 +40,8 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
+#include "stor-layout.h"
+#include "expr.h"
#include "basic-block.h"
#include "tree-pass.h"
#include "cfgloop.h"
@@ -48,6 +50,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "domwalk.h"
#include "pointer-set.h"
@@ -733,7 +736,7 @@ slsr_process_phi (gimple phi, bool speed)
derived_base_name = arg;
if (SSA_NAME_IS_DEFAULT_DEF (arg))
- arg_bb = single_succ (ENTRY_BLOCK_PTR);
+ arg_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
else
gimple_bb (SSA_NAME_DEF_STMT (arg));
}
diff --git a/gcc/gimple-ssa.h b/gcc/gimple-ssa.h
index 23aa099ba51..50b48bb6228 100644
--- a/gcc/gimple-ssa.h
+++ b/gcc/gimple-ssa.h
@@ -108,11 +108,13 @@ static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
struct use_optype_d *ops;
- if (!gimple_has_mem_ops (g))
+ const gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <const gimple_statement_with_memory_ops> (g);
+ if (!mem_ops_stmt)
return NULL_USE_OPERAND_P;
- ops = g->gsops.opbase.use_ops;
+ ops = mem_ops_stmt->use_ops;
if (ops
- && USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
+ && USE_OP_PTR (ops)->use == &mem_ops_stmt->vuse)
return USE_OP_PTR (ops);
return NULL_USE_OPERAND_P;
}
@@ -122,10 +124,12 @@ gimple_vuse_op (const_gimple g)
static inline def_operand_p
gimple_vdef_op (gimple g)
{
- if (!gimple_has_mem_ops (g))
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <gimple_statement_with_memory_ops> (g);
+ if (!mem_ops_stmt)
return NULL_DEF_OPERAND_P;
- if (g->gsmembase.vdef)
- return &g->gsmembase.vdef;
+ if (mem_ops_stmt->vdef)
+ return &mem_ops_stmt->vdef;
return NULL_DEF_OPERAND_P;
}
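The pattern above, applied throughout this patch, replaces a
gimple_has_mem_ops () guard plus union member access with a single dyn_cast
that yields NULL when the statement is not in the requested subclass (as_a is
the asserting variant for callers that already know the code). A minimal
usage sketch, with the helper name made up and GCC's gimple.h, is-a.h and
tree-pretty-print.h assumed:

    static void
    dump_vdef_sketch (FILE *file, const_gimple gs)
    {
      const gimple_statement_with_memory_ops *mem_ops_stmt
        = dyn_cast <const gimple_statement_with_memory_ops> (gs);
      if (mem_ops_stmt && mem_ops_stmt->vdef)
        print_generic_expr (file, mem_ops_stmt->vdef, 0);
    }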
diff --git a/gcc/gimple-streamer-in.c b/gcc/gimple-streamer-in.c
index 6f8f51a18ce..a5c8dd8fd39 100644
--- a/gcc/gimple-streamer-in.c
+++ b/gcc/gimple-streamer-in.c
@@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "data-streamer.h"
#include "tree-streamer.h"
@@ -99,12 +100,12 @@ input_gimple_stmt (struct lto_input_block *ib, struct data_in *data_in,
bp = streamer_read_bitpack (ib);
num_ops = bp_unpack_var_len_unsigned (&bp);
stmt = gimple_alloc (code, num_ops);
- stmt->gsbase.no_warning = bp_unpack_value (&bp, 1);
+ stmt->no_warning = bp_unpack_value (&bp, 1);
if (is_gimple_assign (stmt))
- stmt->gsbase.nontemporal_move = bp_unpack_value (&bp, 1);
- stmt->gsbase.has_volatile_ops = bp_unpack_value (&bp, 1);
+ stmt->nontemporal_move = bp_unpack_value (&bp, 1);
+ stmt->has_volatile_ops = bp_unpack_value (&bp, 1);
has_hist = bp_unpack_value (&bp, 1);
- stmt->gsbase.subcode = bp_unpack_var_len_unsigned (&bp);
+ stmt->subcode = bp_unpack_var_len_unsigned (&bp);
/* Read location information. */
gimple_set_location (stmt, stream_input_location (&bp, data_in));
@@ -130,13 +131,14 @@ input_gimple_stmt (struct lto_input_block *ib, struct data_in *data_in,
case GIMPLE_ASM:
{
/* FIXME lto. Move most of this into a new gimple_asm_set_string(). */
+ gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (stmt);
tree str;
- stmt->gimple_asm.ni = streamer_read_uhwi (ib);
- stmt->gimple_asm.no = streamer_read_uhwi (ib);
- stmt->gimple_asm.nc = streamer_read_uhwi (ib);
- stmt->gimple_asm.nl = streamer_read_uhwi (ib);
+ asm_stmt->ni = streamer_read_uhwi (ib);
+ asm_stmt->no = streamer_read_uhwi (ib);
+ asm_stmt->nc = streamer_read_uhwi (ib);
+ asm_stmt->nl = streamer_read_uhwi (ib);
str = streamer_read_string_cst (data_in, ib);
- stmt->gimple_asm.string = TREE_STRING_POINTER (str);
+ asm_stmt->string = TREE_STRING_POINTER (str);
}
/* Fallthru */
diff --git a/gcc/gimple-streamer-out.c b/gcc/gimple-streamer-out.c
index 4d0664f3d98..0d6b6a6ae95 100644
--- a/gcc/gimple-streamer-out.c
+++ b/gcc/gimple-streamer-out.c
@@ -78,7 +78,7 @@ output_gimple_stmt (struct output_block *ob, gimple stmt)
bp_pack_value (&bp, gimple_has_volatile_ops (stmt), 1);
hist = gimple_histogram_value (cfun, stmt);
bp_pack_value (&bp, hist != NULL, 1);
- bp_pack_var_len_unsigned (&bp, stmt->gsbase.subcode);
+ bp_pack_var_len_unsigned (&bp, stmt->subcode);
/* Emit location information for the statement. */
stream_output_location (ob, &bp, LOCATION_LOCUS (gimple_location (stmt)));
diff --git a/gcc/gimple-walk.c b/gcc/gimple-walk.c
index deb4673354a..22f4584a673 100644
--- a/gcc/gimple-walk.c
+++ b/gcc/gimple-walk.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stmt.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
diff --git a/gcc/gimple.c b/gcc/gimple.c
index b316c84c5d5..780a35042ad 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -25,6 +25,9 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "target.h"
#include "tree.h"
+#include "calls.h"
+#include "stmt.h"
+#include "stor-layout.h"
#include "ggc.h"
#include "hard-reg-set.h"
#include "basic-block.h"
@@ -93,7 +96,7 @@ static const char * const gimple_alloc_kind_names[] = {
static inline void
gimple_set_code (gimple g, enum gimple_code code)
{
- g->gsbase.code = code;
+ g->code = code;
}
/* Return the number of bytes needed to hold a GIMPLE statement with
@@ -125,13 +128,13 @@ gimple_alloc_stat (enum gimple_code code, unsigned num_ops MEM_STAT_DECL)
gimple_alloc_sizes[(int) kind] += size;
}
- stmt = ggc_alloc_cleared_gimple_statement_d_stat (size PASS_MEM_STAT);
+ stmt = ggc_alloc_cleared_gimple_statement_stat (size PASS_MEM_STAT);
gimple_set_code (stmt, code);
gimple_set_num_ops (stmt, num_ops);
/* Do not call gimple_set_modified here as it has other side
effects and this tuple is still not completely built. */
- stmt->gsbase.modified = 1;
+ stmt->modified = 1;
gimple_init_singleton (stmt);
return stmt;
@@ -145,7 +148,7 @@ gimple_set_subcode (gimple g, unsigned subcode)
/* We only have 16 bits for the RHS code. Assert that we are not
overflowing it. */
gcc_assert (subcode < (1 << 16));
- g->gsbase.subcode = subcode;
+ g->subcode = subcode;
}
@@ -280,7 +283,7 @@ static inline gimple
gimple_build_call_internal_1 (enum internal_fn fn, unsigned nargs)
{
gimple s = gimple_build_with_ops (GIMPLE_CALL, ERROR_MARK, nargs + 3);
- s->gsbase.subcode |= GF_CALL_INTERNAL;
+ s->subcode |= GF_CALL_INTERNAL;
gimple_call_set_internal_fn (s, fn);
gimple_call_reset_alias_info (s);
return s;
@@ -530,21 +533,22 @@ static inline gimple
gimple_build_asm_1 (const char *string, unsigned ninputs, unsigned noutputs,
unsigned nclobbers, unsigned nlabels)
{
- gimple p;
+ gimple_statement_asm *p;
int size = strlen (string);
/* ASMs with labels cannot have outputs. This should have been
enforced by the front end. */
gcc_assert (nlabels == 0 || noutputs == 0);
- p = gimple_build_with_ops (GIMPLE_ASM, ERROR_MARK,
- ninputs + noutputs + nclobbers + nlabels);
+ p = as_a <gimple_statement_asm> (
+ gimple_build_with_ops (GIMPLE_ASM, ERROR_MARK,
+ ninputs + noutputs + nclobbers + nlabels));
- p->gimple_asm.ni = ninputs;
- p->gimple_asm.no = noutputs;
- p->gimple_asm.nc = nclobbers;
- p->gimple_asm.nl = nlabels;
- p->gimple_asm.string = ggc_alloc_string (string, size);
+ p->ni = ninputs;
+ p->no = noutputs;
+ p->nc = nclobbers;
+ p->nl = nlabels;
+ p->string = ggc_alloc_string (string, size);
if (GATHER_STATISTICS)
gimple_alloc_sizes[(int) gimple_alloc_kind (GIMPLE_ASM)] += size;
@@ -656,14 +660,14 @@ gimple_build_eh_else (gimple_seq n_body, gimple_seq e_body)
KIND is either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY depending on
whether this is a try/catch or a try/finally respectively. */
-gimple
+gimple_statement_try *
gimple_build_try (gimple_seq eval, gimple_seq cleanup,
enum gimple_try_flags kind)
{
- gimple p;
+ gimple_statement_try *p;
gcc_assert (kind == GIMPLE_TRY_CATCH || kind == GIMPLE_TRY_FINALLY);
- p = gimple_alloc (GIMPLE_TRY, 0);
+ p = as_a <gimple_statement_try> (gimple_alloc (GIMPLE_TRY, 0));
gimple_set_subcode (p, kind);
if (eval)
gimple_try_set_eval (p, eval);
@@ -693,8 +697,10 @@ gimple_build_wce (gimple_seq cleanup)
gimple
gimple_build_resx (int region)
{
- gimple p = gimple_build_with_ops (GIMPLE_RESX, ERROR_MARK, 0);
- p->gimple_eh_ctrl.region = region;
+ gimple_statement_eh_ctrl *p =
+ as_a <gimple_statement_eh_ctrl> (
+ gimple_build_with_ops (GIMPLE_RESX, ERROR_MARK, 0));
+ p->region = region;
return p;
}
@@ -741,8 +747,10 @@ gimple_build_switch (tree index, tree default_label, vec<tree> args)
gimple
gimple_build_eh_dispatch (int region)
{
- gimple p = gimple_build_with_ops (GIMPLE_EH_DISPATCH, ERROR_MARK, 0);
- p->gimple_eh_ctrl.region = region;
+ gimple_statement_eh_ctrl *p =
+ as_a <gimple_statement_eh_ctrl> (
+ gimple_build_with_ops (GIMPLE_EH_DISPATCH, ERROR_MARK, 0));
+ p->region = region;
return p;
}
@@ -816,14 +824,17 @@ gimple
gimple_build_omp_for (gimple_seq body, int kind, tree clauses, size_t collapse,
gimple_seq pre_body)
{
- gimple p = gimple_alloc (GIMPLE_OMP_FOR, 0);
+ gimple_statement_omp_for *p =
+ as_a <gimple_statement_omp_for> (gimple_alloc (GIMPLE_OMP_FOR, 0));
if (body)
gimple_omp_set_body (p, body);
gimple_omp_for_set_clauses (p, clauses);
gimple_omp_for_set_kind (p, kind);
- p->gimple_omp_for.collapse = collapse;
- p->gimple_omp_for.iter
- = ggc_alloc_cleared_vec_gimple_omp_for_iter (collapse);
+ p->collapse = collapse;
+ p->iter = static_cast <struct gimple_omp_for_iter *> (
+ ggc_internal_cleared_vec_alloc_stat (sizeof (*p->iter),
+ collapse MEM_STAT_INFO));
+
if (pre_body)
gimple_omp_for_set_pre_body (p, pre_body);
@@ -1111,8 +1122,8 @@ gimple_check_failed (const_gimple gs, const char *file, int line,
gimple_code_name[code],
get_tree_code_name (subcode),
gimple_code_name[gimple_code (gs)],
- gs->gsbase.subcode > 0
- ? get_tree_code_name ((enum tree_code) gs->gsbase.subcode)
+ gs->subcode > 0
+ ? get_tree_code_name ((enum tree_code) gs->subcode)
: "",
function, trim_filename (file), line);
}
@@ -1306,7 +1317,7 @@ gimple_call_flags (const_gimple stmt)
else
flags = flags_from_decl_or_type (gimple_call_fntype (stmt));
- if (stmt->gsbase.subcode & GF_CALL_NOTHROW)
+ if (stmt->subcode & GF_CALL_NOTHROW)
flags |= ECF_NOTHROW;
return flags;
@@ -1447,7 +1458,7 @@ gimple_assign_unary_nop_p (gimple gs)
void
gimple_set_bb (gimple stmt, basic_block bb)
{
- stmt->gsbase.bb = bb;
+ stmt->bb = bb;
/* If the statement is a label, add the label to block-to-labels map
so that we can speed up edge creation for GIMPLE_GOTOs. */
@@ -1642,9 +1653,15 @@ gimple_copy (gimple stmt)
gimple_omp_for_set_pre_body (copy, new_seq);
t = unshare_expr (gimple_omp_for_clauses (stmt));
gimple_omp_for_set_clauses (copy, t);
- copy->gimple_omp_for.iter
- = ggc_alloc_vec_gimple_omp_for_iter
- (gimple_omp_for_collapse (stmt));
+ {
+ gimple_statement_omp_for *omp_for_copy =
+ as_a <gimple_statement_omp_for> (copy);
+ omp_for_copy->iter =
+ static_cast <struct gimple_omp_for_iter *> (
+ ggc_internal_vec_alloc_stat (sizeof (struct gimple_omp_for_iter),
+ gimple_omp_for_collapse (stmt)
+ MEM_STAT_INFO));
+ }
for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
{
gimple_omp_for_set_cond (copy, i,
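Several build routines above now work on the concrete subclass internally
(gimple_statement_asm, gimple_statement_try, gimple_statement_eh_ctrl,
gimple_statement_omp_for), and gimple_build_try even returns the subclass
pointer. That stays source-compatible for existing callers because gimple is
now a pointer to gimple_statement_base (the coretypes.h side of that change
is outside these hunks), so the subclass pointer converts implicitly; a
hypothetical caller:

    static gimple
    build_try_finally_sketch (gimple_seq body, gimple_seq cleanup)
    {
      gimple_statement_try *try_stmt
        = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
      return try_stmt;  /* derived-to-base conversion; no cast needed */
    }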
diff --git a/gcc/gimple.h b/gcc/gimple.h
index 4234c3cfdb4..0eb23fc39e8 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
+#include "is-a.h"
typedef gimple gimple_seq_node;
@@ -146,7 +147,10 @@ enum plf_mask {
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
-struct GTY((chain_next ("%h.next"))) gimple_statement_base {
+struct GTY((desc ("gimple_statement_structure (&%h)"), tag ("GSS_BASE"),
+ chain_next ("%h.next"), variable_size))
+ gimple_statement_base
+{
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
@@ -210,10 +214,11 @@ struct GTY((chain_next ("%h.next"))) gimple_statement_base {
/* Base structure for tuples with operands. */
-struct GTY(()) gimple_statement_with_ops_base
+/* This gimple subclass has no tag value. */
+struct GTY(())
+ gimple_statement_with_ops_base : public gimple_statement_base
{
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
SSA operand vectors. NOTE: It should be possible to
@@ -226,25 +231,25 @@ struct GTY(()) gimple_statement_with_ops_base
/* Statements that take register operands. */
-struct GTY(()) gimple_statement_with_ops
+struct GTY((tag("GSS_WITH_OPS")))
+ gimple_statement_with_ops : public gimple_statement_with_ops_base
{
- /* [ WORD 1-7 ] */
- struct gimple_statement_with_ops_base opbase;
+ /* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
- tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1];
+ tree GTY((length ("%h.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
-struct GTY(()) gimple_statement_with_memory_ops_base
+struct GTY((tag("GSS_WITH_MEM_OPS_BASE")))
+ gimple_statement_with_memory_ops_base : public gimple_statement_with_ops_base
{
- /* [ WORD 1-7 ] */
- struct gimple_statement_with_ops_base opbase;
+ /* [ WORD 1-7 ] : base class */
/* [ WORD 8-9 ]
Virtual operands for this statement. The GC will pick them
@@ -256,32 +261,33 @@ struct GTY(()) gimple_statement_with_memory_ops_base
/* Statements that take both memory and register operands. */
-struct GTY(()) gimple_statement_with_memory_ops
+struct GTY((tag("GSS_WITH_MEM_OPS")))
+ gimple_statement_with_memory_ops :
+ public gimple_statement_with_memory_ops_base
{
- /* [ WORD 1-9 ] */
- struct gimple_statement_with_memory_ops_base membase;
+ /* [ WORD 1-9 ] : base class */
/* [ WORD 10 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
- tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
+ tree GTY((length ("%h.num_ops"))) op[1];
};
/* Call statements that take both memory and register operands. */
-struct GTY(()) gimple_statement_call
+struct GTY((tag("GSS_CALL")))
+ gimple_statement_call : public gimple_statement_with_memory_ops_base
{
- /* [ WORD 1-9 ] */
- struct gimple_statement_with_memory_ops_base membase;
+ /* [ WORD 1-9 ] : base class */
/* [ WORD 10-13 ] */
struct pt_solution call_used;
struct pt_solution call_clobbered;
/* [ WORD 14 ] */
- union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) {
+ union GTY ((desc ("%1.subcode & GF_CALL_INTERNAL"))) {
tree GTY ((tag ("0"))) fntype;
enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
} u;
@@ -290,15 +296,16 @@ struct GTY(()) gimple_statement_call
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
- tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
+ tree GTY((length ("%h.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). */
-struct GTY(()) gimple_statement_omp {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_OMP")))
+ gimple_statement_omp : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
gimple_seq body;
@@ -307,9 +314,10 @@ struct GTY(()) gimple_statement_omp {
/* GIMPLE_BIND */
-struct GTY(()) gimple_statement_bind {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_BIND")))
+ gimple_statement_bind : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Variables declared in this scope. */
@@ -330,9 +338,10 @@ struct GTY(()) gimple_statement_bind {
/* GIMPLE_CATCH */
-struct GTY(()) gimple_statement_catch {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_CATCH")))
+ gimple_statement_catch : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree types;
@@ -344,9 +353,10 @@ struct GTY(()) gimple_statement_catch {
/* GIMPLE_EH_FILTER */
-struct GTY(()) gimple_statement_eh_filter {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_EH_FILTER")))
+ gimple_statement_eh_filter : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Filter types. */
@@ -359,9 +369,10 @@ struct GTY(()) gimple_statement_eh_filter {
/* GIMPLE_EH_ELSE */
-struct GTY(()) gimple_statement_eh_else {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_EH_ELSE")))
+ gimple_statement_eh_else : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7,8 ] */
gimple_seq n_body, e_body;
@@ -369,9 +380,10 @@ struct GTY(()) gimple_statement_eh_else {
/* GIMPLE_EH_MUST_NOT_THROW */
-struct GTY(()) gimple_statement_eh_mnt {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_EH_MNT")))
+ gimple_statement_eh_mnt : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] Abort function decl. */
tree fndecl;
@@ -379,9 +391,10 @@ struct GTY(()) gimple_statement_eh_mnt {
/* GIMPLE_PHI */
-struct GTY(()) gimple_statement_phi {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_PHI")))
+ gimple_statement_phi : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
unsigned capacity;
@@ -397,10 +410,10 @@ struct GTY(()) gimple_statement_phi {
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
-struct GTY(()) gimple_statement_eh_ctrl
+struct GTY((tag("GSS_EH_CTRL")))
+ gimple_statement_eh_ctrl : public gimple_statement_base
{
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Exception region number. */
@@ -410,9 +423,10 @@ struct GTY(()) gimple_statement_eh_ctrl
/* GIMPLE_TRY */
-struct GTY(()) gimple_statement_try {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_TRY")))
+ gimple_statement_try : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ]
Expression to evaluate. */
@@ -439,9 +453,10 @@ enum gimple_try_flags
/* GIMPLE_WITH_CLEANUP_EXPR */
-struct GTY(()) gimple_statement_wce {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_WCE")))
+ gimple_statement_wce : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
@@ -456,10 +471,10 @@ struct GTY(()) gimple_statement_wce {
/* GIMPLE_ASM */
-struct GTY(()) gimple_statement_asm
+struct GTY((tag("GSS_ASM")))
+ gimple_statement_asm : public gimple_statement_with_memory_ops_base
{
- /* [ WORD 1-9 ] */
- struct gimple_statement_with_memory_ops_base membase;
+ /* [ WORD 1-9 ] : base class */
/* [ WORD 10 ]
__asm__ statement. */
@@ -476,14 +491,15 @@ struct GTY(()) gimple_statement_asm
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
- tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
+ tree GTY((length ("%h.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
-struct GTY(()) gimple_statement_omp_critical {
- /* [ WORD 1-7 ] */
- struct gimple_statement_omp omp;
+struct GTY((tag("GSS_OMP_CRITICAL")))
+ gimple_statement_omp_critical : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Critical section name. */
@@ -510,9 +526,10 @@ struct GTY(()) gimple_omp_for_iter {
/* GIMPLE_OMP_FOR */
-struct GTY(()) gimple_statement_omp_for {
- /* [ WORD 1-7 ] */
- struct gimple_statement_omp omp;
+struct GTY((tag("GSS_OMP_FOR")))
+ gimple_statement_omp_for : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
/* [ WORD 8 ] */
tree clauses;
@@ -532,9 +549,10 @@ struct GTY(()) gimple_statement_omp_for {
/* GIMPLE_OMP_PARALLEL */
-struct GTY(()) gimple_statement_omp_parallel {
- /* [ WORD 1-7 ] */
- struct gimple_statement_omp omp;
+struct GTY((tag("GSS_OMP_PARALLEL")))
+ gimple_statement_omp_parallel : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
/* [ WORD 8 ]
Clauses. */
@@ -552,9 +570,10 @@ struct GTY(()) gimple_statement_omp_parallel {
/* GIMPLE_OMP_TASK */
-struct GTY(()) gimple_statement_omp_task {
- /* [ WORD 1-10 ] */
- struct gimple_statement_omp_parallel par;
+struct GTY((tag("GSS_OMP_TASK")))
+ gimple_statement_omp_task : public gimple_statement_omp_parallel
+{
+ /* [ WORD 1-10 ] : base class */
/* [ WORD 11 ]
Child function holding firstprivate initialization if needed. */
@@ -573,9 +592,10 @@ struct GTY(()) gimple_statement_omp_task {
/* GIMPLE_OMP_SECTIONS */
-struct GTY(()) gimple_statement_omp_sections {
- /* [ WORD 1-7 ] */
- struct gimple_statement_omp omp;
+struct GTY((tag("GSS_OMP_SECTIONS")))
+ gimple_statement_omp_sections : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
/* [ WORD 8 ] */
tree clauses;
@@ -591,9 +611,10 @@ struct GTY(()) gimple_statement_omp_sections {
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
-struct GTY(()) gimple_statement_omp_continue {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_OMP_CONTINUE")))
+ gimple_statement_omp_continue : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree control_def;
@@ -604,9 +625,10 @@ struct GTY(()) gimple_statement_omp_continue {
/* GIMPLE_OMP_SINGLE, GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS */
-struct GTY(()) gimple_statement_omp_single {
- /* [ WORD 1-7 ] */
- struct gimple_statement_omp omp;
+struct GTY((tag("GSS_OMP_SINGLE")))
+ gimple_statement_omp_single : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
/* [ WORD 7 ] */
tree clauses;
@@ -617,9 +639,10 @@ struct GTY(()) gimple_statement_omp_single {
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
-struct GTY(()) gimple_statement_omp_atomic_load {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_OMP_ATOMIC_LOAD")))
+ gimple_statement_omp_atomic_load : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7-8 ] */
tree rhs, lhs;
@@ -628,9 +651,10 @@ struct GTY(()) gimple_statement_omp_atomic_load {
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
-struct GTY(()) gimple_statement_omp_atomic_store {
- /* [ WORD 1-6 ] */
- struct gimple_statement_base gsbase;
+struct GTY((tag("GSS_OMP_ATOMIC_STORE")))
+ gimple_statement_omp_atomic_store : public gimple_statement_base
+{
+ /* [ WORD 1-6 ] : base class */
/* [ WORD 7 ] */
tree val;
@@ -664,10 +688,10 @@ struct GTY(()) gimple_statement_omp_atomic_store {
likely because it is guaranteed to go irrevocable upon entry. */
#define GTMA_HAS_NO_INSTRUMENTATION (1u << 7)
-struct GTY(()) gimple_statement_transaction
+struct GTY((tag("GSS_TRANSACTION")))
+ gimple_statement_transaction : public gimple_statement_with_memory_ops_base
{
- /* [ WORD 1-9 ] */
- struct gimple_statement_with_memory_ops_base gsbase;
+ /* [ WORD 1-9 ] : base class */
/* [ WORD 10 ] */
gimple_seq body;
@@ -683,39 +707,309 @@ enum gimple_statement_structure_enum {
};
#undef DEFGSSTRUCT
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_asm>::test (gimple gs)
+{
+ return gs->code == GIMPLE_ASM;
+}
-/* Define the overall contents of a gimple tuple. It may be any of the
- structures declared above for various types of tuples. */
-
-union GTY ((desc ("gimple_statement_structure (&%h)"),
- chain_next ("%h.gsbase.next"), variable_size)) gimple_statement_d {
- struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
- struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
- struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
- struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem;
- struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call;
- struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp;
- struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind;
- struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
- struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
- struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
- struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else;
- struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
- struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
- struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
- struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce;
- struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm;
- struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical;
- struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for;
- struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel;
- struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task;
- struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections;
- struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single;
- struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
- struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
- struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
- struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction;
-};
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_bind>::test (gimple gs)
+{
+ return gs->code == GIMPLE_BIND;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_call>::test (gimple gs)
+{
+ return gs->code == GIMPLE_CALL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_catch>::test (gimple gs)
+{
+ return gs->code == GIMPLE_CATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_eh_ctrl>::test (gimple gs)
+{
+ return gs->code == GIMPLE_RESX || gs->code == GIMPLE_EH_DISPATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_eh_else>::test (gimple gs)
+{
+ return gs->code == GIMPLE_EH_ELSE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_eh_filter>::test (gimple gs)
+{
+ return gs->code == GIMPLE_EH_FILTER;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_eh_mnt>::test (gimple gs)
+{
+ return gs->code == GIMPLE_EH_MUST_NOT_THROW;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_atomic_load>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_atomic_store>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_STORE || gs->code == GIMPLE_OMP_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_continue>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_CONTINUE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_critical>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_CRITICAL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_for>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_FOR;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_parallel>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK || gs->code == GIMPLE_OMP_TARGET;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_sections>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_SECTIONS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_single>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_SINGLE || gs->code == GIMPLE_OMP_TEAMS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_task>::test (gimple gs)
+{
+ return gs->code == GIMPLE_OMP_TASK;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_phi>::test (gimple gs)
+{
+ return gs->code == GIMPLE_PHI;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_transaction>::test (gimple gs)
+{
+ return gs->code == GIMPLE_TRANSACTION;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_try>::test (gimple gs)
+{
+ return gs->code == GIMPLE_TRY;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_wce>::test (gimple gs)
+{
+ return gs->code == GIMPLE_WITH_CLEANUP_EXPR;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_asm>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_ASM;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_bind>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_BIND;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_call>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_CALL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_catch>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_CATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_eh_ctrl>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_RESX || gs->code == GIMPLE_EH_DISPATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_eh_filter>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_EH_FILTER;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_atomic_load>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_atomic_store>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_STORE || gs->code == GIMPLE_OMP_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_continue>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_CONTINUE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_critical>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_CRITICAL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_for>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_FOR;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_parallel>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK || gs->code == GIMPLE_OMP_TARGET;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_sections>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_SECTIONS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_single>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_SINGLE || gs->code == GIMPLE_OMP_TEAMS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_task>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_OMP_TASK;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_phi>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_PHI;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_transaction>::test (const_gimple gs)
+{
+ return gs->code == GIMPLE_TRANSACTION;
+}
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
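The block of is_a_helper specializations above is what drives is-a.h's is_a,
as_a and dyn_cast for statements: each test () simply classifies by gimple
code, with a few helpers accepting several codes (eh_ctrl, omp_parallel,
omp_single, omp_atomic_store). A hypothetical consumer, not part of the
patch:

    /* Return the EH region of a GIMPLE_RESX or GIMPLE_EH_DISPATCH,
       or -1 for any other statement, via the eh_ctrl helper above.  */
    static int
    eh_ctrl_region_sketch (gimple gs)
    {
      if (gimple_statement_eh_ctrl *eh_ctrl_stmt
            = dyn_cast <gimple_statement_eh_ctrl> (gs))
        return eh_ctrl_stmt->region;
      return -1;
    }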
@@ -758,7 +1052,8 @@ gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_eh_else (gimple_seq, gimple_seq);
-gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
+gimple_statement_try *gimple_build_try (gimple_seq, gimple_seq,
+ enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
@@ -890,7 +1185,7 @@ gimple_seq_first_stmt (gimple_seq s)
static inline gimple_seq_node
gimple_seq_last (gimple_seq s)
{
- return s ? s->gsbase.prev : NULL;
+ return s ? s->prev : NULL;
}
@@ -909,7 +1204,7 @@ gimple_seq_last_stmt (gimple_seq s)
static inline void
gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
{
- (*ps)->gsbase.prev = last;
+ (*ps)->prev = last;
}
@@ -970,7 +1265,7 @@ set_bb_seq (basic_block bb, gimple_seq seq)
static inline enum gimple_code
gimple_code (const_gimple g)
{
- return g->gsbase.code;
+ return g->code;
}
@@ -1033,7 +1328,7 @@ gimple_has_substatements (gimple g)
static inline basic_block
gimple_bb (const_gimple g)
{
- return g->gsbase.bb;
+ return g->bb;
}
@@ -1042,7 +1337,7 @@ gimple_bb (const_gimple g)
static inline tree
gimple_block (const_gimple g)
{
- return LOCATION_BLOCK (g->gsbase.location);
+ return LOCATION_BLOCK (g->location);
}
@@ -1052,10 +1347,10 @@ static inline void
gimple_set_block (gimple g, tree block)
{
if (block)
- g->gsbase.location =
- COMBINE_LOCATION_DATA (line_table, g->gsbase.location, block);
+ g->location =
+ COMBINE_LOCATION_DATA (line_table, g->location, block);
else
- g->gsbase.location = LOCATION_LOCUS (g->gsbase.location);
+ g->location = LOCATION_LOCUS (g->location);
}
@@ -1064,7 +1359,7 @@ gimple_set_block (gimple g, tree block)
static inline location_t
gimple_location (const_gimple g)
{
- return g->gsbase.location;
+ return g->location;
}
/* Return pointer to location information for statement G. */
@@ -1072,7 +1367,7 @@ gimple_location (const_gimple g)
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
- return &g->gsbase.location;
+ return &g->location;
}
@@ -1081,7 +1376,7 @@ gimple_location_ptr (const_gimple g)
static inline void
gimple_set_location (gimple g, location_t location)
{
- g->gsbase.location = location;
+ g->location = location;
}
@@ -1126,7 +1421,7 @@ gimple_seq_singleton_p (gimple_seq seq)
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
- return stmt->gsbase.no_warning;
+ return stmt->no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING. */
@@ -1134,7 +1429,7 @@ gimple_no_warning_p (const_gimple stmt)
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
- stmt->gsbase.no_warning = (unsigned) no_warning;
+ stmt->no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P. */
@@ -1142,7 +1437,7 @@ gimple_set_no_warning (gimple stmt, bool no_warning)
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
- stmt->gsbase.visited = (unsigned) visited_p;
+ stmt->visited = (unsigned) visited_p;
}
@@ -1151,7 +1446,7 @@ gimple_set_visited (gimple stmt, bool visited_p)
static inline bool
gimple_visited_p (gimple stmt)
{
- return stmt->gsbase.visited;
+ return stmt->visited;
}
@@ -1161,9 +1456,9 @@ static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
if (val_p)
- stmt->gsbase.plf |= (unsigned int) plf;
+ stmt->plf |= (unsigned int) plf;
else
- stmt->gsbase.plf &= ~((unsigned int) plf);
+ stmt->plf &= ~((unsigned int) plf);
}
@@ -1172,7 +1467,7 @@ gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
- return stmt->gsbase.plf & ((unsigned int) plf);
+ return stmt->plf & ((unsigned int) plf);
}
@@ -1181,7 +1476,7 @@ gimple_plf (gimple stmt, enum plf_mask plf)
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
- g->gsbase.uid = uid;
+ g->uid = uid;
}
@@ -1190,7 +1485,7 @@ gimple_set_uid (gimple g, unsigned uid)
static inline unsigned
gimple_uid (const_gimple g)
{
- return g->gsbase.uid;
+ return g->uid;
}
@@ -1199,8 +1494,8 @@ gimple_uid (const_gimple g)
static inline void
gimple_init_singleton (gimple g)
{
- g->gsbase.next = NULL;
- g->gsbase.prev = g;
+ g->next = NULL;
+ g->prev = g;
}
@@ -1212,6 +1507,21 @@ gimple_has_ops (const_gimple g)
return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_with_ops>::test (const_gimple gs)
+{
+ return gimple_has_ops (gs);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_with_ops>::test (gimple gs)
+{
+ return gimple_has_ops (gs);
+}
/* Return true if GIMPLE statement G has memory operands. */
@@ -1221,15 +1531,32 @@ gimple_has_mem_ops (const_gimple g)
return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_with_memory_ops>::test (const_gimple gs)
+{
+ return gimple_has_mem_ops (gs);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_with_memory_ops>::test (gimple gs)
+{
+ return gimple_has_mem_ops (gs);
+}
/* Return the set of USE operands for statement G. */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
- if (!gimple_has_ops (g))
+ const gimple_statement_with_ops *ops_stmt =
+ dyn_cast <const gimple_statement_with_ops> (g);
+ if (!ops_stmt)
return NULL;
- return g->gsops.opbase.use_ops;
+ return ops_stmt->use_ops;
}
@@ -1238,8 +1565,9 @@ gimple_use_ops (const_gimple g)
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
- gcc_gimple_checking_assert (gimple_has_ops (g));
- g->gsops.opbase.use_ops = use;
+ gimple_statement_with_ops *ops_stmt =
+ as_a <gimple_statement_with_ops> (g);
+ ops_stmt->use_ops = use;
}
@@ -1248,9 +1576,11 @@ gimple_set_use_ops (gimple g, struct use_optype_d *use)
static inline tree
gimple_vuse (const_gimple g)
{
- if (!gimple_has_mem_ops (g))
+ const gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <const gimple_statement_with_memory_ops> (g);
+ if (!mem_ops_stmt)
return NULL_TREE;
- return g->gsmembase.vuse;
+ return mem_ops_stmt->vuse;
}
/* Return the single VDEF operand of the statement G. */
@@ -1258,9 +1588,11 @@ gimple_vuse (const_gimple g)
static inline tree
gimple_vdef (const_gimple g)
{
- if (!gimple_has_mem_ops (g))
+ const gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <const gimple_statement_with_memory_ops> (g);
+ if (!mem_ops_stmt)
return NULL_TREE;
- return g->gsmembase.vdef;
+ return mem_ops_stmt->vdef;
}
/* Return the single VUSE operand of the statement G. */
@@ -1268,9 +1600,11 @@ gimple_vdef (const_gimple g)
static inline tree *
gimple_vuse_ptr (gimple g)
{
- if (!gimple_has_mem_ops (g))
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <gimple_statement_with_memory_ops> (g);
+ if (!mem_ops_stmt)
return NULL;
- return &g->gsmembase.vuse;
+ return &mem_ops_stmt->vuse;
}
/* Return the single VDEF operand of the statement G. */
@@ -1278,9 +1612,11 @@ gimple_vuse_ptr (gimple g)
static inline tree *
gimple_vdef_ptr (gimple g)
{
- if (!gimple_has_mem_ops (g))
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <gimple_statement_with_memory_ops> (g);
+ if (!mem_ops_stmt)
return NULL;
- return &g->gsmembase.vdef;
+ return &mem_ops_stmt->vdef;
}
/* Set the single VUSE operand of the statement G. */
@@ -1288,8 +1624,9 @@ gimple_vdef_ptr (gimple g)
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
- gcc_gimple_checking_assert (gimple_has_mem_ops (g));
- g->gsmembase.vuse = vuse;
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ as_a <gimple_statement_with_memory_ops> (g);
+ mem_ops_stmt->vuse = vuse;
}
/* Set the single VDEF operand of the statement G. */
@@ -1297,8 +1634,9 @@ gimple_set_vuse (gimple g, tree vuse)
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
- gcc_gimple_checking_assert (gimple_has_mem_ops (g));
- g->gsmembase.vdef = vdef;
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ as_a <gimple_statement_with_memory_ops> (g);
+ mem_ops_stmt->vdef = vdef;
}
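
The vuse/vdef accessors above settle into a consistent split: the getters use dyn_cast <> and simply return NULL_TREE/NULL for statements without memory operands, while the setters use as_a <>, which with checking enabled asserts that the cast is valid. A hedged usage sketch (the helper name is hypothetical; the accessors are the ones defined here):

static inline void
copy_virtual_operands_sketch (gimple src, gimple dst)
{
  /* Reads are safe on any statement kind; they return NULL_TREE when
     SRC has no memory operands.  */
  tree vuse = gimple_vuse (src);
  tree vdef = gimple_vdef (src);
  /* Writes require DST to have memory operands; the as_a <> inside the
     setters enforces that.  */
  if (vuse != NULL_TREE)
    gimple_set_vuse (dst, vuse);
  if (vdef != NULL_TREE)
    gimple_set_vdef (dst, vdef);
}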
@@ -1308,7 +1646,7 @@ gimple_set_vdef (gimple g, tree vdef)
static inline bool
gimple_modified_p (const_gimple g)
{
- return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
+ return (gimple_has_ops (g)) ? (bool) g->modified : false;
}
@@ -1319,7 +1657,7 @@ static inline void
gimple_set_modified (gimple s, bool modifiedp)
{
if (gimple_has_ops (s))
- s->gsbase.modified = (unsigned) modifiedp;
+ s->modified = (unsigned) modifiedp;
}
@@ -1334,7 +1672,7 @@ gimple_expr_code (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
- return (enum tree_code) stmt->gsbase.subcode;
+ return (enum tree_code) stmt->subcode;
else
{
gcc_gimple_checking_assert (code == GIMPLE_CALL);
@@ -1349,7 +1687,7 @@ static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
if (gimple_has_mem_ops (stmt))
- return stmt->gsbase.has_volatile_ops;
+ return stmt->has_volatile_ops;
else
return false;
}
@@ -1361,7 +1699,7 @@ static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
if (gimple_has_mem_ops (stmt))
- stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
+ stmt->has_volatile_ops = (unsigned) volatilep;
}
/* Return true if STMT is in a transaction. */
@@ -1388,7 +1726,7 @@ gimple_omp_subcode (const_gimple s)
{
gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
&& gimple_code (s) <= GIMPLE_OMP_TEAMS);
- return s->gsbase.subcode;
+ return s->subcode;
}
/* Set the subcode for OMP statement S to SUBCODE. */
@@ -1399,7 +1737,7 @@ gimple_omp_set_subcode (gimple s, unsigned int subcode)
/* We only have 16 bits for the subcode. Assert that we are not
overflowing it. */
gcc_gimple_checking_assert (subcode < (1 << 16));
- s->gsbase.subcode = subcode;
+ s->subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S. */
@@ -1408,7 +1746,7 @@ static inline void
gimple_omp_return_set_nowait (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
- s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
+ s->subcode |= GF_OMP_RETURN_NOWAIT;
}
@@ -1428,8 +1766,9 @@ gimple_omp_return_nowait_p (const_gimple g)
static inline void
gimple_omp_return_set_lhs (gimple g, tree lhs)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
- g->gimple_omp_atomic_store.val = lhs;
+ gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
+ as_a <gimple_statement_omp_atomic_store> (g);
+ omp_atomic_store_stmt->val = lhs;
}
@@ -1438,8 +1777,9 @@ gimple_omp_return_set_lhs (gimple g, tree lhs)
static inline tree
gimple_omp_return_lhs (const_gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
- return g->gimple_omp_atomic_store.val;
+ const gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
+ as_a <const gimple_statement_omp_atomic_store> (g);
+ return omp_atomic_store_stmt->val;
}
@@ -1448,8 +1788,9 @@ gimple_omp_return_lhs (const_gimple g)
static inline tree *
gimple_omp_return_lhs_ptr (gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
- return &g->gimple_omp_atomic_store.val;
+ gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
+ as_a <gimple_statement_omp_atomic_store> (g);
+ return &omp_atomic_store_stmt->val;
}
@@ -1470,7 +1811,7 @@ static inline void
gimple_omp_section_set_last (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
- g->gsbase.subcode |= GF_OMP_SECTION_LAST;
+ g->subcode |= GF_OMP_SECTION_LAST;
}
@@ -1493,9 +1834,9 @@ gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
if (combined_p)
- g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
+ g->subcode |= GF_OMP_PARALLEL_COMBINED;
else
- g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
+ g->subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
@@ -1518,7 +1859,7 @@ gimple_omp_atomic_set_need_value (gimple g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
- g->gsbase.subcode |= GF_OMP_ATOMIC_NEED_VALUE;
+ g->subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}
@@ -1541,7 +1882,7 @@ gimple_omp_atomic_set_seq_cst (gimple g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
- g->gsbase.subcode |= GF_OMP_ATOMIC_SEQ_CST;
+ g->subcode |= GF_OMP_ATOMIC_SEQ_CST;
}
@@ -1550,7 +1891,7 @@ gimple_omp_atomic_set_seq_cst (gimple g)
static inline unsigned
gimple_num_ops (const_gimple gs)
{
- return gs->gsbase.num_ops;
+ return gs->num_ops;
}
@@ -1559,7 +1900,7 @@ gimple_num_ops (const_gimple gs)
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
- gs->gsbase.num_ops = num_ops;
+ gs->num_ops = num_ops;
}
@@ -1790,7 +2131,7 @@ static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
- return gs->gsbase.nontemporal_move;
+ return gs->nontemporal_move;
}
/* Sets nontemporal move flag of GS to NONTEMPORAL. */
@@ -1799,7 +2140,7 @@ static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
- gs->gsbase.nontemporal_move = nontemporal;
+ gs->nontemporal_move = nontemporal;
}
@@ -1813,7 +2154,7 @@ gimple_assign_rhs_code (const_gimple gs)
enum tree_code code;
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
- code = (enum tree_code) gs->gsbase.subcode;
+ code = (enum tree_code) gs->subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns, we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations. */
@@ -1831,7 +2172,7 @@ static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
GIMPLE_CHECK (s, GIMPLE_ASSIGN);
- s->gsbase.subcode = code;
+ s->subcode = code;
}
@@ -1956,7 +2297,7 @@ static inline bool
gimple_call_internal_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
- return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0;
+ return (gs->subcode & GF_CALL_INTERNAL) != 0;
}
@@ -1967,7 +2308,7 @@ static inline bool
gimple_call_with_bounds_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
- return (gs->gsbase.subcode & GF_CALL_WITH_BOUNDS) != 0;
+ return (gs->subcode & GF_CALL_WITH_BOUNDS) != 0;
}
@@ -1979,9 +2320,9 @@ gimple_call_set_with_bounds (gimple gs, bool with_bounds)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
if (with_bounds)
- gs->gsbase.subcode |= GF_CALL_WITH_BOUNDS;
+ gs->subcode |= GF_CALL_WITH_BOUNDS;
else
- gs->gsbase.subcode &= ~GF_CALL_WITH_BOUNDS;
+ gs->subcode &= ~GF_CALL_WITH_BOUNDS;
}
@@ -1991,7 +2332,7 @@ static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
gcc_gimple_checking_assert (gimple_call_internal_p (gs));
- return gs->gimple_call.u.internal_fn;
+ return static_cast <const gimple_statement_call *> (gs)->u.internal_fn;
}
@@ -2000,10 +2341,11 @@ gimple_call_internal_fn (const_gimple gs)
static inline tree
gimple_call_fntype (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_CALL);
+ const gimple_statement_call *call_stmt =
+ as_a <const gimple_statement_call> (gs);
if (gimple_call_internal_p (gs))
return NULL_TREE;
- return gs->gimple_call.u.fntype;
+ return call_stmt->u.fntype;
}
/* Set the type of the function called by GS to FNTYPE. */
@@ -2011,9 +2353,9 @@ gimple_call_fntype (const_gimple gs)
static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
- GIMPLE_CHECK (gs, GIMPLE_CALL);
+ gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
- gs->gimple_call.u.fntype = fntype;
+ call_stmt->u.fntype = fntype;
}
@@ -2065,9 +2407,9 @@ gimple_call_set_fndecl (gimple gs, tree decl)
static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
- GIMPLE_CHECK (gs, GIMPLE_CALL);
+ gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
gcc_gimple_checking_assert (gimple_call_internal_p (gs));
- gs->gimple_call.u.internal_fn = fn;
+ call_stmt->u.internal_fn = fn;
}
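
The GIMPLE_CALL accessors above rely on a small invariant: u.fntype and u.internal_fn share a union, and gimple_call_internal_p () (the GF_CALL_INTERNAL bit of the subcode) says which member is live. A short sketch under that assumption, with a hypothetical function name:

static inline tree
call_fntype_or_null (gimple stmt)
{
  if (!is_gimple_call (stmt))
    return NULL_TREE;
  /* For internal calls u.internal_fn is the live union member, so there
     is no fntype to return.  */
  if (gimple_call_internal_p (stmt))
    return NULL_TREE;
  return gimple_call_fntype (stmt);
}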
@@ -2180,9 +2522,9 @@ gimple_call_set_tail (gimple s, bool tail_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (tail_p)
- s->gsbase.subcode |= GF_CALL_TAILCALL;
+ s->subcode |= GF_CALL_TAILCALL;
else
- s->gsbase.subcode &= ~GF_CALL_TAILCALL;
+ s->subcode &= ~GF_CALL_TAILCALL;
}
@@ -2192,7 +2534,7 @@ static inline bool
gimple_call_tail_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
- return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
+ return (s->subcode & GF_CALL_TAILCALL) != 0;
}
@@ -2205,9 +2547,9 @@ gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (return_slot_opt_p)
- s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
+ s->subcode |= GF_CALL_RETURN_SLOT_OPT;
else
- s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
+ s->subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}
@@ -2217,7 +2559,7 @@ static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
- return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
+ return (s->subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}
@@ -2229,9 +2571,9 @@ gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (from_thunk_p)
- s->gsbase.subcode |= GF_CALL_FROM_THUNK;
+ s->subcode |= GF_CALL_FROM_THUNK;
else
- s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
+ s->subcode &= ~GF_CALL_FROM_THUNK;
}
@@ -2241,7 +2583,7 @@ static inline bool
gimple_call_from_thunk_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
- return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
+ return (s->subcode & GF_CALL_FROM_THUNK) != 0;
}
@@ -2253,9 +2595,9 @@ gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (pass_arg_pack_p)
- s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
+ s->subcode |= GF_CALL_VA_ARG_PACK;
else
- s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
+ s->subcode &= ~GF_CALL_VA_ARG_PACK;
}
@@ -2266,7 +2608,7 @@ static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
- return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
+ return (s->subcode & GF_CALL_VA_ARG_PACK) != 0;
}
@@ -2288,9 +2630,9 @@ gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (nothrow_p)
- s->gsbase.subcode |= GF_CALL_NOTHROW;
+ s->subcode |= GF_CALL_NOTHROW;
else
- s->gsbase.subcode &= ~GF_CALL_NOTHROW;
+ s->subcode &= ~GF_CALL_NOTHROW;
}
/* Return true if S is a nothrow call. */
@@ -2312,9 +2654,9 @@ gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (for_var)
- s->gsbase.subcode |= GF_CALL_ALLOCA_FOR_VAR;
+ s->subcode |= GF_CALL_ALLOCA_FOR_VAR;
else
- s->gsbase.subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
+ s->subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}
/* Return true if S is a call to builtin_alloca emitted for VLA objects. */
@@ -2323,7 +2665,7 @@ static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
- return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
+ return (s->subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */
@@ -2333,7 +2675,7 @@ gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
GIMPLE_CHECK (dest_call, GIMPLE_CALL);
GIMPLE_CHECK (orig_call, GIMPLE_CALL);
- dest_call->gsbase.subcode = orig_call->gsbase.subcode;
+ dest_call->subcode = orig_call->subcode;
}
@@ -2343,8 +2685,8 @@ gimple_call_copy_flags (gimple dest_call, gimple orig_call)
static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
- GIMPLE_CHECK (call, GIMPLE_CALL);
- return &call->gimple_call.call_used;
+ gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
+ return &call_stmt->call_used;
}
@@ -2354,8 +2696,8 @@ gimple_call_use_set (gimple call)
static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
- GIMPLE_CHECK (call, GIMPLE_CALL);
- return &call->gimple_call.call_clobbered;
+ gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
+ return &call_stmt->call_clobbered;
}
@@ -2377,7 +2719,7 @@ static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
- return (enum tree_code) gs->gsbase.subcode;
+ return (enum tree_code) gs->subcode;
}
@@ -2387,7 +2729,7 @@ static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
- gs->gsbase.subcode = code;
+ gs->subcode = code;
}
@@ -2503,7 +2845,7 @@ gimple_cond_make_false (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_false_node);
- gs->gsbase.subcode = EQ_EXPR;
+ gs->subcode = EQ_EXPR;
}
@@ -2514,7 +2856,7 @@ gimple_cond_make_true (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_true_node);
- gs->gsbase.subcode = EQ_EXPR;
+ gs->subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
@@ -2623,8 +2965,9 @@ gimple_goto_set_dest (gimple gs, tree dest)
static inline tree
gimple_bind_vars (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- return gs->gimple_bind.vars;
+ const gimple_statement_bind *bind_stmt =
+ as_a <const gimple_statement_bind> (gs);
+ return bind_stmt->vars;
}
@@ -2634,8 +2977,8 @@ gimple_bind_vars (const_gimple gs)
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- gs->gimple_bind.vars = vars;
+ gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
+ bind_stmt->vars = vars;
}
@@ -2645,16 +2988,16 @@ gimple_bind_set_vars (gimple gs, tree vars)
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
+ gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
+ bind_stmt->vars = chainon (bind_stmt->vars, vars);
}
static inline gimple_seq *
gimple_bind_body_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- return &gs->gimple_bind.body;
+ gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
+ return &bind_stmt->body;
}
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */
@@ -2672,8 +3015,8 @@ gimple_bind_body (gimple gs)
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- gs->gimple_bind.body = seq;
+ gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
+ bind_stmt->body = seq;
}
@@ -2682,8 +3025,8 @@ gimple_bind_set_body (gimple gs, gimple_seq seq)
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
+ gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
+ gimple_seq_add_stmt (&bind_stmt->body, stmt);
}
@@ -2692,8 +3035,8 @@ gimple_bind_add_stmt (gimple gs, gimple stmt)
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- gimple_seq_add_seq (&gs->gimple_bind.body, seq);
+ gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
+ gimple_seq_add_seq (&bind_stmt->body, seq);
}
@@ -2703,8 +3046,9 @@ gimple_bind_add_seq (gimple gs, gimple_seq seq)
static inline tree
gimple_bind_block (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
- return gs->gimple_bind.block;
+ const gimple_statement_bind *bind_stmt =
+ as_a <const gimple_statement_bind> (gs);
+ return bind_stmt->block;
}
@@ -2714,10 +3058,10 @@ gimple_bind_block (const_gimple gs)
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
- GIMPLE_CHECK (gs, GIMPLE_BIND);
+ gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
gcc_gimple_checking_assert (block == NULL_TREE
|| TREE_CODE (block) == BLOCK);
- gs->gimple_bind.block = block;
+ bind_stmt->block = block;
}
@@ -2726,8 +3070,9 @@ gimple_bind_set_block (gimple gs, tree block)
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- return gs->gimple_asm.ni;
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ return asm_stmt->ni;
}
@@ -2736,8 +3081,9 @@ gimple_asm_ninputs (const_gimple gs)
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- return gs->gimple_asm.no;
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ return asm_stmt->no;
}
@@ -2746,8 +3092,9 @@ gimple_asm_noutputs (const_gimple gs)
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- return gs->gimple_asm.nc;
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ return asm_stmt->nc;
}
/* Return the number of label operands for GIMPLE_ASM GS. */
@@ -2755,8 +3102,9 @@ gimple_asm_nclobbers (const_gimple gs)
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- return gs->gimple_asm.nl;
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ return asm_stmt->nl;
}
/* Return input operand INDEX of GIMPLE_ASM GS. */
@@ -2764,9 +3112,10 @@ gimple_asm_nlabels (const_gimple gs)
static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
- return gimple_op (gs, index + gs->gimple_asm.no);
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->ni);
+ return gimple_op (gs, index + asm_stmt->no);
}
/* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */
@@ -2774,9 +3123,10 @@ gimple_asm_input_op (const_gimple gs, unsigned index)
static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
- return gimple_op_ptr (gs, index + gs->gimple_asm.no);
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->ni);
+ return gimple_op_ptr (gs, index + asm_stmt->no);
}
@@ -2785,10 +3135,10 @@ gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.ni
+ gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->ni
&& TREE_CODE (in_op) == TREE_LIST);
- gimple_set_op (gs, index + gs->gimple_asm.no, in_op);
+ gimple_set_op (gs, index + asm_stmt->no, in_op);
}
@@ -2797,8 +3147,9 @@ gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.no);
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->no);
return gimple_op (gs, index);
}
@@ -2807,8 +3158,9 @@ gimple_asm_output_op (const_gimple gs, unsigned index)
static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.no);
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->no);
return gimple_op_ptr (gs, index);
}
@@ -2818,8 +3170,8 @@ gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.no
+ gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->no
&& TREE_CODE (out_op) == TREE_LIST);
gimple_set_op (gs, index, out_op);
}
@@ -2830,9 +3182,10 @@ gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.nc);
- return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no);
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->nc);
+ return gimple_op (gs, index + asm_stmt->ni + asm_stmt->no);
}
@@ -2841,10 +3194,10 @@ gimple_asm_clobber_op (const_gimple gs, unsigned index)
static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.nc
+ gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->nc
&& TREE_CODE (clobber_op) == TREE_LIST);
- gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op);
+ gimple_set_op (gs, index + asm_stmt->ni + asm_stmt->no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS. */
@@ -2852,9 +3205,10 @@ gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.nl);
- return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc);
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->nl);
+ return gimple_op (gs, index + asm_stmt->ni + asm_stmt->nc);
}
/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */
@@ -2862,10 +3216,10 @@ gimple_asm_label_op (const_gimple gs, unsigned index)
static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- gcc_gimple_checking_assert (index < gs->gimple_asm.nl
+ gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
+ gcc_gimple_checking_assert (index < asm_stmt->nl
&& TREE_CODE (label_op) == TREE_LIST);
- gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op);
+ gimple_set_op (gs, index + asm_stmt->ni + asm_stmt->nc, label_op);
}
/* Return the string representing the assembly instruction in
@@ -2874,8 +3228,9 @@ gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
static inline const char *
gimple_asm_string (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_ASM);
- return gs->gimple_asm.string;
+ const gimple_statement_asm *asm_stmt =
+ as_a <const gimple_statement_asm> (gs);
+ return asm_stmt->string;
}
@@ -2885,7 +3240,7 @@ static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
- return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0;
+ return (gs->subcode & GF_ASM_VOLATILE) != 0;
}
@@ -2896,9 +3251,9 @@ gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (volatile_p)
- gs->gsbase.subcode |= GF_ASM_VOLATILE;
+ gs->subcode |= GF_ASM_VOLATILE;
else
- gs->gsbase.subcode &= ~GF_ASM_VOLATILE;
+ gs->subcode &= ~GF_ASM_VOLATILE;
}
@@ -2909,9 +3264,9 @@ gimple_asm_set_input (gimple gs, bool input_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (input_p)
- gs->gsbase.subcode |= GF_ASM_INPUT;
+ gs->subcode |= GF_ASM_INPUT;
else
- gs->gsbase.subcode &= ~GF_ASM_INPUT;
+ gs->subcode &= ~GF_ASM_INPUT;
}
@@ -2921,7 +3276,7 @@ static inline bool
gimple_asm_input_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
- return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
+ return (gs->subcode & GF_ASM_INPUT) != 0;
}
@@ -2930,8 +3285,9 @@ gimple_asm_input_p (const_gimple gs)
static inline tree
gimple_catch_types (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_CATCH);
- return gs->gimple_catch.types;
+ const gimple_statement_catch *catch_stmt =
+ as_a <const gimple_statement_catch> (gs);
+ return catch_stmt->types;
}
@@ -2940,8 +3296,8 @@ gimple_catch_types (const_gimple gs)
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_CATCH);
- return &gs->gimple_catch.types;
+ gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
+ return &catch_stmt->types;
}
@@ -2951,8 +3307,8 @@ gimple_catch_types_ptr (gimple gs)
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_CATCH);
- return &gs->gimple_catch.handler;
+ gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
+ return &catch_stmt->handler;
}
@@ -2971,8 +3327,8 @@ gimple_catch_handler (gimple gs)
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
- GIMPLE_CHECK (gs, GIMPLE_CATCH);
- gs->gimple_catch.types = t;
+ gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
+ catch_stmt->types = t;
}
@@ -2981,8 +3337,8 @@ gimple_catch_set_types (gimple gs, tree t)
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
- GIMPLE_CHECK (gs, GIMPLE_CATCH);
- gs->gimple_catch.handler = handler;
+ gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
+ catch_stmt->handler = handler;
}
@@ -2991,8 +3347,9 @@ gimple_catch_set_handler (gimple gs, gimple_seq handler)
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
- return gs->gimple_eh_filter.types;
+ const gimple_statement_eh_filter *eh_filter_stmt =
+ as_a <const gimple_statement_eh_filter> (gs);
+ return eh_filter_stmt->types;
}
@@ -3002,8 +3359,9 @@ gimple_eh_filter_types (const_gimple gs)
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
- return &gs->gimple_eh_filter.types;
+ gimple_statement_eh_filter *eh_filter_stmt =
+ as_a <gimple_statement_eh_filter> (gs);
+ return &eh_filter_stmt->types;
}
@@ -3013,8 +3371,9 @@ gimple_eh_filter_types_ptr (gimple gs)
static inline gimple_seq *
gimple_eh_filter_failure_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
- return &gs->gimple_eh_filter.failure;
+ gimple_statement_eh_filter *eh_filter_stmt =
+ as_a <gimple_statement_eh_filter> (gs);
+ return &eh_filter_stmt->failure;
}
@@ -3033,8 +3392,9 @@ gimple_eh_filter_failure (gimple gs)
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
- gs->gimple_eh_filter.types = types;
+ gimple_statement_eh_filter *eh_filter_stmt =
+ as_a <gimple_statement_eh_filter> (gs);
+ eh_filter_stmt->types = types;
}
@@ -3044,8 +3404,9 @@ gimple_eh_filter_set_types (gimple gs, tree types)
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
- gs->gimple_eh_filter.failure = failure;
+ gimple_statement_eh_filter *eh_filter_stmt =
+ as_a <gimple_statement_eh_filter> (gs);
+ eh_filter_stmt->failure = failure;
}
/* Get the function decl to be called by the MUST_NOT_THROW region. */
@@ -3053,8 +3414,8 @@ gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
- return gs->gimple_eh_mnt.fndecl;
+ gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
+ return eh_mnt_stmt->fndecl;
}
/* Set the function decl to be called by GS to DECL. */
@@ -3062,8 +3423,8 @@ gimple_eh_must_not_throw_fndecl (gimple gs)
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
- gs->gimple_eh_mnt.fndecl = decl;
+ gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
+ eh_mnt_stmt->fndecl = decl;
}
/* GIMPLE_EH_ELSE accessors. */
@@ -3071,8 +3432,9 @@ gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
static inline gimple_seq *
gimple_eh_else_n_body_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
- return &gs->gimple_eh_else.n_body;
+ gimple_statement_eh_else *eh_else_stmt =
+ as_a <gimple_statement_eh_else> (gs);
+ return &eh_else_stmt->n_body;
}
static inline gimple_seq
@@ -3084,8 +3446,9 @@ gimple_eh_else_n_body (gimple gs)
static inline gimple_seq *
gimple_eh_else_e_body_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
- return &gs->gimple_eh_else.e_body;
+ gimple_statement_eh_else *eh_else_stmt =
+ as_a <gimple_statement_eh_else> (gs);
+ return &eh_else_stmt->e_body;
}
static inline gimple_seq
@@ -3097,15 +3460,17 @@ gimple_eh_else_e_body (gimple gs)
static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
- gs->gimple_eh_else.n_body = seq;
+ gimple_statement_eh_else *eh_else_stmt =
+ as_a <gimple_statement_eh_else> (gs);
+ eh_else_stmt->n_body = seq;
}
static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
- gs->gimple_eh_else.e_body = seq;
+ gimple_statement_eh_else *eh_else_stmt =
+ as_a <gimple_statement_eh_else> (gs);
+ eh_else_stmt->e_body = seq;
}
/* GIMPLE_TRY accessors. */
@@ -3117,7 +3482,7 @@ static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
- return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
+ return (enum gimple_try_flags) (gs->subcode & GIMPLE_TRY_KIND);
}
@@ -3130,7 +3495,7 @@ gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
|| kind == GIMPLE_TRY_FINALLY);
if (gimple_try_kind (gs) != kind)
- gs->gsbase.subcode = (unsigned int) kind;
+ gs->subcode = (unsigned int) kind;
}
@@ -3140,7 +3505,7 @@ static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
- return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
+ return (gs->subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}
@@ -3150,8 +3515,8 @@ gimple_try_catch_is_cleanup (const_gimple gs)
static inline gimple_seq *
gimple_try_eval_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_TRY);
- return &gs->gimple_try.eval;
+ gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
+ return &try_stmt->eval;
}
@@ -3170,8 +3535,8 @@ gimple_try_eval (gimple gs)
static inline gimple_seq *
gimple_try_cleanup_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_TRY);
- return &gs->gimple_try.cleanup;
+ gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
+ return &try_stmt->cleanup;
}
@@ -3192,9 +3557,9 @@ gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
if (catch_is_cleanup)
- g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
+ g->subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
else
- g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
+ g->subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}
@@ -3204,8 +3569,8 @@ gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
- GIMPLE_CHECK (gs, GIMPLE_TRY);
- gs->gimple_try.eval = eval;
+ gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
+ try_stmt->eval = eval;
}
@@ -3215,8 +3580,8 @@ gimple_try_set_eval (gimple gs, gimple_seq eval)
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
- GIMPLE_CHECK (gs, GIMPLE_TRY);
- gs->gimple_try.cleanup = cleanup;
+ gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
+ try_stmt->cleanup = cleanup;
}
@@ -3225,8 +3590,8 @@ gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
static inline gimple_seq *
gimple_wce_cleanup_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
- return &gs->gimple_wce.cleanup;
+ gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
+ return &wce_stmt->cleanup;
}
@@ -3244,8 +3609,8 @@ gimple_wce_cleanup (gimple gs)
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
- GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
- gs->gimple_wce.cleanup = cleanup;
+ gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
+ wce_stmt->cleanup = cleanup;
}
@@ -3255,7 +3620,7 @@ static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
- return gs->gsbase.subcode != 0;
+ return gs->subcode != 0;
}
@@ -3265,7 +3630,7 @@ static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
- gs->gsbase.subcode = (unsigned int) eh_only_p;
+ gs->subcode = (unsigned int) eh_only_p;
}
@@ -3274,8 +3639,9 @@ gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_PHI);
- return gs->gimple_phi.capacity;
+ const gimple_statement_phi *phi_stmt =
+ as_a <const gimple_statement_phi> (gs);
+ return phi_stmt->capacity;
}
@@ -3286,8 +3652,9 @@ gimple_phi_capacity (const_gimple gs)
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_PHI);
- return gs->gimple_phi.nargs;
+ const gimple_statement_phi *phi_stmt =
+ as_a <const gimple_statement_phi> (gs);
+ return phi_stmt->nargs;
}
@@ -3296,8 +3663,9 @@ gimple_phi_num_args (const_gimple gs)
static inline tree
gimple_phi_result (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_PHI);
- return gs->gimple_phi.result;
+ const gimple_statement_phi *phi_stmt =
+ as_a <const gimple_statement_phi> (gs);
+ return phi_stmt->result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
@@ -3305,8 +3673,8 @@ gimple_phi_result (const_gimple gs)
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_PHI);
- return &gs->gimple_phi.result;
+ gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
+ return &phi_stmt->result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */
@@ -3314,8 +3682,8 @@ gimple_phi_result_ptr (gimple gs)
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
- GIMPLE_CHECK (gs, GIMPLE_PHI);
- gs->gimple_phi.result = result;
+ gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
+ phi_stmt->result = result;
if (result && TREE_CODE (result) == SSA_NAME)
SSA_NAME_DEF_STMT (result) = gs;
}
@@ -3327,9 +3695,9 @@ gimple_phi_set_result (gimple gs, tree result)
static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
- GIMPLE_CHECK (gs, GIMPLE_PHI);
- gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity);
- return &(gs->gimple_phi.args[index]);
+ gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
+ gcc_gimple_checking_assert (index <= phi_stmt->capacity);
+ return &(phi_stmt->args[index]);
}
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
@@ -3338,9 +3706,9 @@ gimple_phi_arg (gimple gs, unsigned index)
static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
- GIMPLE_CHECK (gs, GIMPLE_PHI);
- gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs);
- gs->gimple_phi.args[index] = *phiarg;
+ gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs);
+ gcc_gimple_checking_assert (index <= phi_stmt->nargs);
+ phi_stmt->args[index] = *phiarg;
}
/* Return the PHI nodes for basic block BB, or NULL if there are no
@@ -3425,8 +3793,9 @@ gimple_phi_arg_has_location (gimple gs, size_t i)
static inline int
gimple_resx_region (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_RESX);
- return gs->gimple_eh_ctrl.region;
+ const gimple_statement_eh_ctrl *eh_ctrl_stmt =
+ as_a <const gimple_statement_eh_ctrl> (gs);
+ return eh_ctrl_stmt->region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS. */
@@ -3434,8 +3803,9 @@ gimple_resx_region (const_gimple gs)
static inline void
gimple_resx_set_region (gimple gs, int region)
{
- GIMPLE_CHECK (gs, GIMPLE_RESX);
- gs->gimple_eh_ctrl.region = region;
+ gimple_statement_eh_ctrl *eh_ctrl_stmt =
+ as_a <gimple_statement_eh_ctrl> (gs);
+ eh_ctrl_stmt->region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS. */
@@ -3443,8 +3813,9 @@ gimple_resx_set_region (gimple gs, int region)
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
- return gs->gimple_eh_ctrl.region;
+ const gimple_statement_eh_ctrl *eh_ctrl_stmt =
+ as_a <const gimple_statement_eh_ctrl> (gs);
+ return eh_ctrl_stmt->region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */
@@ -3452,8 +3823,9 @@ gimple_eh_dispatch_region (const_gimple gs)
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
- GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
- gs->gimple_eh_ctrl.region = region;
+ gimple_statement_eh_ctrl *eh_ctrl_stmt =
+ as_a <gimple_statement_eh_ctrl> (gs);
+ eh_ctrl_stmt->region = region;
}
/* Return the number of labels associated with the switch statement GS. */
@@ -3566,7 +3938,7 @@ static inline bool
gimple_debug_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
- return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
+ return s->subcode == GIMPLE_DEBUG_BIND;
return false;
}
@@ -3658,7 +4030,7 @@ static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
- return s->gsbase.subcode == GIMPLE_DEBUG_SOURCE_BIND;
+ return s->subcode == GIMPLE_DEBUG_SOURCE_BIND;
return false;
}
@@ -3738,7 +4110,7 @@ get_lineno (const_gimple stmt)
static inline gimple_seq *
gimple_omp_body_ptr (gimple gs)
{
- return &gs->omp.body;
+ return &static_cast <gimple_statement_omp *> (gs)->body;
}
/* Return the body for the OMP statement GS. */
@@ -3754,7 +4126,7 @@ gimple_omp_body (gimple gs)
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
- gs->omp.body = body;
+ static_cast <gimple_statement_omp *> (gs)->body = body;
}
@@ -3763,8 +4135,9 @@ gimple_omp_set_body (gimple gs, gimple_seq body)
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
- return gs->gimple_omp_critical.name;
+ const gimple_statement_omp_critical *omp_critical_stmt =
+ as_a <const gimple_statement_omp_critical> (gs);
+ return omp_critical_stmt->name;
}
@@ -3773,8 +4146,9 @@ gimple_omp_critical_name (const_gimple gs)
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
- return &gs->gimple_omp_critical.name;
+ gimple_statement_omp_critical *omp_critical_stmt =
+ as_a <gimple_statement_omp_critical> (gs);
+ return &omp_critical_stmt->name;
}
@@ -3783,8 +4157,9 @@ gimple_omp_critical_name_ptr (gimple gs)
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
- gs->gimple_omp_critical.name = name;
+ gimple_statement_omp_critical *omp_critical_stmt =
+ as_a <gimple_statement_omp_critical> (gs);
+ omp_critical_stmt->name = name;
}
@@ -3804,7 +4179,7 @@ static inline void
gimple_omp_for_set_kind (gimple g, int kind)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
- g->gsbase.subcode = (g->gsbase.subcode & ~GF_OMP_FOR_KIND_MASK)
+ g->subcode = (g->subcode & ~GF_OMP_FOR_KIND_MASK)
| (kind & GF_OMP_FOR_KIND_MASK);
}
@@ -3828,9 +4203,9 @@ gimple_omp_for_set_combined_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
if (combined_p)
- g->gsbase.subcode |= GF_OMP_FOR_COMBINED;
+ g->subcode |= GF_OMP_FOR_COMBINED;
else
- g->gsbase.subcode &= ~GF_OMP_FOR_COMBINED;
+ g->subcode &= ~GF_OMP_FOR_COMBINED;
}
@@ -3853,9 +4228,9 @@ gimple_omp_for_set_combined_into_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
if (combined_p)
- g->gsbase.subcode |= GF_OMP_FOR_COMBINED_INTO;
+ g->subcode |= GF_OMP_FOR_COMBINED_INTO;
else
- g->gsbase.subcode &= ~GF_OMP_FOR_COMBINED_INTO;
+ g->subcode &= ~GF_OMP_FOR_COMBINED_INTO;
}
@@ -3864,8 +4239,9 @@ gimple_omp_for_set_combined_into_p (gimple g, bool combined_p)
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- return gs->gimple_omp_for.clauses;
+ const gimple_statement_omp_for *omp_for_stmt =
+ as_a <const gimple_statement_omp_for> (gs);
+ return omp_for_stmt->clauses;
}
@@ -3874,8 +4250,9 @@ gimple_omp_for_clauses (const_gimple gs)
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- return &gs->gimple_omp_for.clauses;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ return &omp_for_stmt->clauses;
}
@@ -3884,8 +4261,9 @@ gimple_omp_for_clauses_ptr (gimple gs)
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gs->gimple_omp_for.clauses = clauses;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ omp_for_stmt->clauses = clauses;
}
@@ -3894,8 +4272,9 @@ gimple_omp_for_set_clauses (gimple gs, tree clauses)
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- return gs->gimple_omp_for.collapse;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ return omp_for_stmt->collapse;
}
@@ -3904,9 +4283,10 @@ gimple_omp_for_collapse (gimple gs)
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return gs->gimple_omp_for.iter[i].index;
+ const gimple_statement_omp_for *omp_for_stmt =
+ as_a <const gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].index;
}
@@ -3915,9 +4295,10 @@ gimple_omp_for_index (const_gimple gs, size_t i)
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return &gs->gimple_omp_for.iter[i].index;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].index;
}
@@ -3926,9 +4307,10 @@ gimple_omp_for_index_ptr (gimple gs, size_t i)
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- gs->gimple_omp_for.iter[i].index = index;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].index = index;
}
@@ -3937,9 +4319,10 @@ gimple_omp_for_set_index (gimple gs, size_t i, tree index)
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return gs->gimple_omp_for.iter[i].initial;
+ const gimple_statement_omp_for *omp_for_stmt =
+ as_a <const gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].initial;
}
@@ -3948,9 +4331,10 @@ gimple_omp_for_initial (const_gimple gs, size_t i)
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return &gs->gimple_omp_for.iter[i].initial;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].initial;
}
@@ -3959,9 +4343,10 @@ gimple_omp_for_initial_ptr (gimple gs, size_t i)
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- gs->gimple_omp_for.iter[i].initial = initial;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].initial = initial;
}
@@ -3970,9 +4355,10 @@ gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return gs->gimple_omp_for.iter[i].final;
+ const gimple_statement_omp_for *omp_for_stmt =
+ as_a <const gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].final;
}
@@ -3981,9 +4367,10 @@ gimple_omp_for_final (const_gimple gs, size_t i)
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return &gs->gimple_omp_for.iter[i].final;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].final;
}
@@ -3992,9 +4379,10 @@ gimple_omp_for_final_ptr (gimple gs, size_t i)
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- gs->gimple_omp_for.iter[i].final = final;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].final = final;
}
@@ -4003,9 +4391,10 @@ gimple_omp_for_set_final (gimple gs, size_t i, tree final)
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return gs->gimple_omp_for.iter[i].incr;
+ const gimple_statement_omp_for *omp_for_stmt =
+ as_a <const gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].incr;
}
@@ -4014,9 +4403,10 @@ gimple_omp_for_incr (const_gimple gs, size_t i)
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return &gs->gimple_omp_for.iter[i].incr;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].incr;
}
@@ -4025,9 +4415,10 @@ gimple_omp_for_incr_ptr (gimple gs, size_t i)
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- gs->gimple_omp_for.iter[i].incr = incr;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].incr = incr;
}
@@ -4037,8 +4428,9 @@ gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
static inline gimple_seq *
gimple_omp_for_pre_body_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- return &gs->gimple_omp_for.pre_body;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ return &omp_for_stmt->pre_body;
}
@@ -4058,8 +4450,9 @@ gimple_omp_for_pre_body (gimple gs)
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gs->gimple_omp_for.pre_body = pre_body;
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
+ omp_for_stmt->pre_body = pre_body;
}
@@ -4068,8 +4461,9 @@ gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- return gs->gimple_omp_parallel.clauses;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->clauses;
}
@@ -4078,8 +4472,9 @@ gimple_omp_parallel_clauses (const_gimple gs)
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- return &gs->gimple_omp_parallel.clauses;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->clauses;
}
@@ -4089,8 +4484,9 @@ gimple_omp_parallel_clauses_ptr (gimple gs)
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- gs->gimple_omp_parallel.clauses = clauses;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->clauses = clauses;
}
@@ -4099,8 +4495,9 @@ gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- return gs->gimple_omp_parallel.child_fn;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
@@ -4109,8 +4506,9 @@ gimple_omp_parallel_child_fn (const_gimple gs)
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- return &gs->gimple_omp_parallel.child_fn;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->child_fn;
}
@@ -4119,8 +4517,9 @@ gimple_omp_parallel_child_fn_ptr (gimple gs)
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- gs->gimple_omp_parallel.child_fn = child_fn;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->child_fn = child_fn;
}
@@ -4130,8 +4529,9 @@ gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- return gs->gimple_omp_parallel.data_arg;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->data_arg;
}
@@ -4140,8 +4540,9 @@ gimple_omp_parallel_data_arg (const_gimple gs)
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- return &gs->gimple_omp_parallel.data_arg;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->data_arg;
}
@@ -4150,8 +4551,9 @@ gimple_omp_parallel_data_arg_ptr (gimple gs)
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
- gs->gimple_omp_parallel.data_arg = data_arg;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->data_arg = data_arg;
}
@@ -4160,8 +4562,9 @@ gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_parallel.clauses;
+ const gimple_statement_omp_task *omp_task_stmt =
+ as_a <const gimple_statement_omp_task> (gs);
+ return omp_task_stmt->clauses;
}
@@ -4170,8 +4573,9 @@ gimple_omp_task_clauses (const_gimple gs)
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_parallel.clauses;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ return &omp_task_stmt->clauses;
}
@@ -4181,8 +4585,9 @@ gimple_omp_task_clauses_ptr (gimple gs)
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_parallel.clauses = clauses;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ omp_task_stmt->clauses = clauses;
}
@@ -4191,8 +4596,9 @@ gimple_omp_task_set_clauses (gimple gs, tree clauses)
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_parallel.child_fn;
+ const gimple_statement_omp_task *omp_task_stmt =
+ as_a <const gimple_statement_omp_task> (gs);
+ return omp_task_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
@@ -4201,8 +4607,9 @@ gimple_omp_task_child_fn (const_gimple gs)
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_parallel.child_fn;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ return &omp_task_stmt->child_fn;
}
@@ -4211,8 +4618,9 @@ gimple_omp_task_child_fn_ptr (gimple gs)
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_parallel.child_fn = child_fn;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ omp_task_stmt->child_fn = child_fn;
}
@@ -4222,8 +4630,9 @@ gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_parallel.data_arg;
+ const gimple_statement_omp_task *omp_task_stmt =
+ as_a <const gimple_statement_omp_task> (gs);
+ return omp_task_stmt->data_arg;
}
@@ -4232,8 +4641,9 @@ gimple_omp_task_data_arg (const_gimple gs)
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_parallel.data_arg;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ return &omp_task_stmt->data_arg;
}
@@ -4242,8 +4652,9 @@ gimple_omp_task_data_arg_ptr (gimple gs)
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_parallel.data_arg = data_arg;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ omp_task_stmt->data_arg = data_arg;
}
@@ -4252,9 +4663,9 @@ gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_parallel.clauses;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->clauses;
}
@@ -4263,9 +4674,9 @@ gimple_omp_taskreg_clauses (const_gimple gs)
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_parallel.clauses;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->clauses;
}
@@ -4275,9 +4686,9 @@ gimple_omp_taskreg_clauses_ptr (gimple gs)
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_parallel.clauses = clauses;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->clauses = clauses;
}
@@ -4286,9 +4697,9 @@ gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_parallel.child_fn;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
@@ -4297,9 +4708,9 @@ gimple_omp_taskreg_child_fn (const_gimple gs)
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_parallel.child_fn;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->child_fn;
}
@@ -4308,9 +4719,9 @@ gimple_omp_taskreg_child_fn_ptr (gimple gs)
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_parallel.child_fn = child_fn;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->child_fn = child_fn;
}
@@ -4320,9 +4731,9 @@ gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_parallel.data_arg;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->data_arg;
}
@@ -4331,9 +4742,9 @@ gimple_omp_taskreg_data_arg (const_gimple gs)
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_parallel.data_arg;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->data_arg;
}
@@ -4342,9 +4753,9 @@ gimple_omp_taskreg_data_arg_ptr (gimple gs)
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
- if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_parallel.data_arg = data_arg;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->data_arg = data_arg;
}
@@ -4353,8 +4764,9 @@ gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_task.copy_fn;
+ const gimple_statement_omp_task *omp_task_stmt =
+ as_a <const gimple_statement_omp_task> (gs);
+ return omp_task_stmt->copy_fn;
}
/* Return a pointer to the copy function used to hold the body of
@@ -4363,8 +4775,9 @@ gimple_omp_task_copy_fn (const_gimple gs)
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_task.copy_fn;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ return &omp_task_stmt->copy_fn;
}
@@ -4373,8 +4786,9 @@ gimple_omp_task_copy_fn_ptr (gimple gs)
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_task.copy_fn = copy_fn;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ omp_task_stmt->copy_fn = copy_fn;
}
@@ -4383,8 +4797,9 @@ gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_task.arg_size;
+ const gimple_statement_omp_task *omp_task_stmt =
+ as_a <const gimple_statement_omp_task> (gs);
+ return omp_task_stmt->arg_size;
}
@@ -4393,8 +4808,9 @@ gimple_omp_task_arg_size (const_gimple gs)
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_task.arg_size;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ return &omp_task_stmt->arg_size;
}
@@ -4403,8 +4819,9 @@ gimple_omp_task_arg_size_ptr (gimple gs)
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_task.arg_size = arg_size;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ omp_task_stmt->arg_size = arg_size;
}
@@ -4413,8 +4830,9 @@ gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return gs->gimple_omp_task.arg_align;
+ const gimple_statement_omp_task *omp_task_stmt =
+ as_a <const gimple_statement_omp_task> (gs);
+ return omp_task_stmt->arg_align;
}
@@ -4423,8 +4841,9 @@ gimple_omp_task_arg_align (const_gimple gs)
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- return &gs->gimple_omp_task.arg_align;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ return &omp_task_stmt->arg_align;
}
@@ -4433,8 +4852,9 @@ gimple_omp_task_arg_align_ptr (gimple gs)
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
- gs->gimple_omp_task.arg_align = arg_align;
+ gimple_statement_omp_task *omp_task_stmt =
+ as_a <gimple_statement_omp_task> (gs);
+ omp_task_stmt->arg_align = arg_align;
}
@@ -4443,8 +4863,9 @@ gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
- return gs->gimple_omp_single.clauses;
+ const gimple_statement_omp_single *omp_single_stmt =
+ as_a <const gimple_statement_omp_single> (gs);
+ return omp_single_stmt->clauses;
}
@@ -4453,8 +4874,9 @@ gimple_omp_single_clauses (const_gimple gs)
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
- return &gs->gimple_omp_single.clauses;
+ gimple_statement_omp_single *omp_single_stmt =
+ as_a <gimple_statement_omp_single> (gs);
+ return &omp_single_stmt->clauses;
}
@@ -4463,8 +4885,9 @@ gimple_omp_single_clauses_ptr (gimple gs)
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
- gs->gimple_omp_single.clauses = clauses;
+ gimple_statement_omp_single *omp_single_stmt =
+ as_a <gimple_statement_omp_single> (gs);
+ omp_single_stmt->clauses = clauses;
}
@@ -4473,8 +4896,9 @@ gimple_omp_single_set_clauses (gimple gs, tree clauses)
static inline tree
gimple_omp_target_clauses (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- return gs->gimple_omp_parallel.clauses;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->clauses;
}
@@ -4483,8 +4907,9 @@ gimple_omp_target_clauses (const_gimple gs)
static inline tree *
gimple_omp_target_clauses_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- return &gs->gimple_omp_parallel.clauses;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->clauses;
}
@@ -4493,8 +4918,9 @@ gimple_omp_target_clauses_ptr (gimple gs)
static inline void
gimple_omp_target_set_clauses (gimple gs, tree clauses)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- gs->gimple_omp_parallel.clauses = clauses;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->clauses = clauses;
}
@@ -4514,7 +4940,7 @@ static inline void
gimple_omp_target_set_kind (gimple g, int kind)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
- g->gsbase.subcode = (g->gsbase.subcode & ~GF_OMP_TARGET_KIND_MASK)
+ g->subcode = (g->subcode & ~GF_OMP_TARGET_KIND_MASK)
| (kind & GF_OMP_TARGET_KIND_MASK);
}
@@ -4524,8 +4950,9 @@ gimple_omp_target_set_kind (gimple g, int kind)
static inline tree
gimple_omp_target_child_fn (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- return gs->gimple_omp_parallel.child_fn;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->child_fn;
}
/* Return a pointer to the child function used to hold the body of
@@ -4534,8 +4961,9 @@ gimple_omp_target_child_fn (const_gimple gs)
static inline tree *
gimple_omp_target_child_fn_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- return &gs->gimple_omp_parallel.child_fn;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->child_fn;
}
@@ -4544,8 +4972,9 @@ gimple_omp_target_child_fn_ptr (gimple gs)
static inline void
gimple_omp_target_set_child_fn (gimple gs, tree child_fn)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- gs->gimple_omp_parallel.child_fn = child_fn;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->child_fn = child_fn;
}
@@ -4555,8 +4984,9 @@ gimple_omp_target_set_child_fn (gimple gs, tree child_fn)
static inline tree
gimple_omp_target_data_arg (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- return gs->gimple_omp_parallel.data_arg;
+ const gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <const gimple_statement_omp_parallel> (gs);
+ return omp_parallel_stmt->data_arg;
}
@@ -4565,8 +4995,9 @@ gimple_omp_target_data_arg (const_gimple gs)
static inline tree *
gimple_omp_target_data_arg_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- return &gs->gimple_omp_parallel.data_arg;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ return &omp_parallel_stmt->data_arg;
}
@@ -4575,8 +5006,9 @@ gimple_omp_target_data_arg_ptr (gimple gs)
static inline void
gimple_omp_target_set_data_arg (gimple gs, tree data_arg)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TARGET);
- gs->gimple_omp_parallel.data_arg = data_arg;
+ gimple_statement_omp_parallel *omp_parallel_stmt =
+ as_a <gimple_statement_omp_parallel> (gs);
+ omp_parallel_stmt->data_arg = data_arg;
}
@@ -4585,8 +5017,9 @@ gimple_omp_target_set_data_arg (gimple gs, tree data_arg)
static inline tree
gimple_omp_teams_clauses (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TEAMS);
- return gs->gimple_omp_single.clauses;
+ const gimple_statement_omp_single *omp_single_stmt =
+ as_a <const gimple_statement_omp_single> (gs);
+ return omp_single_stmt->clauses;
}
@@ -4595,8 +5028,9 @@ gimple_omp_teams_clauses (const_gimple gs)
static inline tree *
gimple_omp_teams_clauses_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TEAMS);
- return &gs->gimple_omp_single.clauses;
+ gimple_statement_omp_single *omp_single_stmt =
+ as_a <gimple_statement_omp_single> (gs);
+ return &omp_single_stmt->clauses;
}
@@ -4605,8 +5039,9 @@ gimple_omp_teams_clauses_ptr (gimple gs)
static inline void
gimple_omp_teams_set_clauses (gimple gs, tree clauses)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_TEAMS);
- gs->gimple_omp_single.clauses = clauses;
+ gimple_statement_omp_single *omp_single_stmt =
+ as_a <gimple_statement_omp_single> (gs);
+ omp_single_stmt->clauses = clauses;
}
@@ -4615,8 +5050,9 @@ gimple_omp_teams_set_clauses (gimple gs, tree clauses)
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
- return gs->gimple_omp_sections.clauses;
+ const gimple_statement_omp_sections *omp_sections_stmt =
+ as_a <const gimple_statement_omp_sections> (gs);
+ return omp_sections_stmt->clauses;
}
@@ -4625,8 +5061,9 @@ gimple_omp_sections_clauses (const_gimple gs)
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
- return &gs->gimple_omp_sections.clauses;
+ gimple_statement_omp_sections *omp_sections_stmt =
+ as_a <gimple_statement_omp_sections> (gs);
+ return &omp_sections_stmt->clauses;
}
@@ -4636,8 +5073,9 @@ gimple_omp_sections_clauses_ptr (gimple gs)
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
- gs->gimple_omp_sections.clauses = clauses;
+ gimple_statement_omp_sections *omp_sections_stmt =
+ as_a <gimple_statement_omp_sections> (gs);
+ omp_sections_stmt->clauses = clauses;
}
@@ -4647,8 +5085,9 @@ gimple_omp_sections_set_clauses (gimple gs, tree clauses)
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
- return gs->gimple_omp_sections.control;
+ const gimple_statement_omp_sections *omp_sections_stmt =
+ as_a <const gimple_statement_omp_sections> (gs);
+ return omp_sections_stmt->control;
}
@@ -4658,8 +5097,9 @@ gimple_omp_sections_control (const_gimple gs)
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
- return &gs->gimple_omp_sections.control;
+ gimple_statement_omp_sections *omp_sections_stmt =
+ as_a <gimple_statement_omp_sections> (gs);
+ return &omp_sections_stmt->control;
}
@@ -4669,8 +5109,9 @@ gimple_omp_sections_control_ptr (gimple gs)
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
- gs->gimple_omp_sections.control = control;
+ gimple_statement_omp_sections *omp_sections_stmt =
+ as_a <gimple_statement_omp_sections> (gs);
+ omp_sections_stmt->control = control;
}
@@ -4679,10 +5120,11 @@ gimple_omp_sections_set_control (gimple gs, tree control)
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
+ gimple_statement_omp_for *omp_for_stmt =
+ as_a <gimple_statement_omp_for> (gs);
gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
- && i < gs->gimple_omp_for.collapse);
- gs->gimple_omp_for.iter[i].cond = cond;
+ && i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].cond = cond;
}
@@ -4691,9 +5133,10 @@ gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
- GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
- gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
- return gs->gimple_omp_for.iter[i].cond;
+ const gimple_statement_omp_for *omp_for_stmt =
+ as_a <const gimple_statement_omp_for> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].cond;
}
@@ -4702,8 +5145,9 @@ gimple_omp_for_cond (const_gimple gs, size_t i)
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
- g->gimple_omp_atomic_store.val = val;
+ gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
+ as_a <gimple_statement_omp_atomic_store> (g);
+ omp_atomic_store_stmt->val = val;
}
@@ -4712,8 +5156,9 @@ gimple_omp_atomic_store_set_val (gimple g, tree val)
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
- return g->gimple_omp_atomic_store.val;
+ const gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
+ as_a <const gimple_statement_omp_atomic_store> (g);
+ return omp_atomic_store_stmt->val;
}
@@ -4722,8 +5167,9 @@ gimple_omp_atomic_store_val (const_gimple g)
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
- return &g->gimple_omp_atomic_store.val;
+ gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
+ as_a <gimple_statement_omp_atomic_store> (g);
+ return &omp_atomic_store_stmt->val;
}
@@ -4732,8 +5178,9 @@ gimple_omp_atomic_store_val_ptr (gimple g)
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
- g->gimple_omp_atomic_load.lhs = lhs;
+ gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
+ as_a <gimple_statement_omp_atomic_load> (g);
+ omp_atomic_load_stmt->lhs = lhs;
}
@@ -4742,8 +5189,9 @@ gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
- return g->gimple_omp_atomic_load.lhs;
+ const gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
+ as_a <const gimple_statement_omp_atomic_load> (g);
+ return omp_atomic_load_stmt->lhs;
}
@@ -4752,8 +5200,9 @@ gimple_omp_atomic_load_lhs (const_gimple g)
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
- return &g->gimple_omp_atomic_load.lhs;
+ gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
+ as_a <gimple_statement_omp_atomic_load> (g);
+ return &omp_atomic_load_stmt->lhs;
}
@@ -4762,8 +5211,9 @@ gimple_omp_atomic_load_lhs_ptr (gimple g)
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
- g->gimple_omp_atomic_load.rhs = rhs;
+ gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
+ as_a <gimple_statement_omp_atomic_load> (g);
+ omp_atomic_load_stmt->rhs = rhs;
}
@@ -4772,8 +5222,9 @@ gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
- return g->gimple_omp_atomic_load.rhs;
+ const gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
+ as_a <const gimple_statement_omp_atomic_load> (g);
+ return omp_atomic_load_stmt->rhs;
}
@@ -4782,8 +5233,9 @@ gimple_omp_atomic_load_rhs (const_gimple g)
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
- return &g->gimple_omp_atomic_load.rhs;
+ gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
+ as_a <gimple_statement_omp_atomic_load> (g);
+ return &omp_atomic_load_stmt->rhs;
}
@@ -4792,8 +5244,9 @@ gimple_omp_atomic_load_rhs_ptr (gimple g)
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
- return g->gimple_omp_continue.control_def;
+ const gimple_statement_omp_continue *omp_continue_stmt =
+ as_a <const gimple_statement_omp_continue> (g);
+ return omp_continue_stmt->control_def;
}
/* The same as above, but return the address. */
@@ -4801,8 +5254,9 @@ gimple_omp_continue_control_def (const_gimple g)
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
- return &g->gimple_omp_continue.control_def;
+ gimple_statement_omp_continue *omp_continue_stmt =
+ as_a <gimple_statement_omp_continue> (g);
+ return &omp_continue_stmt->control_def;
}
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
@@ -4810,8 +5264,9 @@ gimple_omp_continue_control_def_ptr (gimple g)
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
- g->gimple_omp_continue.control_def = def;
+ gimple_statement_omp_continue *omp_continue_stmt =
+ as_a <gimple_statement_omp_continue> (g);
+ omp_continue_stmt->control_def = def;
}
@@ -4820,8 +5275,9 @@ gimple_omp_continue_set_control_def (gimple g, tree def)
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
- return g->gimple_omp_continue.control_use;
+ const gimple_statement_omp_continue *omp_continue_stmt =
+ as_a <const gimple_statement_omp_continue> (g);
+ return omp_continue_stmt->control_use;
}
@@ -4830,8 +5286,9 @@ gimple_omp_continue_control_use (const_gimple g)
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
- return &g->gimple_omp_continue.control_use;
+ gimple_statement_omp_continue *omp_continue_stmt =
+ as_a <gimple_statement_omp_continue> (g);
+ return &omp_continue_stmt->control_use;
}
@@ -4840,8 +5297,9 @@ gimple_omp_continue_control_use_ptr (gimple g)
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
- GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
- g->gimple_omp_continue.control_use = use;
+ gimple_statement_omp_continue *omp_continue_stmt =
+ as_a <gimple_statement_omp_continue> (g);
+ omp_continue_stmt->control_use = use;
}
/* Return a pointer to the body for the GIMPLE_TRANSACTION statement GS. */
@@ -4849,8 +5307,9 @@ gimple_omp_continue_set_control_use (gimple g, tree use)
static inline gimple_seq *
gimple_transaction_body_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- return &gs->gimple_transaction.body;
+ gimple_statement_transaction *transaction_stmt =
+ as_a <gimple_statement_transaction> (gs);
+ return &transaction_stmt->body;
}
/* Return the body for the GIMPLE_TRANSACTION statement GS. */
@@ -4866,15 +5325,17 @@ gimple_transaction_body (gimple gs)
static inline tree
gimple_transaction_label (const_gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- return gs->gimple_transaction.label;
+ const gimple_statement_transaction *transaction_stmt =
+ as_a <const gimple_statement_transaction> (gs);
+ return transaction_stmt->label;
}
static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
- GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- return &gs->gimple_transaction.label;
+ gimple_statement_transaction *transaction_stmt =
+ as_a <gimple_statement_transaction> (gs);
+ return &transaction_stmt->label;
}
/* Return the subcode associated with a GIMPLE_TRANSACTION. */
@@ -4883,7 +5344,7 @@ static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- return gs->gsbase.subcode;
+ return gs->subcode;
}
/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS. */
@@ -4891,8 +5352,9 @@ gimple_transaction_subcode (const_gimple gs)
static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
- GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- gs->gimple_transaction.body = body;
+ gimple_statement_transaction *transaction_stmt =
+ as_a <gimple_statement_transaction> (gs);
+ transaction_stmt->body = body;
}
/* Set the label associated with a GIMPLE_TRANSACTION. */
@@ -4900,8 +5362,9 @@ gimple_transaction_set_body (gimple gs, gimple_seq body)
static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
- GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- gs->gimple_transaction.label = label;
+ gimple_statement_transaction *transaction_stmt =
+ as_a <gimple_statement_transaction> (gs);
+ transaction_stmt->label = label;
}
/* Set the subcode associated with a GIMPLE_TRANSACTION. */
@@ -4910,7 +5373,7 @@ static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
- gs->gsbase.subcode = subcode;
+ gs->subcode = subcode;
}
@@ -5020,7 +5483,7 @@ static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
- return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
+ return (enum br_predictor) (gs->subcode & ~GF_PREDICT_TAKEN);
}
@@ -5030,7 +5493,7 @@ static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
- gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
+ gs->subcode = (gs->subcode & GF_PREDICT_TAKEN)
| (unsigned) predictor;
}
@@ -5041,7 +5504,7 @@ static inline enum prediction
gimple_predict_outcome (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
- return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
+ return (gs->subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
@@ -5052,9 +5515,9 @@ gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
if (outcome == TAKEN)
- gs->gsbase.subcode |= GF_PREDICT_TAKEN;
+ gs->subcode |= GF_PREDICT_TAKEN;
else
- gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
+ gs->subcode &= ~GF_PREDICT_TAKEN;
}
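
All of the gimple.h hunks above follow one pattern: the old GIMPLE_CHECK plus access through a union member (for example gs->gimple_omp_parallel.child_fn) becomes a checked downcast with as_a <T> to the statement subclass, followed by a plain field access. Below is a rough, self-contained sketch of that idiom; the stmt/omp_task_stmt types and the as_a template here are invented stand-ins for GCC's real class hierarchy and is-a machinery, not the actual gimple.h code.

// Hypothetical, much-simplified model of the accessor pattern above; the
// real classes and checked-cast machinery live in GCC's gimple.h.
#include <cassert>

enum stmt_code { STMT_OMP_TASK, STMT_OTHER };

struct stmt
{
  stmt_code code;
};

struct omp_task_stmt : stmt
{
  static const stmt_code expected_code = STMT_OMP_TASK;
  int child_fn;			/* stand-in for the real tree field */
};

/* Plays the role of as_a <T>: assert the dynamic kind, then downcast.  */
template <typename T>
T *
as_a (stmt *gs)
{
  assert (gs->code == T::expected_code);	/* analogue of GIMPLE_CHECK */
  return static_cast <T *> (gs);
}

/* Shape of the converted accessors, e.g. gimple_omp_task_child_fn_ptr.  */
static inline int *
omp_task_child_fn_ptr (stmt *gs)
{
  omp_task_stmt *omp_task = as_a <omp_task_stmt> (gs);
  return &omp_task->child_fn;
}

int
main ()
{
  omp_task_stmt s;
  s.code = STMT_OMP_TASK;
  s.child_fn = 7;
  return (*omp_task_child_fn_ptr (&s) == 7) ? 0 : 1;
}

Once the subclass pointer carries the type information, the field access no longer needs to know which union member to go through, which is what lets the bodies above shrink to a cast plus a direct member reference.
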
diff --git a/gcc/gimplify-me.c b/gcc/gimplify-me.c
index c4818fab756..a7e9387b648 100644
--- a/gcc/gimplify-me.c
+++ b/gcc/gimplify-me.c
@@ -25,11 +25,14 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stmt.h"
+#include "stor-layout.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 94d0beba5e9..faba41959b4 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -24,9 +24,16 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "expr.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
+#include "stringpool.h"
+#include "calls.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "stmt.h"
+#include "print-tree.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "tree-pretty-print.h"
@@ -5002,7 +5009,7 @@ gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
}
else
{
- gimple gtry;
+ gimple_statement_try *gtry;
gimple_seq seq;
enum gimple_try_flags kind;
@@ -5016,7 +5023,7 @@ gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
/* Do not use gsi_replace here, as it may scan operands.
We want to do a simple structural modification only. */
gsi_set_stmt (&iter, gtry);
- iter = gsi_start (gtry->gimple_try.eval);
+ iter = gsi_start (gtry->eval);
}
}
else
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index 6f6b53e58a0..918862ef8fe 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -12,6 +12,14 @@
* go-lang.c: Include only gimplify.h and gimple.h as needed.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * go-backend.c: Include stor-layout.h.
+ * go-gcc.cc: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ * go-lang.c: Include stor-layout.h.
+
2013-11-12 Andrew MacLeod <amacleod@redhat.com>
* go-lang.c: Include gimplify.h.
diff --git a/gcc/go/go-backend.c b/gcc/go/go-backend.c
index c3ffa3b1dea..31d01221fd1 100644
--- a/gcc/go/go-backend.c
+++ b/gcc/go/go-backend.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "simple-object.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "intl.h"
#include "output.h" /* for assemble_string */
diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc
index d70e4f120e0..f47f48c262e 100644
--- a/gcc/go/go-gcc.cc
+++ b/gcc/go/go-gcc.cc
@@ -25,6 +25,9 @@
#include <gmp.h>
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "tree-iterator.h"
#include "gimple.h"
#include "toplev.h"
diff --git a/gcc/go/go-lang.c b/gcc/go/go-lang.c
index aa1d80b5be2..93a0446efe6 100644
--- a/gcc/go/go-lang.c
+++ b/gcc/go/go-lang.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree.h"
#include "gimple.h"
#include "gimplify.h"
+#include "stor-layout.h"
#include "ggc.h"
#include "toplev.h"
#include "debug.h"
diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc
index 823c5e1a4c8..14c10d66a04 100644
--- a/gcc/go/gofrontend/expressions.cc
+++ b/gcc/go/gofrontend/expressions.cc
@@ -11,6 +11,8 @@
#include "toplev.h"
#include "intl.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "gimple.h"
#include "gimplify.h"
#include "tree-iterator.h"
diff --git a/gcc/go/gofrontend/gogo-tree.cc b/gcc/go/gofrontend/gogo-tree.cc
index 12a0889397d..001d24d238d 100644
--- a/gcc/go/gofrontend/gogo-tree.cc
+++ b/gcc/go/gofrontend/gogo-tree.cc
@@ -8,6 +8,9 @@
#include "toplev.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "gimple.h"
#include "gimplify.h"
#include "tree-iterator.h"
diff --git a/gcc/graph.c b/gcc/graph.c
index 5c890e5956c..b75135af742 100644
--- a/gcc/graph.c
+++ b/gcc/graph.c
@@ -153,7 +153,7 @@ draw_cfg_node_succ_edges (pretty_printer *pp, int funcdef_no, basic_block bb)
static void
draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
{
- int *rpo = XNEWVEC (int, n_basic_blocks_for_function (fun));
+ int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
int i, n;
sbitmap visited;
@@ -161,8 +161,8 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
bitmap_clear (visited);
n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, true);
- for (i = n_basic_blocks_for_function (fun) - n;
- i < n_basic_blocks_for_function (fun); i++)
+ for (i = n_basic_blocks_for_fn (fun) - n;
+ i < n_basic_blocks_for_fn (fun); i++)
{
basic_block bb = BASIC_BLOCK (rpo[i]);
draw_cfg_node (pp, fun->funcdef_no, bb);
@@ -170,7 +170,7 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
}
free (rpo);
- if (n != n_basic_blocks_for_function (fun))
+ if (n != n_basic_blocks_for_fn (fun))
{
/* Some blocks are unreachable. We still want to dump them. */
basic_block bb;
@@ -195,7 +195,7 @@ draw_cfg_nodes_for_loop (pretty_printer *pp, int funcdef_no,
const char *fillcolors[3] = { "grey88", "grey77", "grey66" };
if (loop->header != NULL
- && loop->latch != EXIT_BLOCK_PTR)
+ && loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))
pp_printf (pp,
"\tsubgraph cluster_%d_%d {\n"
"\tstyle=\"filled\";\n"
@@ -214,7 +214,7 @@ draw_cfg_nodes_for_loop (pretty_printer *pp, int funcdef_no,
if (loop->header == NULL)
return;
- if (loop->latch == EXIT_BLOCK_PTR)
+ if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
body = get_loop_body (loop);
else
body = get_loop_body_in_bfs_order (loop);
@@ -228,7 +228,7 @@ draw_cfg_nodes_for_loop (pretty_printer *pp, int funcdef_no,
free (body);
- if (loop->latch != EXIT_BLOCK_PTR)
+ if (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))
pp_printf (pp, "\t}\n");
}
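
The graph.c hunks, like many of the later ones, replace accessors that implicitly referred to the current function (n_basic_blocks, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR) with variants that name the function explicitly: n_basic_blocks_for_fn (fun), ENTRY_BLOCK_PTR_FOR_FN (cfun) and EXIT_BLOCK_PTR_FOR_FN (cfun). A minimal sketch of that accessor style follows; the struct layouts below are invented for illustration and are not GCC's actual definitions.

// Hypothetical sketch of the "*_for_fn" / "*_FOR_FN" accessor style:
// the function whose CFG is meant is passed explicitly instead of being
// the implicit global cfun.
#include <cstdio>

struct basic_block_def { int index; };
typedef basic_block_def *basic_block;

struct control_flow_graph
{
  int x_n_basic_blocks;
  basic_block x_exit_block_ptr;
};

struct function
{
  control_flow_graph *cfg;
};

#define n_basic_blocks_for_fn(FN)   ((FN)->cfg->x_n_basic_blocks)
#define EXIT_BLOCK_PTR_FOR_FN(FN)   ((FN)->cfg->x_exit_block_ptr)

int
main ()
{
  basic_block_def exit_bb = { 1 };
  control_flow_graph cfg = { 2, &exit_bb };
  function fn = { &cfg };

  /* Same shape as the converted call sites, e.g. draw_cfg_nodes_no_loops.  */
  printf ("%d blocks, exit index %d\n",
	  n_basic_blocks_for_fn (&fn),
	  EXIT_BLOCK_PTR_FOR_FN (&fn)->index);
  return 0;
}

Passing the function explicitly makes call sites such as draw_cfg_nodes_no_loops usable on a function other than the one currently being compiled.
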
diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c
index 8f17934710d..4d273353371 100644
--- a/gcc/graphite-clast-to-gimple.c
+++ b/gcc/graphite-clast-to-gimple.c
@@ -1097,7 +1097,7 @@ translate_clast_user (struct clast_user_stmt *stmt, edge next_e,
gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
vec<tree> iv_map;
- if (GBB_BB (gbb) == ENTRY_BLOCK_PTR)
+ if (GBB_BB (gbb) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return next_e;
nb_loops = number_of_loops (cfun);
@@ -1717,10 +1717,9 @@ gloog (scop_p scop, bb_pbb_htab_type bb_pbb_mapping)
if (dump_file && (dump_flags & TDF_DETAILS))
{
loop_p loop;
- loop_iterator li;
int num_no_dependency = 0;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
if (loop->can_be_parallel)
num_no_dependency++;
diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c
index 7b0ae7ea74d..0cfb5a59cc9 100644
--- a/gcc/graphite-scop-detection.c
+++ b/gcc/graphite-scop-detection.c
@@ -448,7 +448,7 @@ scopdet_basic_block_info (basic_block bb, loop_p outermost_loop,
gimple stmt;
/* XXX: ENTRY_BLOCK_PTR could be optimized in later steps. */
- basic_block entry_block = ENTRY_BLOCK_PTR;
+ basic_block entry_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
stmt = harmful_stmt_in_bb (entry_block, outermost_loop, bb);
result.difficult = (stmt != NULL);
result.exit = NULL;
@@ -1030,7 +1030,7 @@ create_sese_edges (vec<sd_region> regions)
FOR_EACH_VEC_ELT (regions, i, s)
/* Don't handle multiple edges exiting the function. */
if (!find_single_exit_edge (s)
- && s->exit != EXIT_BLOCK_PTR)
+ && s->exit != EXIT_BLOCK_PTR_FOR_FN (cfun))
create_single_exit_edge (s);
unmark_exit_edges (regions);
@@ -1375,14 +1375,13 @@ canonicalize_loop_closed_ssa (loop_p loop)
static void
canonicalize_loop_closed_ssa_form (void)
{
- loop_iterator li;
loop_p loop;
#ifdef ENABLE_CHECKING
verify_loop_closed_ssa (true);
#endif
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
canonicalize_loop_closed_ssa (loop);
rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
@@ -1403,7 +1402,8 @@ build_scops (vec<scop_p> *scops)
stack_vec<sd_region, 3> regions;
canonicalize_loop_closed_ssa_form ();
- build_scops_1 (single_succ (ENTRY_BLOCK_PTR), ENTRY_BLOCK_PTR->loop_father,
+ build_scops_1 (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father,
&regions, loop);
create_sese_edges (regions);
build_graphite_scops (regions, scops);
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 11c61759548..efe14b39d53 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -42,6 +42,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
@@ -55,8 +56,10 @@ along with GCC; see the file COPYING3. If not see
#include "domwalk.h"
#include "sese.h"
#include "tree-ssa-propagate.h"
+#include "expr.h"
#ifdef HAVE_cloog
+#include "expr.h"
#include "graphite-poly.h"
#include "graphite-sese-to-poly.h"
@@ -3061,12 +3064,11 @@ rewrite_commutative_reductions_out_of_ssa_loop (scop_p scop,
static void
rewrite_commutative_reductions_out_of_ssa (scop_p scop)
{
- loop_iterator li;
loop_p loop;
bool changed = false;
sese region = SCOP_REGION (scop);
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
if (loop_in_sese_p (loop, region))
changed |= rewrite_commutative_reductions_out_of_ssa_loop (scop, loop);
@@ -3088,12 +3090,11 @@ rewrite_commutative_reductions_out_of_ssa (scop_p scop)
static bool
scop_ivs_can_be_represented (scop_p scop)
{
- loop_iterator li;
loop_p loop;
gimple_stmt_iterator psi;
bool result = true;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
if (!loop_in_sese_p (loop, SCOP_REGION (scop)))
continue;
@@ -3113,7 +3114,7 @@ scop_ivs_can_be_represented (scop_p scop)
}
}
if (!result)
- FOR_EACH_LOOP_BREAK (li);
+ break;
}
return result;
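
The loop-iteration changes in the graphite files drop the explicit iterator object: FOR_EACH_LOOP (li, loop, 0) becomes FOR_EACH_LOOP (loop, 0), the loop_iterator declarations go away, and FOR_EACH_LOOP_BREAK (li) becomes a plain break (see scop_ivs_can_be_represented above). The following is a hypothetical, self-contained sketch of how an iteration macro can own its iterator so that break needs no cleanup helper; the loop_info/loop_iter types and the FOR_EACH_LOOP_SKETCH macro are invented for illustration only.

// Invented illustration of an iteration macro whose iterator lives in the
// for-init clause, so a plain `break' leaves the loop cleanly.
#include <cstdio>
#include <vector>

struct loop_info { int num; bool can_be_parallel; };

static std::vector<loop_info *> all_loops;

struct loop_iter
{
  size_t i;
  explicit loop_iter (size_t start) : i (start) {}
  loop_info *next () { return i < all_loops.size () ? all_loops[i++] : 0; }
};

#define FOR_EACH_LOOP_SKETCH(LOOP)				\
  for (loop_iter li_ (0); ((LOOP) = li_.next ()) != 0; )

int
main ()
{
  loop_info a = { 1, true }, b = { 2, false };
  all_loops.push_back (&a);
  all_loops.push_back (&b);

  loop_info *loop;
  int num_no_dependency = 0;
  FOR_EACH_LOOP_SKETCH (loop)
    {
      if (!loop->can_be_parallel)
	break;			/* plain break, as in the converted code */
      num_no_dependency++;
    }
  printf ("%d\n", num_no_dependency);
  return 0;
}
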
diff --git a/gcc/graphite.c b/gcc/graphite.c
index 5223de959d5..f87aede8420 100644
--- a/gcc/graphite.c
+++ b/gcc/graphite.c
@@ -208,7 +208,8 @@ graphite_initialize (isl_ctx *ctx)
if (number_of_loops (cfun) <= 1
/* FIXME: This limit on the number of basic blocks of a function
should be removed when the SCOP detection is faster. */
- || n_basic_blocks > PARAM_VALUE (PARAM_GRAPHITE_MAX_BBS_PER_FUNCTION))
+ || (n_basic_blocks_for_fn (cfun) >
+ PARAM_VALUE (PARAM_GRAPHITE_MAX_BBS_PER_FUNCTION)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
print_global_statistics (dump_file);
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index aa3ffe3be30..c98b36c1ab6 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -1615,7 +1615,7 @@ priority (rtx insn)
/* Selective scheduling does not define RECOVERY_BLOCK macro. */
rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
- if (!rec || rec == EXIT_BLOCK_PTR)
+ if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
prev_first = PREV_INSN (insn);
twin = insn;
@@ -6754,7 +6754,7 @@ haifa_sched_init (void)
whole function. */
{
bb_vec_t bbs;
- bbs.create (n_basic_blocks);
+ bbs.create (n_basic_blocks_for_fn (cfun));
basic_block bb;
sched_init_bbs ();
@@ -7522,7 +7522,7 @@ static void
sched_extend_bb (void)
{
/* The following is done to keep current_sched_info->next_tail non null. */
- rtx end = BB_END (EXIT_BLOCK_PTR->prev_bb);
+ rtx end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
rtx insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
if (NEXT_INSN (end) == 0
|| (!NOTE_P (insn)
@@ -7533,7 +7533,7 @@ sched_extend_bb (void)
rtx note = emit_note_after (NOTE_INSN_DELETED, end);
/* Make note appear outside BB. */
set_block_for_insn (note, NULL);
- BB_END (EXIT_BLOCK_PTR->prev_bb) = end;
+ BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
}
}
@@ -7551,7 +7551,7 @@ init_before_recovery (basic_block *before_recovery_ptr)
basic_block last;
edge e;
- last = EXIT_BLOCK_PTR->prev_bb;
+ last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
e = find_fallthru_edge_from (last);
if (e)
@@ -7591,7 +7591,8 @@ init_before_recovery (basic_block *before_recovery_ptr)
redirect_edge_succ (e, single);
make_single_succ_edge (single, empty, 0);
- make_single_succ_edge (empty, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
+ make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ EDGE_FALLTHRU);
label = block_label (empty);
x = emit_jump_insn_after (gen_jump (label), BB_END (single));
@@ -7734,14 +7735,14 @@ create_check_block_twin (rtx insn, bool mutate_p)
}
else
{
- rec = EXIT_BLOCK_PTR;
+ rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
label = NULL_RTX;
}
/* Emit CHECK. */
check = targetm.sched.gen_spec_check (insn, label, todo_spec);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* To have mem_reg alive at the beginning of second_bb,
we emit check BEFORE insn, so insn after splitting
@@ -7774,7 +7775,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
/* Initialize TWIN (twin is a duplicate of original instruction
in the recovery block). */
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
sd_iterator_def sd_it;
dep_t dep;
@@ -7811,7 +7812,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
provide correct value for INSN_TICK (TWIN). */
sd_copy_back_deps (twin, insn, true);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
/* In case of branchy check, fix CFG. */
{
basic_block first_bb, second_bb;
@@ -7823,7 +7824,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
sched_create_recovery_edges (first_bb, rec, second_bb);
sched_init_only_bb (second_bb, first_bb);
- sched_init_only_bb (rec, EXIT_BLOCK_PTR);
+ sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
jump = BB_END (rec);
haifa_init_insn (jump);
@@ -7864,7 +7865,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
sd_add_dep (new_dep, false);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
DEP_CON (new_dep) = twin;
sd_add_dep (new_dep, false);
@@ -7913,7 +7914,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
/* Future speculations: call the helper. */
process_insn_forw_deps_be_in_spec (insn, twin, fs);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Which types of dependencies should we use here is,
generally, machine-dependent question... But, for now,
@@ -8127,7 +8128,7 @@ unlink_bb_notes (basic_block first, basic_block last)
bb_header = XNEWVEC (rtx, last_basic_block);
/* Make a sentinel. */
- if (last->next_bb != EXIT_BLOCK_PTR)
+ if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb_header[last->next_bb->index] = 0;
first = first->next_bb;
@@ -8171,7 +8172,7 @@ restore_bb_notes (basic_block first)
first = first->next_bb;
/* Remember: FIRST is actually a second basic block in the ebb. */
- while (first != EXIT_BLOCK_PTR
+ while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bb_header[first->index])
{
rtx prev, label, note, next;
diff --git a/gcc/hw-doloop.c b/gcc/hw-doloop.c
index 5d266387485..77c8149f806 100644
--- a/gcc/hw-doloop.c
+++ b/gcc/hw-doloop.c
@@ -260,7 +260,7 @@ discover_loop (hwloop_info loop, basic_block tail_bb, rtx tail_insn, rtx reg)
{
edge e;
edge_iterator ei;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* We've reached the exit block. The loop must be bad. */
if (dump_file)
@@ -539,7 +539,7 @@ reorder_loops (hwloop_info loops)
FOR_EACH_BB (bb)
{
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
else
bb->aux = NULL;
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 17d26c583c7..ac0276cea04 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -3185,7 +3185,8 @@ merge_if_block (struct ce_if_block * ce_info)
/* There should still be something at the end of the THEN or ELSE
blocks taking us to our final destination. */
gcc_assert (JUMP_P (last)
- || (EDGE_SUCC (combo_bb, 0)->dest == EXIT_BLOCK_PTR
+ || (EDGE_SUCC (combo_bb, 0)->dest
+ == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& CALL_P (last)
&& SIBLING_CALL_P (last))
|| ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
@@ -3199,7 +3200,7 @@ merge_if_block (struct ce_if_block * ce_info)
may be zero incoming edges if the THEN block didn't actually join
back up (as with a call to a non-return function). */
else if (EDGE_COUNT (join_bb->preds) < 2
- && join_bb != EXIT_BLOCK_PTR)
+ && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* We can merge the JOIN cleanly and update the dataflow try
again on this pass.*/
@@ -3216,7 +3217,7 @@ merge_if_block (struct ce_if_block * ce_info)
&& single_succ (combo_bb) == join_bb);
/* Remove the jump and cruft from the end of the COMBO block. */
- if (join_bb != EXIT_BLOCK_PTR)
+ if (join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
tidy_fallthru_edge (single_succ_edge (combo_bb));
}
@@ -3495,7 +3496,7 @@ cond_exec_find_if_block (struct ce_if_block * ce_info)
code processing. ??? we should fix this in the future. */
if (EDGE_COUNT (then_bb->succs) == 0)
{
- if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR)
+ if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rtx last_insn = BB_END (then_bb);
@@ -3586,7 +3587,8 @@ cond_exec_find_if_block (struct ce_if_block * ce_info)
next = then_bb;
if (else_bb && (next = next->next_bb) != else_bb)
return FALSE;
- if ((next = next->next_bb) != join_bb && join_bb != EXIT_BLOCK_PTR)
+ if ((next = next->next_bb) != join_bb
+ && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (else_bb)
join_bb = NULL;
@@ -3725,7 +3727,7 @@ block_has_only_trap (basic_block bb)
rtx trap;
/* We're not the exit block. */
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL_RTX;
/* The block must have no successors. */
@@ -3881,7 +3883,7 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
predictable_edge_p (then_edge)))))
return FALSE;
- if (else_bb == EXIT_BLOCK_PTR)
+ if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rtx jump = BB_END (else_edge->src);
gcc_assert (JUMP_P (jump));
@@ -3902,12 +3904,12 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
if (then_bb->next_bb == else_bb
&& then_bb->prev_bb == test_bb
- && else_bb != EXIT_BLOCK_PTR)
+ && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
redirect_edge_succ (FALLTHRU_EDGE (test_bb), else_bb);
new_bb = 0;
}
- else if (else_bb == EXIT_BLOCK_PTR)
+ else if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
new_bb = force_nonfallthru_and_redirect (FALLTHRU_EDGE (test_bb),
else_bb, else_target);
else
@@ -4196,9 +4198,9 @@ dead_or_predicable (basic_block test_bb, basic_block merge_bb,
saved in caller-saved regs. A caller-saved reg requires the
prologue, killing a shrink-wrap opportunity. */
if ((flag_shrink_wrap && HAVE_simple_return && !epilogue_completed)
- && ENTRY_BLOCK_PTR->next_bb == test_bb
+ && ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == test_bb
&& single_succ_p (new_dest)
- && single_succ (new_dest) == EXIT_BLOCK_PTR
+ && single_succ (new_dest) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_intersect_p (df_get_live_in (new_dest), merge_set))
{
regset return_regs;
@@ -4213,8 +4215,10 @@ dead_or_predicable (basic_block test_bb, basic_block merge_bb,
&& targetm.calls.function_value_regno_p (i))
bitmap_set_bit (return_regs, INCOMING_REGNO (i));
- bitmap_and_into (return_regs, df_get_live_out (ENTRY_BLOCK_PTR));
- bitmap_and_into (return_regs, df_get_live_in (EXIT_BLOCK_PTR));
+ bitmap_and_into (return_regs,
+ df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ bitmap_and_into (return_regs,
+ df_get_live_in (EXIT_BLOCK_PTR_FOR_FN (cfun)));
if (!bitmap_empty_p (return_regs))
{
FOR_BB_INSNS_REVERSE (new_dest, insn)
@@ -4259,7 +4263,7 @@ dead_or_predicable (basic_block test_bb, basic_block merge_bb,
{
if (JUMP_P (BB_END (dest_edge->src)))
new_dest_label = JUMP_LABEL (BB_END (dest_edge->src));
- else if (new_dest == EXIT_BLOCK_PTR)
+ else if (new_dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
new_dest_label = ret_rtx;
else
new_dest_label = block_label (new_dest);
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index a22f222012c..867747041ae 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "internal-fn.h"
#include "tree.h"
+#include "stor-layout.h"
#include "expr.h"
#include "optabs.h"
#include "gimple.h"
@@ -139,6 +140,14 @@ expand_GOMP_SIMD_LAST_LANE (gimple stmt ATTRIBUTE_UNUSED)
gcc_unreachable ();
}
+/* This should get expanded in the sanopt pass. */
+
+static void
+expand_UBSAN_NULL (gimple stmt ATTRIBUTE_UNUSED)
+{
+ gcc_unreachable ();
+}
+
/* Routines to expand each internal function, indexed by function number.
Each routine has the prototype:
diff --git a/gcc/internal-fn.def b/gcc/internal-fn.def
index 0f5cc3cba42..7193874c811 100644
--- a/gcc/internal-fn.def
+++ b/gcc/internal-fn.def
@@ -44,3 +44,4 @@ DEF_INTERNAL_FN (GOMP_SIMD_LANE, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (GOMP_SIMD_VF, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (GOMP_SIMD_LAST_LANE, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (ANNOTATE, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
+DEF_INTERNAL_FN (UBSAN_NULL, ECF_LEAF | ECF_NOTHROW)
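
The internal-fn.c and internal-fn.def hunks register a new internal function, UBSAN_NULL: one DEF_INTERNAL_FN line in the .def file and one expand_UBSAN_NULL routine whose body is gcc_unreachable, because the call is expected to be handled in the sanopt pass (per the comment added in internal-fn.c). internal-fn.def is, in effect, an X-macro table that gets expanded more than once with different definitions of DEF_INTERNAL_FN. Below is a standalone sketch of that registration pattern; MY_FN_TABLE, the expanders and every other name are invented for illustration and are not GCC's actual tables.

// Hypothetical sketch of the X-macro pattern: one DEF line per function,
// expanded once to build an enum and once to build the expander table.
#include <cstdio>
#include <cstdlib>

/* What would live in a .def file.  */
#define MY_FN_TABLE(DEF)			\
  DEF (ANNOTATE)				\
  DEF (UBSAN_NULL)

/* Enum of internal functions.  */
#define DEF_ENUM(NAME) MY_FN_##NAME,
enum my_fn_code { MY_FN_TABLE (DEF_ENUM) MY_FN_LAST };
#undef DEF_ENUM

/* One expander per function.  */
static void
expand_ANNOTATE (int /* stmt */)
{
  printf ("expanding ANNOTATE\n");
}

static void
expand_UBSAN_NULL (int /* stmt */)
{
  /* Like the real expander: the call should have been lowered earlier
     (in GCC, by the sanopt pass), so reaching this is a bug.  */
  abort ();
}

/* Dispatch table, generated from the same .def list.  */
#define DEF_EXPANDER(NAME) expand_##NAME,
static void (*const my_fn_expanders[]) (int) = { MY_FN_TABLE (DEF_EXPANDER) };
#undef DEF_EXPANDER

int
main ()
{
  my_fn_expanders[MY_FN_ANNOTATE] (0);
  return 0;
}
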
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index 026c109bc5e..b1efde7fbf4 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -110,7 +110,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "print-tree.h"
+#include "calls.h"
#include "cgraph.h"
+#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "pointer-set.h"
@@ -121,6 +124,12 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "ipa-inline.h"
#include "diagnostic.h"
+#include "tree-dfa.h"
+
+/* Dummy polymorphic call context. */
+
+const ipa_polymorphic_call_context ipa_dummy_polymorphic_call_context
+ = {0, NULL, false, true};
/* Pointer set of all call targets appearing in the cache. */
static pointer_set_t *cached_polymorphic_call_targets;
@@ -292,8 +301,6 @@ add_type_duplicate (odr_type val, tree type)
inform (DECL_SOURCE_LOCATION (TYPE_NAME (val->type)),
"a type with the same name but different layout is "
"defined in another translation unit");
- debug_tree (BINFO_VTABLE (TYPE_BINFO (type)));
- debug_tree (BINFO_VTABLE (TYPE_BINFO (val->type)));
if (cgraph_dump_file)
{
fprintf (cgraph_dump_file, "ODR violation or merging or ODR type bug?\n");
@@ -522,6 +529,7 @@ tree
method_class_type (tree t)
{
tree first_parm_type = TREE_VALUE (TYPE_ARG_TYPES (t));
+ gcc_assert (TREE_CODE (t) == METHOD_TYPE);
return TREE_TYPE (first_parm_type);
}
@@ -555,34 +563,50 @@ build_type_inheritance_graph (void)
timevar_pop (TV_IPA_INHERITANCE);
}
-/* If TARGET has associated node, record it in the NODES array. */
+/* If TARGET has an associated node, record it in the NODES array.
+   If TARGET cannot be inserted (for example because its body was
+ already removed and there is no way to refer to it), clear COMPLETEP. */
static void
maybe_record_node (vec <cgraph_node *> &nodes,
- tree target, pointer_set_t *inserted)
+ tree target, pointer_set_t *inserted,
+ bool *completep)
{
struct cgraph_node *target_node;
enum built_in_function fcode;
- if (target
+ if (!target
/* Those are used to mark impossible scenarios. */
- && (fcode = DECL_FUNCTION_CODE (target))
- != BUILT_IN_UNREACHABLE
- && fcode != BUILT_IN_TRAP
- && !pointer_set_insert (inserted, target)
- && (target_node = cgraph_get_node (target)) != NULL
+ || (fcode = DECL_FUNCTION_CODE (target))
+ == BUILT_IN_UNREACHABLE
+ || fcode == BUILT_IN_TRAP)
+ return;
+
+ target_node = cgraph_get_node (target);
+
+ if (target_node != NULL
&& (TREE_PUBLIC (target)
|| target_node->definition)
&& symtab_real_symbol_p (target_node))
{
- pointer_set_insert (cached_polymorphic_call_targets,
- target_node);
- nodes.safe_push (target_node);
+ gcc_assert (!target_node->global.inlined_to);
+ gcc_assert (symtab_real_symbol_p (target_node));
+ if (!pointer_set_insert (inserted, target))
+ {
+ pointer_set_insert (cached_polymorphic_call_targets,
+ target_node);
+ nodes.safe_push (target_node);
+ }
}
+ else if (completep
+ && !type_in_anonymous_namespace_p
+ (method_class_type (TREE_TYPE (target))))
+    *completep = false;
}
-/* See if BINFO's type match OTR_TYPE. If so, lookup method
- in vtable of TYPE_BINFO and insert method to NODES array.
+/* See if BINFO's type matches OUTER_TYPE.  If so, look up the
+   BINFO of the subtype of OTR_TYPE at OFFSET and, in that BINFO, find
+   the method in the vtable and insert it into the NODES array.
Otherwise recurse to base BINFOs.
This match what get_binfo_at_offset does, but with offset
being unknown.
@@ -593,20 +617,23 @@ maybe_record_node (vec <cgraph_node *> &nodes,
otherwise it is binfo of BINFO's type.
MATCHED_VTABLES tracks virtual tables we already did lookup
- for virtual function in.
+   for virtual functions in.  INSERTED tracks nodes we already
+ inserted.
ANONYMOUS is true if BINFO is part of anonymous namespace.
*/
static void
-record_binfo (vec <cgraph_node *> &nodes,
- tree binfo,
- tree otr_type,
- tree type_binfo,
- HOST_WIDE_INT otr_token,
- pointer_set_t *inserted,
- pointer_set_t *matched_vtables,
- bool anonymous)
+record_target_from_binfo (vec <cgraph_node *> &nodes,
+ tree binfo,
+ tree otr_type,
+ tree type_binfo,
+ HOST_WIDE_INT otr_token,
+ tree outer_type,
+ HOST_WIDE_INT offset,
+ pointer_set_t *inserted,
+ pointer_set_t *matched_vtables,
+ bool anonymous)
{
tree type = BINFO_TYPE (binfo);
int i;
@@ -614,14 +641,15 @@ record_binfo (vec <cgraph_node *> &nodes,
gcc_checking_assert (BINFO_VTABLE (type_binfo));
- if (types_same_for_odr (type, otr_type)
- && !pointer_set_insert (matched_vtables, BINFO_VTABLE (type_binfo)))
+ if (types_same_for_odr (type, outer_type))
{
+ tree inner_binfo = get_binfo_at_offset (type_binfo,
+ offset, otr_type);
/* For types in anonymous namespace first check if the respective vtable
is alive. If not, we know the type can't be called. */
if (!flag_ltrans && anonymous)
{
- tree vtable = BINFO_VTABLE (type_binfo);
+ tree vtable = BINFO_VTABLE (inner_binfo);
struct varpool_node *vnode;
if (TREE_CODE (vtable) == POINTER_PLUS_EXPR)
@@ -630,9 +658,13 @@ record_binfo (vec <cgraph_node *> &nodes,
if (!vnode || !vnode->definition)
return;
}
- tree target = gimple_get_virt_method_for_binfo (otr_token, type_binfo);
- if (target)
- maybe_record_node (nodes, target, inserted);
+ gcc_assert (inner_binfo);
+ if (!pointer_set_insert (matched_vtables, BINFO_VTABLE (inner_binfo)))
+ {
+ tree target = gimple_get_virt_method_for_binfo (otr_token, inner_binfo);
+ if (target)
+ maybe_record_node (nodes, target, inserted, NULL);
+ }
return;
}
@@ -640,12 +672,13 @@ record_binfo (vec <cgraph_node *> &nodes,
for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
/* Walking bases that have no virtual method is pointless excercise. */
if (polymorphic_type_binfo_p (base_binfo))
- record_binfo (nodes, base_binfo, otr_type,
- /* In the case of single inheritance, the virtual table
- is shared with the outer type. */
- BINFO_VTABLE (base_binfo) ? base_binfo : type_binfo,
- otr_token, inserted,
- matched_vtables, anonymous);
+ record_target_from_binfo (nodes, base_binfo, otr_type,
+ /* In the case of single inheritance,
+ the virtual table is shared with
+ the outer type. */
+ BINFO_VTABLE (base_binfo) ? base_binfo : type_binfo,
+ otr_token, outer_type, offset, inserted,
+ matched_vtables, anonymous);
}
/* Lookup virtual methods matching OTR_TYPE (with OFFSET and OTR_TOKEN)
@@ -659,19 +692,23 @@ possible_polymorphic_call_targets_1 (vec <cgraph_node *> &nodes,
pointer_set_t *matched_vtables,
tree otr_type,
odr_type type,
- HOST_WIDE_INT otr_token)
+ HOST_WIDE_INT otr_token,
+ tree outer_type,
+ HOST_WIDE_INT offset)
{
tree binfo = TYPE_BINFO (type->type);
unsigned int i;
- record_binfo (nodes, binfo, otr_type, binfo, otr_token, inserted,
- matched_vtables, type->anonymous_namespace);
+ record_target_from_binfo (nodes, binfo, otr_type, binfo, otr_token,
+ outer_type, offset,
+ inserted, matched_vtables,
+ type->anonymous_namespace);
for (i = 0; i < type->derived_types.length (); i++)
possible_polymorphic_call_targets_1 (nodes, inserted,
matched_vtables,
otr_type,
type->derived_types[i],
- otr_token);
+ otr_token, outer_type, offset);
}
/* Cache of queries for polymorphic call targets.
@@ -682,9 +719,11 @@ possible_polymorphic_call_targets_1 (vec <cgraph_node *> &nodes,
struct polymorphic_call_target_d
{
- odr_type type;
HOST_WIDE_INT otr_token;
+ ipa_polymorphic_call_context context;
+ odr_type type;
vec <cgraph_node *> targets;
+ bool final;
};
/* Polymorphic call target cache helpers. */
@@ -703,8 +742,17 @@ struct polymorphic_call_target_hasher
inline hashval_t
polymorphic_call_target_hasher::hash (const value_type *odr_query)
{
- return iterative_hash_hashval_t (odr_query->type->id,
- odr_query->otr_token);
+ hashval_t hash;
+
+ hash = iterative_hash_host_wide_int
+ (odr_query->otr_token,
+ odr_query->type->id);
+ hash = iterative_hash_hashval_t (TYPE_UID (odr_query->context.outer_type),
+ hash);
+ hash = iterative_hash_host_wide_int (odr_query->context.offset, hash);
+ return iterative_hash_hashval_t
+ (((int)odr_query->context.maybe_in_construction << 1)
+ | (int)odr_query->context.maybe_derived_type, hash);
}
/* Compare cache entries T1 and T2. */
@@ -713,7 +761,12 @@ inline bool
polymorphic_call_target_hasher::equal (const value_type *t1,
const compare_type *t2)
{
- return t1->type == t2->type && t1->otr_token == t2->otr_token;
+ return (t1->type == t2->type && t1->otr_token == t2->otr_token
+ && t1->context.offset == t2->context.offset
+ && t1->context.outer_type == t2->context.outer_type
+ && t1->context.maybe_in_construction
+ == t2->context.maybe_in_construction
+ && t1->context.maybe_derived_type == t2->context.maybe_derived_type);
}
/* Remove entry in polymorphic call target cache hash. */
@@ -754,6 +807,337 @@ devirt_node_removal_hook (struct cgraph_node *n, void *d ATTRIBUTE_UNUSED)
free_polymorphic_call_targets_hash ();
}
+/* CONTEXT->OUTER_TYPE is the type of a memory object in which an object of
+   EXPECTED_TYPE is contained at CONTEXT->OFFSET.  Walk the memory
+   representation of CONTEXT->OUTER_TYPE and find the outermost class type
+   that matches EXPECTED_TYPE or contains it as a base.  Update CONTEXT
+ to represent it.
+
+ For example when CONTEXT represents type
+ class A
+ {
+ int a;
+ class B b;
+ }
+ and we look for type at offset sizeof(int), we end up with B and offset 0.
+ If the same is produced by multiple inheritance, we end up with A and offset
+ sizeof(int).
+
+   If we cannot find the corresponding class, give up by setting
+   CONTEXT->OUTER_TYPE to EXPECTED_TYPE and CONTEXT->OFFSET to 0.
+   Return true when the lookup was successful. */
+
+static bool
+get_class_context (ipa_polymorphic_call_context *context,
+ tree expected_type)
+{
+ tree type = context->outer_type;
+ HOST_WIDE_INT offset = context->offset;
+
+ /* Find the sub-object the constant actually refers to and mark whether it is
+ an artificial one (as opposed to a user-defined one). */
+ while (true)
+ {
+ HOST_WIDE_INT pos, size;
+ tree fld;
+
+ /* On a match, just return what we found. */
+ if (TREE_CODE (type) == TREE_CODE (expected_type)
+ && types_same_for_odr (type, expected_type))
+ {
+ gcc_assert (offset == 0);
+ return true;
+ }
+
+      /* Walk the fields and find the corresponding one at OFFSET. */
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ for (fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld))
+ {
+ if (TREE_CODE (fld) != FIELD_DECL)
+ continue;
+
+ pos = int_bit_position (fld);
+ size = tree_to_uhwi (DECL_SIZE (fld));
+ if (pos <= offset && (pos + size) > offset)
+ break;
+ }
+
+ if (!fld)
+ goto give_up;
+
+ type = TREE_TYPE (fld);
+ offset -= pos;
+ /* DECL_ARTIFICIAL represents a basetype. */
+ if (!DECL_ARTIFICIAL (fld))
+ {
+ context->outer_type = type;
+ context->offset = offset;
+	      /* As soon as we see a field containing the type,
+		 we know we are not looking for derivations. */
+ context->maybe_derived_type = false;
+ }
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ tree subtype = TREE_TYPE (type);
+
+	  /* Give up if we don't know the size of the array element. */
+	  if (!tree_fits_shwi_p (TYPE_SIZE (subtype))
+	      || tree_to_shwi (TYPE_SIZE (subtype)) <= 0)
+ goto give_up;
+ offset = offset % tree_to_shwi (TYPE_SIZE (subtype));
+ type = subtype;
+ context->outer_type = type;
+ context->offset = offset;
+ context->maybe_derived_type = false;
+ }
+ /* Give up on anything else. */
+ else
+ goto give_up;
+ }
+
+  /* If we failed to find the subtype we were looking for, give up and fall
+     back to the most generic query. */
+give_up:
+ context->outer_type = expected_type;
+ context->offset = 0;
+ context->maybe_derived_type = true;
+ return false;
+}
+
+/* Return true if OUTER_TYPE contains OTR_TYPE at OFFSET. */
+
+static bool
+contains_type_p (tree outer_type, HOST_WIDE_INT offset,
+ tree otr_type)
+{
+ ipa_polymorphic_call_context context = {offset, outer_type,
+ false, true};
+ return get_class_context (&context, otr_type);
+}
+
+/* Given the call REF in FNDECL, determine the class of the polymorphic
+   call (OTR_TYPE), its token (OTR_TOKEN) and its CONTEXT.
+   Return a pointer to the object described by the context.  */
+
+tree
+get_polymorphic_call_info (tree fndecl,
+ tree ref,
+ tree *otr_type,
+ HOST_WIDE_INT *otr_token,
+ ipa_polymorphic_call_context *context)
+{
+ tree base_pointer;
+ *otr_type = obj_type_ref_class (ref);
+ *otr_token = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (ref));
+
+ /* Set up basic info in case we find nothing interesting in the analysis. */
+ context->outer_type = *otr_type;
+ context->offset = 0;
+ base_pointer = OBJ_TYPE_REF_OBJECT (ref);
+ context->maybe_derived_type = true;
+ context->maybe_in_construction = false;
+
+ /* Walk SSA for outer object. */
+ do
+ {
+ if (TREE_CODE (base_pointer) == SSA_NAME
+ && !SSA_NAME_IS_DEFAULT_DEF (base_pointer)
+ && SSA_NAME_DEF_STMT (base_pointer)
+ && gimple_assign_single_p (SSA_NAME_DEF_STMT (base_pointer)))
+ {
+ base_pointer = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (base_pointer));
+ STRIP_NOPS (base_pointer);
+ }
+ else if (TREE_CODE (base_pointer) == ADDR_EXPR)
+ {
+ HOST_WIDE_INT size, max_size;
+ HOST_WIDE_INT offset2;
+ tree base = get_ref_base_and_extent (TREE_OPERAND (base_pointer, 0),
+ &offset2, &size, &max_size);
+
+ /* Punt if this is a varying address; otherwise analyze the base. */
+ if ((TREE_CODE (base) == MEM_REF || DECL_P (base))
+ && max_size != -1
+ && max_size == size)
+ {
+ /* We found a dereference of a pointer. The type of the pointer
+ and of the MEM_REF is meaningless, but we can look further. */
+ if (TREE_CODE (base) == MEM_REF)
+ {
+ base_pointer = TREE_OPERAND (base, 0);
+ context->offset
+ += offset2 + mem_ref_offset (base).ulow () * BITS_PER_UNIT;
+ context->outer_type = NULL;
+ }
+ /* We found the base object. In this case the outer_type
+ is known. */
+ else if (DECL_P (base))
+ {
+ context->outer_type = TREE_TYPE (base);
+ gcc_assert (!POINTER_TYPE_P (context->outer_type));
+
+ /* Only type-inconsistent programs can have an otr_type that is
+ not part of the outer type. */
+ if (!contains_type_p (context->outer_type,
+ context->offset, *otr_type))
+ return base_pointer;
+ context->offset += offset2;
+ base_pointer = NULL;
+ /* Make the very conservative assumption that all objects
+ may be in construction.
+ TODO: ipa-prop already contains code to tell this better;
+ merge it later. */
+ context->maybe_in_construction = true;
+ context->maybe_derived_type = false;
+ return base_pointer;
+ }
+ else
+ break;
+ }
+ else
+ break;
+ }
+ else if (TREE_CODE (base_pointer) == POINTER_PLUS_EXPR
+ && tree_fits_uhwi_p (TREE_OPERAND (base_pointer, 1)))
+ {
+ context->offset += tree_to_shwi (TREE_OPERAND (base_pointer, 1))
+ * BITS_PER_UNIT;
+ base_pointer = TREE_OPERAND (base_pointer, 0);
+ }
+ else
+ break;
+ }
+ while (true);
+
+ /* Try to determine type of the outer object. */
+ if (TREE_CODE (base_pointer) == SSA_NAME
+ && SSA_NAME_IS_DEFAULT_DEF (base_pointer)
+ && TREE_CODE (SSA_NAME_VAR (base_pointer)) == PARM_DECL)
+ {
+ /* See if parameter is THIS pointer of a method. */
+ if (TREE_CODE (TREE_TYPE (fndecl)) == METHOD_TYPE
+ && SSA_NAME_VAR (base_pointer) == DECL_ARGUMENTS (fndecl))
+ {
+ context->outer_type = TREE_TYPE (TREE_TYPE (base_pointer));
+ gcc_assert (TREE_CODE (context->outer_type) == RECORD_TYPE);
+
+ /* Dynamic casting has possibly upcast the type
+ in the hierarchy. In this case the outer type is less
+ informative than the inner type and we should forget
+ about it. */
+ if (!contains_type_p (context->outer_type, context->offset,
+ *otr_type))
+ {
+ context->outer_type = NULL;
+ return base_pointer;
+ }
+
+ /* If the function is a constructor or destructor, then
+ the type is possibly in construction, but we know
+ it is not a derived type. */
+ if (DECL_CXX_CONSTRUCTOR_P (fndecl)
+ || DECL_CXX_DESTRUCTOR_P (fndecl))
+ {
+ context->maybe_in_construction = true;
+ context->maybe_derived_type = false;
+ }
+ else
+ {
+ context->maybe_derived_type = true;
+ context->maybe_in_construction = false;
+ }
+ return base_pointer;
+ }
+ /* Non-PODs passed by value are really passed by invisible
+ reference. In this case we also know the type of the
+ object. */
+ if (DECL_BY_REFERENCE (SSA_NAME_VAR (base_pointer)))
+ {
+ context->outer_type = TREE_TYPE (TREE_TYPE (base_pointer));
+ gcc_assert (!POINTER_TYPE_P (context->outer_type));
+ /* Only type-inconsistent programs can have an otr_type that is
+ not part of the outer type. */
+ if (!contains_type_p (context->outer_type, context->offset,
+ *otr_type))
+ {
+ context->outer_type = NULL;
+ gcc_unreachable ();
+ return base_pointer;
+ }
+ context->maybe_derived_type = false;
+ context->maybe_in_construction = false;
+ return base_pointer;
+ }
+ }
+ /* TODO: There are multiple ways to derive a type. For instance,
+ BASE_POINTER may be passed to a constructor call prior to our reference.
+ We do not do this kind of flow-sensitive analysis yet. */
+ return base_pointer;
+}
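
A hypothetical C++ input (editorial illustration, not from the patch) showing the two outcomes of the SSA walk in get_polymorphic_call_info handled above: a call through the method's THIS pointer and a call through the address of a local object.

struct S
{
  virtual int f () { return 1; }
  int through_this ()
  {
    /* The base pointer is the default definition of 'this': outer type S,
       offset 0; in a constructor or destructor the walk also sets
       maybe_in_construction.  */
    return f ();
  }
};

int through_local ()
{
  S s;
  /* The base pointer walks back to &s: the outer type is the declared
     type S at offset 0, no derived type is possible, and construction
     is conservatively still assumed possible.  */
  return s.f ();
}

int main () { S s; return s.through_this () + through_local () - 2; }
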
+
+/* Walk the bases of OUTER_TYPE that contain OTR_TYPE at OFFSET.
+ Look up their respective virtual methods for OTR_TOKEN and OTR_TYPE
+ and insert them into NODES.
+
+ MATCHED_VTABLES and INSERTED are used to avoid duplicated work. */
+
+static void
+record_targets_from_bases (tree otr_type,
+ HOST_WIDE_INT otr_token,
+ tree outer_type,
+ HOST_WIDE_INT offset,
+ vec <cgraph_node *> nodes,
+ pointer_set_t *inserted,
+ pointer_set_t *matched_vtables,
+ bool *completep)
+{
+ while (true)
+ {
+ HOST_WIDE_INT pos, size;
+ tree base_binfo;
+ tree fld;
+
+ if (types_same_for_odr (outer_type, otr_type))
+ return;
+
+ for (fld = TYPE_FIELDS (outer_type); fld; fld = DECL_CHAIN (fld))
+ {
+ if (TREE_CODE (fld) != FIELD_DECL)
+ continue;
+
+ pos = int_bit_position (fld);
+ size = tree_to_shwi (DECL_SIZE (fld));
+ if (pos <= offset && (pos + size) > offset)
+ break;
+ }
+ /* Within a class type we should always find a corresponding field. */
+ gcc_assert (fld && TREE_CODE (TREE_TYPE (fld)) == RECORD_TYPE);
+
+ /* Non-base types should have been stripped by get_class_context. */
+ gcc_assert (DECL_ARTIFICIAL (fld));
+
+ outer_type = TREE_TYPE (fld);
+ offset -= pos;
+
+ base_binfo = get_binfo_at_offset (TYPE_BINFO (outer_type),
+ offset, otr_type);
+ gcc_assert (base_binfo);
+ if (!pointer_set_insert (matched_vtables, BINFO_VTABLE (base_binfo)))
+ {
+ tree target = gimple_get_virt_method_for_binfo (otr_token, base_binfo);
+ if (target)
+ maybe_record_node (nodes, target, inserted, completep);
+ /* The only way a method in an anonymous namespace can become unreferable
+ is if it has been fully optimized out. */
+ else if (flag_ltrans || !type_in_anonymous_namespace_p (outer_type))
+ *completep = false;
+ pointer_set_insert (matched_vtables, BINFO_VTABLE (base_binfo));
+ }
+ }
+}
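
The reason base vtables matter is ordinary C++ construction semantics; a short hypothetical example (editorial, not part of the patch) of the behavior record_targets_from_bases accounts for when maybe_in_construction is set.

#include <cstdio>

struct Base
{
  Base () { std::printf ("%d\n", get ()); }  /* prints 1, not 2 */
  virtual int get () { return 1; }
  virtual ~Base () {}
};

struct Derived : Base
{
  int get () { return 2; }
};

int main ()
{
  /* While Base::Base runs, the vptr refers to Base's vtable, so a
     polymorphic call may resolve to a base method; that is what the
     walk above records as a possible target.  */
  Derived d;
  return 0;
}
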
+
/* When virtual table is removed, we may need to flush the cache. */
static void
@@ -767,8 +1151,14 @@ devirt_variable_node_removal_hook (struct varpool_node *n,
}
/* Return vector containing possible targets of polymorphic call of type
- OTR_TYPE caling method OTR_TOKEN with OFFSET. If FINALp is non-NULL,
- store true if the list is complette.
+ OTR_TYPE calling method OTR_TOKEN within type OTR_OUTER_TYPE at OFFSET.
+ If INCLUDE_BASES is true, also walk the base types of OTR_OUTER_TYPE containing
+ OTR_TYPE and include their virtual methods. This is useful for types
+ possibly in construction or destruction where the virtual table may
+ temporarily change to one of the base types. INCLUDE_DERIVED_TYPES makes
+ us walk the inheritance graph for all derivations.
+
+ If COMPLETEP is non-NULL, store true if the list is complete.
CACHE_TOKEN (if non-NULL) will get stored to an unique ID of entry
in the target cache. If user needs to visit every target list
just once, it can memoize them.
@@ -780,32 +1170,44 @@ devirt_variable_node_removal_hook (struct varpool_node *n,
vec <cgraph_node *>
possible_polymorphic_call_targets (tree otr_type,
HOST_WIDE_INT otr_token,
- bool *finalp,
+ ipa_polymorphic_call_context context,
+ bool *completep,
void **cache_token)
{
static struct cgraph_node_hook_list *node_removal_hook_holder;
pointer_set_t *inserted;
pointer_set_t *matched_vtables;
vec <cgraph_node *> nodes=vNULL;
- odr_type type;
+ odr_type type, outer_type;
polymorphic_call_target_d key;
polymorphic_call_target_d **slot;
unsigned int i;
tree binfo, target;
+ bool final;
- if (finalp)
- *finalp = false;
+ type = get_odr_type (otr_type, true);
- type = get_odr_type (otr_type, false);
- /* If we do not have type in our hash it means we never seen any method
- in it. */
- if (!type)
- return nodes;
+ /* Lookup the outer class type we want to walk. */
+ if (context.outer_type)
+ get_class_context (&context, otr_type);
- /* For anonymous namespace types we can attempt to build full type.
- All derivations must be in this unit. */
- if (type->anonymous_namespace && finalp && !flag_ltrans)
- *finalp = true;
+ /* We now canonicalize our query, so we do not need extra hashtable entries. */
+
+ /* Without an outer type, we have no use for the offset. Just do the
+ basic search from the inner type. */
+ if (!context.outer_type)
+ {
+ context.outer_type = otr_type;
+ context.offset = 0;
+ }
+ /* We need to update our hierarchy if the type does not exist. */
+ outer_type = get_odr_type (context.outer_type, true);
+ /* If outer and inner type match, there are no bases to see. */
+ if (type == outer_type)
+ context.maybe_in_construction = false;
+ /* If the type is final, there are no derivations. */
+ if (TYPE_FINAL_P (outer_type->type))
+ context.maybe_derived_type = false;
/* Initialize query cache. */
if (!cached_polymorphic_call_targets)
@@ -824,43 +1226,75 @@ possible_polymorphic_call_targets (tree otr_type,
/* Lookup cached answer. */
key.type = type;
key.otr_token = otr_token;
+ key.context = context;
slot = polymorphic_call_target_hash.find_slot (&key, INSERT);
if (cache_token)
*cache_token = (void *)*slot;
if (*slot)
- return (*slot)->targets;
+ {
+ if (completep)
+ *completep = (*slot)->final;
+ return (*slot)->targets;
+ }
+
+ final = true;
/* Do actual search. */
timevar_push (TV_IPA_VIRTUAL_CALL);
*slot = XCNEW (polymorphic_call_target_d);
if (cache_token)
- *cache_token = (void *)*slot;
+ *cache_token = (void *)*slot;
(*slot)->type = type;
(*slot)->otr_token = otr_token;
+ (*slot)->context = context;
inserted = pointer_set_create ();
matched_vtables = pointer_set_create ();
/* First see virtual method of type itself. */
- binfo = TYPE_BINFO (type->type);
+ binfo = get_binfo_at_offset (TYPE_BINFO (outer_type->type),
+ context.offset, otr_type);
target = gimple_get_virt_method_for_binfo (otr_token, binfo);
if (target)
- maybe_record_node (nodes, target, inserted);
+ {
+ maybe_record_node (nodes, target, inserted, &final);
+
+ /* In case we get a final method, we don't need
+ to walk derivations. */
+ if (DECL_FINAL_P (target))
+ context.maybe_derived_type = false;
+ }
+ /* The only way a method in an anonymous namespace can become unreferable
+ is if it has been fully optimized out. */
+ else if (flag_ltrans || !type->anonymous_namespace)
+ final = false;
pointer_set_insert (matched_vtables, BINFO_VTABLE (binfo));
- /* TODO: If method is final, we can stop here and signaize that
- list is final. We need C++ FE to pass our info about final
- methods and classes. */
+ /* Next walk bases, if asked to. */
+ if (context.maybe_in_construction)
+ record_targets_from_bases (otr_type, otr_token, outer_type->type,
+ context.offset, nodes, inserted,
+ matched_vtables, &final);
- /* Walk recursively all derived types. Here we need to lookup proper basetype
- via their BINFO walk that is done by record_binfo */
- for (i = 0; i < type->derived_types.length (); i++)
- possible_polymorphic_call_targets_1 (nodes, inserted,
- matched_vtables,
- otr_type, type->derived_types[i],
- otr_token);
+ /* Finally walk recursively all derived types. */
+ if (context.maybe_derived_type)
+ {
+ /* For anonymous namespace types we can attempt to build the full type.
+ All derivations must be in this unit (unless we see a partial unit). */
+ if (!type->anonymous_namespace || flag_ltrans)
+ final = false;
+ for (i = 0; i < outer_type->derived_types.length(); i++)
+ possible_polymorphic_call_targets_1 (nodes, inserted,
+ matched_vtables,
+ otr_type, outer_type->derived_types[i],
+ otr_token, outer_type->type,
+ context.offset);
+ }
(*slot)->targets = nodes;
+ (*slot)->final = final;
+ if (completep)
+ *completep = final;
pointer_set_destroy (inserted);
pointer_set_destroy (matched_vtables);
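
A hypothetical C++11 example (editorial, not part of the patch) of the shortcuts above: DECL_FINAL_P on the target, or TYPE_FINAL_P on the outer type, lets the walk over derivations be skipped and the list be reported as complete.

struct Widget
{
  virtual int id () final { return 7; }   /* DECL_FINAL_P on the method  */
};

struct Gadget final : Widget { };         /* TYPE_FINAL_P on the class   */

int use (Widget *w)
{
  /* Either 'final' above lets possible_polymorphic_call_targets stop at
     Widget::id and report the target list as complete.  */
  return w->id ();
}

int main () { Gadget g; return use (&g) == 7 ? 0 : 1; }
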
@@ -872,8 +1306,9 @@ possible_polymorphic_call_targets (tree otr_type,
void
dump_possible_polymorphic_call_targets (FILE *f,
- tree otr_type,
- HOST_WIDE_INT otr_token)
+ tree otr_type,
+ HOST_WIDE_INT otr_token,
+ const ipa_polymorphic_call_context &ctx)
{
vec <cgraph_node *> targets;
bool final;
@@ -883,16 +1318,25 @@ dump_possible_polymorphic_call_targets (FILE *f,
if (!type)
return;
targets = possible_polymorphic_call_targets (otr_type, otr_token,
+ ctx,
&final);
- fprintf (f, "Targets of polymorphic call of type %i ", type->id);
+ fprintf (f, " Targets of polymorphic call of type %i:", type->id);
print_generic_expr (f, type->type, TDF_SLIM);
- fprintf (f, " token %i%s:",
- (int)otr_token,
- final ? " (full list)" : " (partial list, may call to other unit)");
+ fprintf (f, " token %i\n"
+ " Contained in type:",
+ (int)otr_token);
+ print_generic_expr (f, ctx.outer_type, TDF_SLIM);
+ fprintf (f, " at offset "HOST_WIDE_INT_PRINT_DEC"\n"
+ " %s%s%s\n ",
+ ctx.offset,
+ final ? "This is full list." :
+ "This is partial list; extra targets may be defined in other units.",
+ ctx.maybe_in_construction ? " (base types included)" : "",
+ ctx.maybe_derived_type ? " (derived types included)" : "");
for (i = 0; i < targets.length (); i++)
fprintf (f, " %s/%i", targets[i]->name (),
targets[i]->order);
- fprintf (f, "\n");
+ fprintf (f, "\n\n");
}
@@ -902,17 +1346,25 @@ dump_possible_polymorphic_call_targets (FILE *f,
bool
possible_polymorphic_call_target_p (tree otr_type,
HOST_WIDE_INT otr_token,
+ const ipa_polymorphic_call_context &ctx,
struct cgraph_node *n)
{
vec <cgraph_node *> targets;
unsigned int i;
+ enum built_in_function fcode;
bool final;
+ if (TREE_CODE (TREE_TYPE (n->decl)) == FUNCTION_TYPE
+ && ((fcode = DECL_FUNCTION_CODE (n->decl))
+ == BUILT_IN_UNREACHABLE
+ || fcode == BUILT_IN_TRAP))
+ return true;
+
if (!odr_hash.is_created ())
return true;
- targets = possible_polymorphic_call_targets (otr_type, otr_token, &final);
+ targets = possible_polymorphic_call_targets (otr_type, otr_token, ctx, &final);
for (i = 0; i < targets.length (); i++)
- if (n == targets[i])
+ if (symtab_semantically_equivalent_p (n, targets[i]))
return true;
/* At a moment we allow middle end to dig out new external declarations
@@ -935,7 +1387,7 @@ update_type_inheritance_graph (void)
return;
free_polymorphic_call_targets_hash ();
timevar_push (TV_IPA_INHERITANCE);
- /* We reconstruct the graph starting of types of all methods seen in the
+ /* We reconstruct the graph starting from the types of all methods seen in
the unit. */
FOR_EACH_FUNCTION (n)
if (DECL_VIRTUAL_P (n->decl)
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index fb05caec657..3d95de144f9 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -69,6 +69,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "print-tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
@@ -1838,9 +1841,9 @@ compute_bb_predicates (struct cgraph_node *node,
}
/* Entry block is always executable. */
- ENTRY_BLOCK_PTR_FOR_FUNCTION (my_function)->aux
+ ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
= pool_alloc (edge_predicate_pool);
- *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FUNCTION (my_function)->aux
+ *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
= true_predicate ();
/* A simple dataflow propagation of predicates forward in the CFG.
@@ -2063,7 +2066,7 @@ record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
return false;
bitmap_set_bit (info->bb_set,
SSA_NAME_IS_DEFAULT_DEF (vdef)
- ? ENTRY_BLOCK_PTR->index
+ ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
: gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
return false;
}
@@ -2099,7 +2102,7 @@ param_change_prob (gimple stmt, int i)
return REG_BR_PROB_BASE;
if (SSA_NAME_IS_DEFAULT_DEF (op))
- init_freq = ENTRY_BLOCK_PTR->frequency;
+ init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
else
init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
@@ -2139,8 +2142,8 @@ param_change_prob (gimple stmt, int i)
/* Assume that every memory is initialized at entry.
TODO: Can we easilly determine if value is always defined
and thus we may skip entry block? */
- if (ENTRY_BLOCK_PTR->frequency)
- max = ENTRY_BLOCK_PTR->frequency;
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
+ max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
else
max = 1;
@@ -2397,7 +2400,7 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
if (parms_info)
compute_bb_predicates (node, parms_info, info);
gcc_assert (cfun == my_function);
- order = XNEWVEC (int, n_basic_blocks);
+ order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
nblocks = pre_and_rev_post_order_compute (NULL, order, false);
for (n = 0; n < nblocks; n++)
{
@@ -2598,14 +2601,13 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
if (!early && nonconstant_names.exists ())
{
struct loop *loop;
- loop_iterator li;
predicate loop_iterations = true_predicate ();
predicate loop_stride = true_predicate ();
if (dump_file && (dump_flags & TDF_DETAILS))
flow_loops_dump (dump_file, NULL, 0);
scev_initialize ();
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
vec<edge> exits;
edge ex;
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index c9a373bc08f..fbb63da7dc8 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -94,6 +94,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "trans-mem.h"
+#include "calls.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 6db1caacdb1..39534425c53 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -22,6 +22,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tree.h"
#include "gimple.h"
+#include "expr.h"
+#include "stor-layout.h"
+#include "print-tree.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
@@ -386,6 +389,7 @@ ipa_set_jf_known_type (struct ipa_jump_func *jfunc, HOST_WIDE_INT offset,
jfunc->value.known_type.offset = offset,
jfunc->value.known_type.base_type = base_type;
jfunc->value.known_type.component_type = component_type;
+ gcc_assert (component_type);
}
/* Set JFUNC to be a copy of another jmp (to be used by jump function
@@ -1739,8 +1743,6 @@ ipa_note_param_call (struct cgraph_node *node, int param_index, gimple stmt)
cs = cgraph_edge (node, stmt);
cs->indirect_info->param_index = param_index;
- cs->indirect_info->offset = 0;
- cs->indirect_info->polymorphic = 0;
cs->indirect_info->agg_contents = 0;
cs->indirect_info->member_ptr = 0;
return cs;
@@ -1837,6 +1839,8 @@ ipa_analyze_indirect_call_uses (struct cgraph_node *node,
NULL, &by_ref))
{
struct cgraph_edge *cs = ipa_note_param_call (node, index, call);
+ if (cs->indirect_info->offset != offset)
+ cs->indirect_info->outer_type = NULL;
cs->indirect_info->offset = offset;
cs->indirect_info->agg_contents = 1;
cs->indirect_info->by_ref = by_ref;
@@ -1934,6 +1938,8 @@ ipa_analyze_indirect_call_uses (struct cgraph_node *node,
&& parm_preserved_before_stmt_p (&parms_ainfo[index], call, rec))
{
struct cgraph_edge *cs = ipa_note_param_call (node, index, call);
+ if (cs->indirect_info->offset != offset)
+ cs->indirect_info->outer_type = NULL;
cs->indirect_info->offset = offset;
cs->indirect_info->agg_contents = 1;
cs->indirect_info->member_ptr = 1;
@@ -2770,6 +2776,8 @@ update_indirect_edges_after_inlining (struct cgraph_edge *cs,
else
{
ici->param_index = ipa_get_jf_ancestor_formal_id (jfunc);
+ if (ipa_get_jf_ancestor_offset (jfunc))
+ ici->outer_type = NULL;
ici->offset += ipa_get_jf_ancestor_offset (jfunc);
}
}
@@ -4083,12 +4091,15 @@ ipa_write_indirect_edge_info (struct output_block *ob,
bp_pack_value (&bp, ii->agg_contents, 1);
bp_pack_value (&bp, ii->member_ptr, 1);
bp_pack_value (&bp, ii->by_ref, 1);
+ bp_pack_value (&bp, ii->maybe_in_construction, 1);
+ bp_pack_value (&bp, ii->maybe_derived_type, 1);
streamer_write_bitpack (&bp);
if (ii->polymorphic)
{
streamer_write_hwi (ob, ii->otr_token);
stream_write_tree (ob, ii->otr_type, true);
+ stream_write_tree (ob, ii->outer_type, true);
}
}
@@ -4110,10 +4121,13 @@ ipa_read_indirect_edge_info (struct lto_input_block *ib,
ii->agg_contents = bp_unpack_value (&bp, 1);
ii->member_ptr = bp_unpack_value (&bp, 1);
ii->by_ref = bp_unpack_value (&bp, 1);
+ ii->maybe_in_construction = bp_unpack_value (&bp, 1);
+ ii->maybe_derived_type = bp_unpack_value (&bp, 1);
if (ii->polymorphic)
{
ii->otr_token = (HOST_WIDE_INT) streamer_read_hwi (ib);
ii->otr_type = stream_read_tree (ib, data_in);
+ ii->outer_type = stream_read_tree (ib, data_in);
}
}
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index 52be4bf8869..ed96c3c21ff 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -36,6 +36,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "print-tree.h"
+#include "calls.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
@@ -789,17 +791,16 @@ end:
}
else
{
- loop_iterator li;
struct loop *loop;
scev_initialize ();
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
if (!finite_loop_p (loop))
{
if (dump_file)
fprintf (dump_file, " can not prove finiteness of "
"loop %i\n", loop->num);
l->looping =true;
- FOR_EACH_LOOP_BREAK (li);
+ break;
}
scev_finalize ();
}
@@ -1586,7 +1587,7 @@ local_pure_const (void)
/* Do NORETURN discovery. */
if (!skip && !TREE_THIS_VOLATILE (current_function_decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 0)
{
warn_function_noreturn (cfun->decl);
if (dump_file)
@@ -1722,7 +1723,7 @@ static unsigned int
execute_warn_function_noreturn (void)
{
if (!TREE_THIS_VOLATILE (current_function_decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 0)
warn_function_noreturn (current_function_decl);
return 0;
}
diff --git a/gcc/ipa-reference.c b/gcc/ipa-reference.c
index 9f80086c747..27208deb519 100644
--- a/gcc/ipa-reference.c
+++ b/gcc/ipa-reference.c
@@ -41,6 +41,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "calls.h"
#include "gimple.h"
#include "tree-inline.h"
#include "tree-pass.h"
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index e55b3f59dbc..d7d6b8fd70f 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -79,6 +79,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tree.h"
#include "gimple.h"
+#include "stringpool.h"
+#include "expr.h"
+#include "calls.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
@@ -89,6 +92,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
@@ -206,7 +210,7 @@ verify_non_ssa_vars (struct split_point *current, bitmap non_ssa_vars,
bool ok = true;
FOR_EACH_EDGE (e, ei, current->entry_bb->preds)
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !bitmap_bit_p (current->split_bbs, e->src->index))
{
worklist.safe_push (e->src);
@@ -219,7 +223,7 @@ verify_non_ssa_vars (struct split_point *current, bitmap non_ssa_vars,
basic_block bb = worklist.pop ();
FOR_EACH_EDGE (e, ei, bb->preds)
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_set_bit (seen, e->src->index))
{
gcc_checking_assert (!bitmap_bit_p (current->split_bbs,
@@ -392,7 +396,7 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
/* Do not split when we would end up calling function anyway. */
if (incoming_freq
- >= (ENTRY_BLOCK_PTR->frequency
+ >= (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency
* PARAM_VALUE (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100))
{
/* When profile is guessed, we can not expect it to give us
@@ -402,13 +406,13 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
is likely noticeable win. */
if (back_edge
&& profile_status != PROFILE_READ
- && incoming_freq < ENTRY_BLOCK_PTR->frequency)
+ && incoming_freq < ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
" Split before loop, accepting despite low frequencies %i %i.\n",
incoming_freq,
- ENTRY_BLOCK_PTR->frequency);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
}
else
{
@@ -579,7 +583,7 @@ consider_split (struct split_point *current, bitmap non_ssa_vars,
/* split_function fixes up at most one PHI non-virtual PHI node in return_bb,
for the return value. If there are other PHIs, give up. */
- if (return_bb != EXIT_BLOCK_PTR)
+ if (return_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
gimple_stmt_iterator psi;
@@ -646,15 +650,15 @@ static basic_block
find_return_bb (void)
{
edge e;
- basic_block return_bb = EXIT_BLOCK_PTR;
+ basic_block return_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
gimple_stmt_iterator bsi;
bool found_return = false;
tree retval = NULL_TREE;
- if (!single_pred_p (EXIT_BLOCK_PTR))
+ if (!single_pred_p (EXIT_BLOCK_PTR_FOR_FN (cfun)))
return return_bb;
- e = single_pred_edge (EXIT_BLOCK_PTR);
+ e = single_pred_edge (EXIT_BLOCK_PTR_FOR_FN (cfun));
for (bsi = gsi_last_bb (e->src); !gsi_end_p (bsi); gsi_prev (&bsi))
{
gimple stmt = gsi_stmt (bsi);
@@ -933,7 +937,7 @@ find_split_points (int overall_time, int overall_size)
current.split_size = 0;
current.ssa_names_to_pass = BITMAP_ALLOC (NULL);
- first.bb = ENTRY_BLOCK_PTR;
+ first.bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
first.edge_num = 0;
first.overall_time = 0;
first.overall_size = 0;
@@ -942,7 +946,7 @@ find_split_points (int overall_time, int overall_size)
first.used_ssa_names = 0;
first.bbs_visited = 0;
stack.safe_push (first);
- ENTRY_BLOCK_PTR->aux = (void *)(intptr_t)-1;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = (void *)(intptr_t)-1;
while (!stack.is_empty ())
{
@@ -953,7 +957,7 @@ find_split_points (int overall_time, int overall_size)
articulation, we want to have processed everything reachable
from articulation but nothing that reaches into it. */
if (entry->edge_num == EDGE_COUNT (entry->bb->succs)
- && entry->bb != ENTRY_BLOCK_PTR)
+ && entry->bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
int pos = stack.length ();
entry->can_split &= visit_bb (entry->bb, return_bb,
@@ -1005,7 +1009,7 @@ find_split_points (int overall_time, int overall_size)
entry->edge_num++;
/* New BB to visit, push it to the stack. */
- if (dest != return_bb && dest != EXIT_BLOCK_PTR
+ if (dest != return_bb && dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !dest->aux)
{
stack_entry new_entry;
@@ -1033,7 +1037,7 @@ find_split_points (int overall_time, int overall_size)
}
/* We are done with examining the edges. Pop off the value from stack
and merge stuff we accumulate during the walk. */
- else if (entry->bb != ENTRY_BLOCK_PTR)
+ else if (entry->bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
stack_entry *prev = &stack[stack.length () - 2];
@@ -1059,7 +1063,7 @@ find_split_points (int overall_time, int overall_size)
else
stack.pop ();
}
- ENTRY_BLOCK_PTR->aux = NULL;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = NULL;
FOR_EACH_BB (bb)
bb->aux = NULL;
stack.release ();
@@ -1135,7 +1139,7 @@ split_function (struct split_point *split_point)
if (!split_part_return_p)
;
/* We have no return block, so nothing is needed. */
- else if (return_bb == EXIT_BLOCK_PTR)
+ else if (return_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
;
/* When we do not want to return value, we need to construct
new return block with empty return statement.
@@ -1162,7 +1166,7 @@ split_function (struct split_point *split_point)
break;
}
}
- e = make_edge (new_return_bb, EXIT_BLOCK_PTR, 0);
+ e = make_edge (new_return_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
e->probability = REG_BR_PROB_BASE;
e->count = new_return_bb->count;
if (current_loops)
@@ -1179,7 +1183,7 @@ split_function (struct split_point *split_point)
Note this can happen whether or not we have a return value. If we have
a return value, then RETURN_BB may have PHIs for real operands too. */
- if (return_bb != EXIT_BLOCK_PTR)
+ if (return_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
bool phi_p = false;
for (gsi = gsi_start_phis (return_bb); !gsi_end_p (gsi);)
@@ -1321,7 +1325,7 @@ split_function (struct split_point *split_point)
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
var = BLOCK_VARS (DECL_INITIAL (node->decl));
i = vec_safe_length (*debug_args);
- cgsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ cgsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
do
{
i -= 2;
@@ -1362,13 +1366,14 @@ split_function (struct split_point *split_point)
else
{
e = make_edge (call_bb, return_bb,
- return_bb == EXIT_BLOCK_PTR ? 0 : EDGE_FALLTHRU);
+ return_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ ? 0 : EDGE_FALLTHRU);
e->count = call_bb->count;
e->probability = REG_BR_PROB_BASE;
/* If there is return basic block, see what value we need to store
return value into and put call just before it. */
- if (return_bb != EXIT_BLOCK_PTR)
+ if (return_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
real_retval = retval = find_retval (return_bb);
diff --git a/gcc/ipa-utils.c b/gcc/ipa-utils.c
index db775f4cdcc..1aa239d3b7d 100644
--- a/gcc/ipa-utils.c
+++ b/gcc/ipa-utils.c
@@ -700,8 +700,8 @@ ipa_merge_profiles (struct cgraph_node *dst,
cgraph_get_body (dst);
srccfun = DECL_STRUCT_FUNCTION (src->decl);
dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
- if (n_basic_blocks_for_function (srccfun)
- != n_basic_blocks_for_function (dstcfun))
+ if (n_basic_blocks_for_fn (srccfun)
+ != n_basic_blocks_for_fn (dstcfun))
{
if (cgraph_dump_file)
fprintf (cgraph_dump_file,
diff --git a/gcc/ipa-utils.h b/gcc/ipa-utils.h
index ca8b87290b5..b52742517ca 100644
--- a/gcc/ipa-utils.h
+++ b/gcc/ipa-utils.h
@@ -34,6 +34,21 @@ struct ipa_dfs_info {
PTR aux;
};
+/* Context of a polymorphic call. This is used by ipa-devirt walkers of the
+ type inheritance graph. */
+struct ipa_polymorphic_call_context {
+ /* The called object appears in an object of type OUTER_TYPE
+ at offset OFFSET. */
+ HOST_WIDE_INT offset;
+ tree outer_type;
+ /* True if outer object may be in construction or destruction. */
+ bool maybe_in_construction;
+ /* True if outer object may be of derived type. */
+ bool maybe_derived_type;
+};
+
+/* Context representing "I know nothing". */
+extern const ipa_polymorphic_call_context ipa_dummy_polymorphic_call_context;
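
A self-contained mock (editorial; the 'tree' and HOST_WIDE_INT typedefs are hypothetical stand-ins) of the aggregate-initialization order that the brace initializers later in this header rely on; {0, NULL, false, true} is the conservative "know nothing" shape used for generic queries elsewhere in this patch.

#include <cstdio>

typedef long HOST_WIDE_INT;   /* stand-in for the configured GCC type */
typedef void *tree;           /* stand-in; the real 'tree' is a union pointer */

struct ipa_polymorphic_call_context {
  HOST_WIDE_INT offset;
  tree outer_type;
  bool maybe_in_construction;
  bool maybe_derived_type;
};

int main ()
{
  /* No outer type known, derivations possible: the most conservative query. */
  ipa_polymorphic_call_context dummy = {0, NULL, false, true};
  std::printf ("construction=%d derived=%d\n",
               (int) dummy.maybe_in_construction,
               (int) dummy.maybe_derived_type);
  return 0;
}
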
/* In ipa-utils.c */
void ipa_print_order (FILE*, const char *, struct cgraph_node**, int);
@@ -59,13 +74,19 @@ void build_type_inheritance_graph (void);
void update_type_inheritance_graph (void);
vec <cgraph_node *>
possible_polymorphic_call_targets (tree, HOST_WIDE_INT,
+ ipa_polymorphic_call_context,
bool *final = NULL,
void **cache_token = NULL);
odr_type get_odr_type (tree, bool insert = false);
-void dump_possible_polymorphic_call_targets (FILE *, tree, HOST_WIDE_INT);
+void dump_possible_polymorphic_call_targets (FILE *, tree, HOST_WIDE_INT,
+ const ipa_polymorphic_call_context &);
bool possible_polymorphic_call_target_p (tree, HOST_WIDE_INT,
+ const ipa_polymorphic_call_context &,
struct cgraph_node *n);
tree method_class_type (tree);
+tree get_polymorphic_call_info (tree, tree, tree *,
+ HOST_WIDE_INT *,
+ ipa_polymorphic_call_context *);
/* Return vector containing possible targets of polymorphic call E.
If FINALP is non-NULL, store true if the list is complette.
@@ -83,8 +104,27 @@ possible_polymorphic_call_targets (struct cgraph_edge *e,
void **cache_token = NULL)
{
gcc_checking_assert (e->indirect_info->polymorphic);
+ ipa_polymorphic_call_context context = {e->indirect_info->offset,
+ e->indirect_info->outer_type,
+ e->indirect_info->maybe_in_construction,
+ e->indirect_info->maybe_derived_type};
return possible_polymorphic_call_targets (e->indirect_info->otr_type,
e->indirect_info->otr_token,
+ context,
+ final, cache_token);
+}
+
+/* Same as above but taking an OBJ_TYPE_REF as a parameter. */
+
+inline vec <cgraph_node *>
+possible_polymorphic_call_targets (tree call,
+ bool *final = NULL,
+ void **cache_token = NULL)
+{
+ return possible_polymorphic_call_targets (obj_type_ref_class (call),
+ tree_to_uhwi
+ (OBJ_TYPE_REF_TOKEN (call)),
+ ipa_dummy_polymorphic_call_context,
final, cache_token);
}
@@ -94,8 +134,13 @@ inline void
dump_possible_polymorphic_call_targets (FILE *f, struct cgraph_edge *e)
{
gcc_checking_assert (e->indirect_info->polymorphic);
+ ipa_polymorphic_call_context context = {e->indirect_info->offset,
+ e->indirect_info->outer_type,
+ e->indirect_info->maybe_in_construction,
+ e->indirect_info->maybe_derived_type};
dump_possible_polymorphic_call_targets (f, e->indirect_info->otr_type,
- e->indirect_info->otr_token);
+ e->indirect_info->otr_token,
+ context);
}
/* Return true if N can be possibly target of a polymorphic call of
@@ -105,8 +150,13 @@ inline bool
possible_polymorphic_call_target_p (struct cgraph_edge *e,
struct cgraph_node *n)
{
+ ipa_polymorphic_call_context context = {e->indirect_info->offset,
+ e->indirect_info->outer_type,
+ e->indirect_info->maybe_in_construction,
+ e->indirect_info->maybe_derived_type};
return possible_polymorphic_call_target_p (e->indirect_info->otr_type,
- e->indirect_info->otr_token, n);
+ e->indirect_info->otr_token,
+ context, n);
}
/* Return true if N can be possibly target of a polymorphic call of
@@ -118,7 +168,8 @@ possible_polymorphic_call_target_p (tree call,
{
return possible_polymorphic_call_target_p (obj_type_ref_class (call),
tree_to_uhwi
- (OBJ_TYPE_REF_TOKEN (call)),
+ (OBJ_TYPE_REF_TOKEN (call)),
+ ipa_dummy_polymorphic_call_context,
n);
}
#endif /* GCC_IPA_UTILS_H */
diff --git a/gcc/ipa.c b/gcc/ipa.c
index e541090ee3a..3950d4eb856 100644
--- a/gcc/ipa.c
+++ b/gcc/ipa.c
@@ -22,6 +22,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "calls.h"
+#include "stringpool.h"
#include "cgraph.h"
#include "tree-pass.h"
#include "gimple.h"
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index ed513767f3c..e249ba0dcff 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -1745,7 +1745,7 @@ ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSE
ira_loop_tree_node_t pred_node;
basic_block pred_bb = e->src;
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
@@ -3496,7 +3496,7 @@ ira_build (void)
}
fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
current_loops == NULL ? 1 : number_of_loops (cfun),
- n_basic_blocks, ira_max_point);
+ n_basic_blocks_for_fn (cfun), ira_max_point);
fprintf (ira_dump_file,
" allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
ira_allocnos_num, nr_big, ira_copies_num, n, nr);
diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 6c52a2b7245..30282aad974 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -3100,7 +3100,7 @@ print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
fprintf (ira_dump_file, " %d", subloop_node->bb->index);
FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
!= loop_tree_node))
fprintf (ira_dump_file, "(->%d:l%d)",
diff --git a/gcc/ira-emit.c b/gcc/ira-emit.c
index cdd694176aa..198fa47b702 100644
--- a/gcc/ira-emit.c
+++ b/gcc/ira-emit.c
@@ -403,7 +403,7 @@ entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
if (bb_node->bb != NULL)
{
FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
{
for (parent = src_loop_node->parent;
@@ -1263,7 +1263,7 @@ ira_emit (bool loops_p)
at_bb_start[bb->index] = NULL;
at_bb_end[bb->index] = NULL;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
generate_edge_moves (e);
}
allocno_last_set
diff --git a/gcc/ira-int.h b/gcc/ira-int.h
index b9b21ba27af..b46e7b00274 100644
--- a/gcc/ira-int.h
+++ b/gcc/ira-int.h
@@ -43,8 +43,9 @@ along with GCC; see the file COPYING3. If not see
executed, frequency is always equivalent. Otherwise rescale the
edge frequency. */
#define REG_FREQ_FROM_EDGE_FREQ(freq) \
- (optimize_size || (flag_branch_probabilities && !ENTRY_BLOCK_PTR->count) \
- ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
+ (optimize_size || (flag_branch_probabilities \
+ && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count) \
+ ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
/* A modified value of flag `-fira-verbose' used internally. */
diff --git a/gcc/ira.c b/gcc/ira.c
index dbc5a0ad997..93a2bbdc90e 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -4522,6 +4522,9 @@ find_moveable_pseudos (void)
pseudo_replaced_reg.release ();
pseudo_replaced_reg.safe_grow_cleared (max_regs);
+ df_analyze ();
+ calculate_dominance_info (CDI_DOMINATORS);
+
i = 0;
bitmap_initialize (&live, 0);
bitmap_initialize (&used, 0);
@@ -4834,6 +4837,14 @@ find_moveable_pseudos (void)
free (bb_moveable_reg_sets);
last_moveable_pseudo = max_reg_num ();
+
+ fix_reg_equiv_init ();
+ expand_reg_info ();
+ regstat_free_n_sets_and_refs ();
+ regstat_free_ri ();
+ regstat_init_n_sets_and_refs ();
+ regstat_compute_ri ();
+ free_dominance_info (CDI_DOMINATORS);
}
@@ -4865,7 +4876,7 @@ static bool
split_live_ranges_for_shrink_wrap (void)
{
basic_block bb, call_dom = NULL;
- basic_block first = single_succ (ENTRY_BLOCK_PTR);
+ basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
rtx insn, last_interesting_insn = NULL;
bitmap_head need_new, reachable;
vec<basic_block> queue;
@@ -4875,7 +4886,7 @@ split_live_ranges_for_shrink_wrap (void)
bitmap_initialize (&need_new, 0);
bitmap_initialize (&reachable, 0);
- queue.create (n_basic_blocks);
+ queue.create (n_basic_blocks_for_fn (cfun));
FOR_EACH_BB (bb)
FOR_BB_INSNS (bb, insn)
@@ -4910,7 +4921,7 @@ split_live_ranges_for_shrink_wrap (void)
bb = queue.pop ();
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_set_bit (&reachable, e->dest->index))
queue.quick_push (e->dest);
}
@@ -5194,7 +5205,19 @@ ira (FILE *f)
#endif
df_analyze ();
+ init_reg_equiv ();
+ if (ira_conflicts_p)
+ {
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ if (split_live_ranges_for_shrink_wrap ())
+ df_analyze ();
+
+ free_dominance_info (CDI_DOMINATORS);
+ }
+
df_clear_flags (DF_NO_INSN_RESCAN);
+
regstat_init_n_sets_and_refs ();
regstat_compute_ri ();
@@ -5212,7 +5235,6 @@ ira (FILE *f)
if (resize_reg_info () && flag_ira_loop_pressure)
ira_set_pseudo_classes (true, ira_dump_file);
- init_reg_equiv ();
rebuild_p = update_equiv_regs ();
setup_reg_equiv ();
setup_reg_equiv_init ();
@@ -5235,22 +5257,7 @@ ira (FILE *f)
allocation because of -O0 usage or because the function is too
big. */
if (ira_conflicts_p)
- {
- df_analyze ();
- calculate_dominance_info (CDI_DOMINATORS);
-
- find_moveable_pseudos ();
- if (split_live_ranges_for_shrink_wrap ())
- df_analyze ();
-
- fix_reg_equiv_init ();
- expand_reg_info ();
- regstat_free_n_sets_and_refs ();
- regstat_free_ri ();
- regstat_init_n_sets_and_refs ();
- regstat_compute_ri ();
- free_dominance_info (CDI_DOMINATORS);
- }
+ find_moveable_pseudos ();
max_regno_before_ira = max_reg_num ();
ira_setup_eliminable_regset (true);
diff --git a/gcc/java/ChangeLog b/gcc/java/ChangeLog
index 3ffa76da8c2..0e277e4bf40 100644
--- a/gcc/java/ChangeLog
+++ b/gcc/java/ChangeLog
@@ -12,6 +12,30 @@
* java-gimplify.c: Include only gimplify.h and gimple.h as needed.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * builtins.c: Include stor-layout.h.
+ Include stringpool.h.
+ * class.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include varasm.h.
+ * constants.c: Include stringpool.h.
+ Include stor-layout.h.
+ * decl.c: Include stor-layout.h.
+ Include stringpool.h.
+ Include varasm.h.
+ * except.c: Include stringpool.h.
+ Include stor-layout.h.
+ * expr.c: Include stringpool.h.
+ Include stor-layout.h.
+ * jcf-parse.c: Include stringpool.h.
+ * mangle.c: Include stringpool.h.
+ * resource.c: Include stringpool.h.
+ Include stor-layout.h.
+ * typeck.c: Include stor-layout.h.
+ Include stringpool.h.
+ * verify-glue.c: Include stringpool.h.
+
2013-11-12 Andrew MacLeod <amacleod@redhat.com>
* java-gimplify.c: Include gimplify.h.
diff --git a/gcc/java/builtins.c b/gcc/java/builtins.c
index c05543782ae..46d1d86820c 100644
--- a/gcc/java/builtins.c
+++ b/gcc/java/builtins.c
@@ -31,6 +31,8 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "ggc.h"
#include "flags.h"
#include "langhooks.h"
diff --git a/gcc/java/class.c b/gcc/java/class.c
index 251873e0811..58bc92f94d9 100644
--- a/gcc/java/class.c
+++ b/gcc/java/class.c
@@ -27,6 +27,9 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "flags.h"
#include "java-tree.h"
#include "jcf.h"
diff --git a/gcc/java/constants.c b/gcc/java/constants.c
index ca411b5f9ff..64f6e696403 100644
--- a/gcc/java/constants.c
+++ b/gcc/java/constants.c
@@ -26,6 +26,8 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "tm.h"
#include "jcf.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "java-tree.h"
#include "diagnostic-core.h"
#include "toplev.h"
diff --git a/gcc/java/decl.c b/gcc/java/decl.c
index 0a2cecc3fef..70e21b071f0 100644
--- a/gcc/java/decl.c
+++ b/gcc/java/decl.c
@@ -28,6 +28,9 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "flags.h"
diff --git a/gcc/java/except.c b/gcc/java/except.c
index b6d0c8d6c0f..9674abac22c 100644
--- a/gcc/java/except.c
+++ b/gcc/java/except.c
@@ -26,6 +26,8 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "java-tree.h"
#include "javaop.h"
#include "java-opcodes.h"
diff --git a/gcc/java/expr.c b/gcc/java/expr.c
index 268d193e257..7ae852f5276 100644
--- a/gcc/java/expr.c
+++ b/gcc/java/expr.c
@@ -33,6 +33,8 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
PARM_BOUNDARY. */
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "flags.h"
#include "java-tree.h"
#include "javaop.h"
diff --git a/gcc/java/jcf-parse.c b/gcc/java/jcf-parse.c
index 750a17faec6..fbd332cb7c0 100644
--- a/gcc/java/jcf-parse.c
+++ b/gcc/java/jcf-parse.c
@@ -27,6 +27,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "obstack.h"
#include "flags.h"
#include "java-except.h"
diff --git a/gcc/java/mangle.c b/gcc/java/mangle.c
index 10557e94ebd..c4e088303c1 100644
--- a/gcc/java/mangle.c
+++ b/gcc/java/mangle.c
@@ -29,6 +29,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "coretypes.h"
#include "jcf.h"
#include "tree.h"
+#include "stringpool.h"
#include "java-tree.h"
#include "obstack.h"
#include "diagnostic-core.h"
diff --git a/gcc/java/resource.c b/gcc/java/resource.c
index 54403d9f187..17155b8c362 100644
--- a/gcc/java/resource.c
+++ b/gcc/java/resource.c
@@ -25,6 +25,8 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "java-tree.h"
#include "jcf.h"
#include "diagnostic-core.h"
diff --git a/gcc/java/typeck.c b/gcc/java/typeck.c
index 004ebf151b7..39ee4199908 100644
--- a/gcc/java/typeck.c
+++ b/gcc/java/typeck.c
@@ -27,6 +27,8 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "obstack.h"
#include "flags.h"
#include "java-tree.h"
diff --git a/gcc/java/verify-glue.c b/gcc/java/verify-glue.c
index 21acc4259df..022f4c4e058 100644
--- a/gcc/java/verify-glue.c
+++ b/gcc/java/verify-glue.c
@@ -28,6 +28,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#include "parse.h"
#include "verify.h"
diff --git a/gcc/langhooks.c b/gcc/langhooks.c
index ec0dd4d75ee..4b72f2a6238 100644
--- a/gcc/langhooks.c
+++ b/gcc/langhooks.c
@@ -25,6 +25,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "toplev.h"
#include "tree.h"
+#include "stringpool.h"
+#include "attribs.h"
#include "tree-inline.h"
#include "gimple.h"
#include "gimplify.h"
diff --git a/gcc/lcm.c b/gcc/lcm.c
index c13d2a6aa51..aa63c7272f0 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -101,7 +101,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks);
+ qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
@@ -116,13 +116,13 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
}
qin = worklist;
- qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
- qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+ qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
/* Mark blocks which are predecessors of the exit block so that we
can easily identify them below. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- e->src->aux = EXIT_BLOCK_PTR;
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
+ e->src->aux = EXIT_BLOCK_PTR_FOR_FN (cfun);
/* Iterate until the worklist is empty. */
while (qlen)
@@ -134,7 +134,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
if (qout >= qend)
qout = worklist;
- if (bb->aux == EXIT_BLOCK_PTR)
+ if (bb->aux == EXIT_BLOCK_PTR_FOR_FN (cfun))
/* Do not clear the aux field for blocks which are predecessors of
the EXIT block. That way we never add then to the worklist
again. */
@@ -153,7 +153,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
to add the predecessors of this block to the worklist
if they are not already on the worklist. */
FOR_EACH_EDGE (e, ei, bb->preds)
- if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
+ if (!e->src->aux && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
*qin++ = e->src;
e->src->aux = e;
@@ -188,11 +188,11 @@ compute_earliest (struct edge_list *edge_list, int n_exprs, sbitmap *antin,
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
- if (pred == ENTRY_BLOCK_PTR)
+ if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_copy (earliest[x], antin[succ->index]);
else
{
- if (succ == EXIT_BLOCK_PTR)
+ if (succ == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_clear (earliest[x]);
else
{
@@ -254,7 +254,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = XNEWVEC (basic_block, n_basic_blocks);
+ = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
@@ -276,7 +276,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
do not want to be overly optimistic. Consider an outgoing edge from
the entry block. That edge should always have a LATER value the
same as EARLIEST for that edge. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
bitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]);
/* Add all the blocks to the worklist. This prevents an early exit from
@@ -290,8 +290,8 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
/* Note that we do not use the last allocated element for our queue,
as EXIT_BLOCK is never inserted into it. */
qin = worklist;
- qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
- qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+ qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
/* Iterate until the worklist is empty. */
while (qlen)
@@ -317,7 +317,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
antloc[e->src->index])
/* If LATER for an outgoing edge was changed, then we need
to add the target of the outgoing edge to the worklist. */
- && e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest->aux == 0)
{
*qin++ = e->dest;
e->dest->aux = e;
@@ -331,7 +331,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
for the EXIT block. We allocated an extra entry in the LATERIN array
for just this purpose. */
bitmap_ones (laterin[last_basic_block]);
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
bitmap_and (laterin[last_basic_block],
laterin[last_basic_block],
later[(size_t) e->aux]);
@@ -358,7 +358,7 @@ compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
{
basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
- if (b == EXIT_BLOCK_PTR)
+ if (b == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_and_compl (insert[x], later[x], laterin[last_basic_block]);
else
bitmap_and_compl (insert[x], later[x], laterin[b->index]);
@@ -481,7 +481,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist =
- XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
+ XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
/* We want a maximal solution. */
bitmap_vector_ones (avout, last_basic_block);
@@ -495,13 +495,13 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
}
qin = worklist;
- qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
- qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+ qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
/* Mark blocks which are successors of the entry block so that we
can easily identify them below. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
- e->dest->aux = ENTRY_BLOCK_PTR;
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
+ e->dest->aux = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Iterate until the worklist is empty. */
while (qlen)
@@ -516,7 +516,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
/* If one of the predecessor blocks is the ENTRY block, then the
intersection of avouts is the null set. We can identify such blocks
by the special value in the AUX field in the block structure. */
- if (bb->aux == ENTRY_BLOCK_PTR)
+ if (bb->aux == ENTRY_BLOCK_PTR_FOR_FN (cfun))
/* Do not clear the aux field for blocks which are successors of the
ENTRY block. That way we never add then to the worklist again. */
bitmap_clear (avin[bb->index]);
@@ -534,7 +534,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
to add the successors of this block to the worklist
if they are not already on the worklist. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
+ if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
*qin++ = e->dest;
e->dest->aux = e;
@@ -570,11 +570,11 @@ compute_farthest (struct edge_list *edge_list, int n_exprs,
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
- if (succ == EXIT_BLOCK_PTR)
+ if (succ == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_copy (farthest[x], st_avout[pred->index]);
else
{
- if (pred == ENTRY_BLOCK_PTR)
+ if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_clear (farthest[x]);
else
{
@@ -610,7 +610,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
/* Initialize NEARER for each edge and build a mapping from an edge to
its index. */
@@ -624,7 +624,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
do not want to be overly optimistic. Consider an incoming edge to
the exit block. That edge should always have a NEARER value the
same as FARTHEST for that edge. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
bitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]);
/* Add all the blocks to the worklist. This prevents an early exit
@@ -656,7 +656,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
st_avloc[e->dest->index])
/* If NEARER for an incoming edge was changed, then we need
to add the source of the incoming edge to the worklist. */
- && e->src != ENTRY_BLOCK_PTR && e->src->aux == 0)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun) && e->src->aux == 0)
{
*tos++ = e->src;
e->src->aux = e;
@@ -667,7 +667,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
for the ENTRY block. We allocated an extra entry in the NEAREROUT array
for just this purpose. */
bitmap_ones (nearerout[last_basic_block]);
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
bitmap_and (nearerout[last_basic_block],
nearerout[last_basic_block],
nearer[(size_t) e->aux]);
@@ -693,7 +693,7 @@ compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
- if (b == ENTRY_BLOCK_PTR)
+ if (b == ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_and_compl (insert[x], nearer[x], nearerout[last_basic_block]);
else
bitmap_and_compl (insert[x], nearer[x], nearerout[b->index]);
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index a2173dbd126..0afd79dbc08 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -735,10 +735,9 @@ doloop_optimize (struct loop *loop)
void
doloop_optimize_loops (void)
{
- loop_iterator li;
struct loop *loop;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
doloop_optimize (loop);
}
diff --git a/gcc/loop-init.c b/gcc/loop-init.c
index 8cc96af10fd..664b1ace427 100644
--- a/gcc/loop-init.c
+++ b/gcc/loop-init.c
@@ -135,7 +135,6 @@ loop_optimizer_init (unsigned flags)
void
loop_optimizer_finalize (void)
{
- loop_iterator li;
struct loop *loop;
basic_block bb;
@@ -162,10 +161,8 @@ loop_optimizer_finalize (void)
gcc_assert (current_loops != NULL);
- FOR_EACH_LOOP (li, loop, 0)
- {
- free_simple_loop_desc (loop);
- }
+ FOR_EACH_LOOP (loop, 0)
+ free_simple_loop_desc (loop);
/* Clean up. */
flow_loops_free (current_loops);
@@ -199,7 +196,6 @@ fix_loop_structure (bitmap changed_bbs)
{
basic_block bb;
int record_exits = 0;
- loop_iterator li;
struct loop *loop;
unsigned old_nloops, i;
@@ -224,7 +220,7 @@ fix_loop_structure (bitmap changed_bbs)
loops, so that when we remove the loops, we know that the loops inside
are preserved, and do not waste time relinking loops that will be
removed later. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
/* Detect the case that the loop is no longer present even though
it wasn't marked for removal.
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index c5d6b5ac29d..efe2e7a9f16 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -1815,9 +1815,8 @@ calculate_loop_reg_pressure (void)
basic_block bb;
rtx insn, link;
struct loop *loop, *parent;
- loop_iterator li;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
if (loop->aux == NULL)
{
loop->aux = xcalloc (1, sizeof (struct loop_data));
@@ -1884,7 +1883,7 @@ calculate_loop_reg_pressure (void)
bitmap_clear (&curr_regs_live);
if (flag_ira_region == IRA_REGION_MIXED
|| flag_ira_region == IRA_REGION_ALL)
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
if (! bitmap_bit_p (&LOOP_DATA (loop)->regs_ref, j))
@@ -1898,7 +1897,7 @@ calculate_loop_reg_pressure (void)
}
if (dump_file == NULL)
return;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
parent = loop_outer (loop);
fprintf (dump_file, "\n Loop %d (parent %d, header bb%d, depth %d)\n",
@@ -1933,7 +1932,6 @@ void
move_loop_invariants (void)
{
struct loop *loop;
- loop_iterator li;
if (flag_ira_loop_pressure)
{
@@ -1945,7 +1943,7 @@ move_loop_invariants (void)
}
df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
/* Process the loops, innermost first. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
curr_loop = loop;
/* move_single_loop_invariants for very large loops
@@ -1954,7 +1952,7 @@ move_loop_invariants (void)
move_single_loop_invariants (loop);
}
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
free_loop_data (loop);
}
diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c
index 6b5f82b16b1..01f8dceb061 100644
--- a/gcc/loop-iv.c
+++ b/gcc/loop-iv.c
@@ -1937,7 +1937,7 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
return;
e = loop_preheader_edge (loop);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
altered = ALLOC_REG_SET (&reg_obstack);
@@ -2068,7 +2068,7 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
}
if (!single_pred_p (e->src)
- || single_pred (e->src) == ENTRY_BLOCK_PTR)
+ || single_pred (e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
break;
e = single_pred_edge (e->src);
}
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index ac43f582d36..25e06461cf0 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -269,7 +269,6 @@ unroll_and_peel_loops (int flags)
{
struct loop *loop;
bool changed = false;
- loop_iterator li;
/* First perform complete loop peeling (it is almost surely a win,
and affects parameters for further decision a lot). */
@@ -279,7 +278,7 @@ unroll_and_peel_loops (int flags)
decide_unrolling_and_peeling (flags);
/* Scan the loops, inner ones first. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
/* And perform the appropriate transformations. */
switch (loop->lpt_decision.decision)
@@ -345,11 +344,10 @@ static void
peel_loops_completely (int flags)
{
struct loop *loop;
- loop_iterator li;
bool changed = false;
/* Scan the loops, the inner ones first. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
loop->lpt_decision.decision = LPT_NONE;
location_t locus = get_loop_location (loop);
@@ -386,10 +384,9 @@ static void
decide_unrolling_and_peeling (int flags)
{
struct loop *loop;
- loop_iterator li;
/* Scan the loops, inner ones first. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
loop->lpt_decision.decision = LPT_NONE;
location_t locus = get_loop_location (loop);
diff --git a/gcc/loop-unswitch.c b/gcc/loop-unswitch.c
index 219c943545b..c8f1281a0ef 100644
--- a/gcc/loop-unswitch.c
+++ b/gcc/loop-unswitch.c
@@ -138,13 +138,12 @@ compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp, rtx label, int prob,
void
unswitch_loops (void)
{
- loop_iterator li;
struct loop *loop;
bool changed = false;
/* Go through inner loops (only original ones). */
- FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
changed |= unswitch_single_loop (loop, NULL_RTX, 0);
iv_analysis_done ();
@@ -434,7 +433,7 @@ unswitch_loop (struct loop *loop, basic_block unswitch_on, rtx cond, rtx cinsn)
/* Create a block with the condition. */
prob = true_edge->probability;
- switch_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
+ switch_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
seq = compare_and_jump_seq (XEXP (cond, 0), XEXP (cond, 1), GET_CODE (cond),
block_label (true_edge->dest),
prob, cinsn);
diff --git a/gcc/lra-assigns.c b/gcc/lra-assigns.c
index 54ffc779f11..88fc693bf2d 100644
--- a/gcc/lra-assigns.c
+++ b/gcc/lra-assigns.c
@@ -612,7 +612,7 @@ find_hard_regno_for (int regno, int *cost, int try_only_hard_regno)
&& ! df_regs_ever_live_p (hard_regno + j))
/* It needs save restore. */
hard_regno_costs[hard_regno]
- += 2 * ENTRY_BLOCK_PTR->next_bb->frequency + 1;
+ += 2 * ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->frequency + 1;
priority = targetm.register_priority (hard_regno);
if (best_hard_regno < 0 || hard_regno_costs[hard_regno] < best_cost
|| (hard_regno_costs[hard_regno] == best_cost
diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
index ee82c6f496c..94b6e2559ef 100644
--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -5295,7 +5295,8 @@ lra_inheritance (void)
{
if (lra_dump_file != NULL)
fprintf (lra_dump_file, " %d", bb->index);
- if (bb->next_bb == EXIT_BLOCK_PTR || LABEL_P (BB_HEAD (bb->next_bb)))
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || LABEL_P (BB_HEAD (bb->next_bb)))
break;
e = find_fallthru_edge (bb->succs);
if (! e)
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index f3bad974a87..efc19f20140 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -998,11 +998,12 @@ lra_create_live_ranges (bool all_p)
lra_point_freq = point_freq_vec.address ();
int *post_order_rev_cfg = XNEWVEC (int, last_basic_block);
int n_blocks_inverted = inverted_post_order_compute (post_order_rev_cfg);
- lra_assert (n_blocks_inverted == n_basic_blocks);
+ lra_assert (n_blocks_inverted == n_basic_blocks_for_fn (cfun));
for (i = n_blocks_inverted - 1; i >= 0; --i)
{
bb = BASIC_BLOCK (post_order_rev_cfg[i]);
- if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb
+ == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
process_bb_lives (bb, curr_point);
}
diff --git a/gcc/lra.c b/gcc/lra.c
index 1aea599a2e5..0deae88e85a 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -2059,14 +2059,14 @@ has_nonexceptional_receiver (void)
return true;
/* First determine which blocks can reach exit via normal paths. */
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
FOR_EACH_BB (bb)
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
- EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
- *tos++ = EXIT_BLOCK_PTR;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
+ *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
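
The lra.c hunk above is representative of the other renamings in this merge: the cfun-implicit ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR and n_basic_blocks macros become explicit per-function accessors. A minimal sketch of the same backward walk with the new spellings (predecessor pushing elided; an illustration, not the full lra.c body):

  basic_block bb, *tos, *worklist;

  /* Size the worklist for every block plus one extra slot, as in the patch.  */
  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
  FOR_EACH_BB (bb)
    bb->flags &= ~BB_REACHABLE;

  /* Seed the walk with this function's exit block.  */
  EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
  *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);

  while (tos != worklist)
    {
      bb = *--tos;
      /* ...mark and push the not-yet-reachable predecessors of BB...  */
    }
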
diff --git a/gcc/lto-cgraph.c b/gcc/lto-cgraph.c
index 99dbf96b7a5..d82759955d8 100644
--- a/gcc/lto-cgraph.c
+++ b/gcc/lto-cgraph.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "gimple.h"
#include "expr.h"
#include "flags.h"
diff --git a/gcc/lto-opts.c b/gcc/lto-opts.c
index c9d4e03d00c..2cb536bfc55 100644
--- a/gcc/lto-opts.c
+++ b/gcc/lto-opts.c
@@ -85,14 +85,35 @@ lto_write_options (void)
function rather than per compilation unit. */
/* -fexceptions causes the EH machinery to be initialized, enabling
generation of unwind data so that explicit throw() calls work. */
- if (global_options.x_flag_exceptions)
+ if (!global_options_set.x_flag_exceptions
+ && global_options.x_flag_exceptions)
append_to_collect_gcc_options (&temporary_obstack, &first_p,
"-fexceptions");
/* -fnon-call-exceptions changes the generation of exception
regions. It is enabled implicitly by the Go frontend. */
- if (global_options.x_flag_non_call_exceptions)
+ if (!global_options_set.x_flag_non_call_exceptions
+ && global_options.x_flag_non_call_exceptions)
append_to_collect_gcc_options (&temporary_obstack, &first_p,
"-fnon-call-exceptions");
+ /* The default -ffp-contract changes depending on the language
+ standard. Pass thru conservative standard settings. */
+ if (!global_options_set.x_flag_fp_contract_mode)
+ switch (global_options.x_flag_fp_contract_mode)
+ {
+ case FP_CONTRACT_OFF:
+ append_to_collect_gcc_options (&temporary_obstack, &first_p,
+ "-ffp-contract=off");
+ break;
+ case FP_CONTRACT_ON:
+ append_to_collect_gcc_options (&temporary_obstack, &first_p,
+ "-ffp-contract=on");
+ break;
+ case FP_CONTRACT_FAST:
+ /* Nothing. That merges conservatively and is the default for LTO. */
+ break;
+ default:
+ gcc_unreachable ();
+ }
/* Output explicitly passed options. */
for (i = 1; i < save_decoded_options_count; ++i)
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index 1a469391e66..c5b73ffb8d9 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "toplev.h"
#include "tree.h"
+#include "stringpool.h"
#include "expr.h"
#include "flags.h"
#include "params.h"
@@ -587,7 +588,7 @@ make_new_block (struct function *fn, unsigned int index)
basic_block bb = alloc_block ();
bb->index = index;
SET_BASIC_BLOCK_FOR_FUNCTION (fn, index, bb);
- n_basic_blocks_for_function (fn)++;
+ n_basic_blocks_for_fn (fn)++;
return bb;
}
@@ -658,7 +659,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn,
index = streamer_read_hwi (ib);
}
- p_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
+ p_bb = ENTRY_BLOCK_PTR_FOR_FN (fn);
index = streamer_read_hwi (ib);
while (index != -1)
{
@@ -1007,7 +1008,7 @@ input_function (tree fn_decl, struct data_in *data_in,
of a gimple body is used by the cgraph routines, but we should
really use the presence of the CFG. */
{
- edge_iterator ei = ei_start (ENTRY_BLOCK_PTR->succs);
+ edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
gimple_set_body (fn_decl, bb_seq (ei_edge (ei)->dest));
}
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 4400095cd6f..733119f6953 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -25,6 +25,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "expr.h"
#include "flags.h"
#include "params.h"
@@ -1595,7 +1597,7 @@ output_cfg (struct output_block *ob, struct function *fn)
streamer_write_hwi (ob, -1);
- bb = ENTRY_BLOCK_PTR;
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
while (bb->next_bb)
{
streamer_write_hwi (ob, bb->next_bb->index);
@@ -1979,7 +1981,7 @@ copy_function (struct cgraph_node *node)
/* Main entry point from the pass manager. */
-static void
+void
lto_output (void)
{
struct lto_out_decl_state *decl_state;
@@ -2029,53 +2031,6 @@ lto_output (void)
#endif
}
-namespace {
-
-const pass_data pass_data_ipa_lto_gimple_out =
-{
- IPA_PASS, /* type */
- "lto_gimple_out", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- false, /* has_execute */
- TV_IPA_LTO_GIMPLE_OUT, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0, /* todo_flags_finish */
-};
-
-class pass_ipa_lto_gimple_out : public ipa_opt_pass_d
-{
-public:
- pass_ipa_lto_gimple_out (gcc::context *ctxt)
- : ipa_opt_pass_d (pass_data_ipa_lto_gimple_out, ctxt,
- NULL, /* generate_summary */
- lto_output, /* write_summary */
- NULL, /* read_summary */
- lto_output, /* write_optimization_summary */
- NULL, /* read_optimization_summary */
- NULL, /* stmt_fixup */
- 0, /* function_transform_todo_flags_start */
- NULL, /* function_transform */
- NULL) /* variable_transform */
- {}
-
- /* opt_pass methods: */
- bool gate () { return gate_lto_out (); }
-
-}; // class pass_ipa_lto_gimple_out
-
-} // anon namespace
-
-ipa_opt_pass_d *
-make_pass_ipa_lto_gimple_out (gcc::context *ctxt)
-{
- return new pass_ipa_lto_gimple_out (ctxt);
-}
-
-
/* Write each node in encoded by ENCODER to OB, as well as those reachable
from it and required for correct representation of its semantics.
Each node in ENCODER must be a global declaration or a type. A node
@@ -2395,7 +2350,7 @@ produce_symtab (struct output_block *ob)
this file to be written in to a section that can then be read in to
recover these on other side. */
-static void
+void
produce_asm_for_decls (void)
{
struct lto_out_decl_state *out_state;
@@ -2499,50 +2454,3 @@ produce_asm_for_decls (void)
lto_function_decl_states.release ();
destroy_output_block (ob);
}
-
-
-namespace {
-
-const pass_data pass_data_ipa_lto_finish_out =
-{
- IPA_PASS, /* type */
- "lto_decls_out", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- false, /* has_execute */
- TV_IPA_LTO_DECL_OUT, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0, /* todo_flags_finish */
-};
-
-class pass_ipa_lto_finish_out : public ipa_opt_pass_d
-{
-public:
- pass_ipa_lto_finish_out (gcc::context *ctxt)
- : ipa_opt_pass_d (pass_data_ipa_lto_finish_out, ctxt,
- NULL, /* generate_summary */
- produce_asm_for_decls, /* write_summary */
- NULL, /* read_summary */
- produce_asm_for_decls, /* write_optimization_summary */
- NULL, /* read_optimization_summary */
- NULL, /* stmt_fixup */
- 0, /* function_transform_todo_flags_start */
- NULL, /* function_transform */
- NULL) /* variable_transform */
- {}
-
- /* opt_pass methods: */
- bool gate () { return gate_lto_out (); }
-
-}; // class pass_ipa_lto_finish_out
-
-} // anon namespace
-
-ipa_opt_pass_d *
-make_pass_ipa_lto_finish_out (gcc::context *ctxt)
-{
- return new pass_ipa_lto_finish_out (ctxt);
-}
diff --git a/gcc/lto-streamer.h b/gcc/lto-streamer.h
index 797e92e335b..9dac7c9e846 100644
--- a/gcc/lto-streamer.h
+++ b/gcc/lto-streamer.h
@@ -862,6 +862,8 @@ extern void destroy_output_block (struct output_block *);
extern void lto_output_tree (struct output_block *, tree, bool, bool);
extern void lto_output_toplevel_asms (void);
extern void produce_asm (struct output_block *ob, tree fn);
+extern void lto_output ();
+extern void produce_asm_for_decls ();
void lto_output_decl_state_streams (struct output_block *,
struct lto_out_decl_state *);
void lto_output_decl_state_refs (struct output_block *,
diff --git a/gcc/lto-wrapper.c b/gcc/lto-wrapper.c
index 57978c883d4..335ec8fd01a 100644
--- a/gcc/lto-wrapper.c
+++ b/gcc/lto-wrapper.c
@@ -422,6 +422,18 @@ merge_and_complain (struct cl_decoded_option **decoded_options,
append_option (decoded_options, decoded_options_count, foption);
break;
+ case OPT_ffp_contract_:
+ /* For selected options we can merge conservatively. */
+ for (j = 0; j < *decoded_options_count; ++j)
+ if ((*decoded_options)[j].opt_index == foption->opt_index)
+ break;
+ if (j == *decoded_options_count)
+ append_option (decoded_options, decoded_options_count, foption);
+ /* FP_CONTRACT_OFF < FP_CONTRACT_ON < FP_CONTRACT_FAST. */
+ else if (foption->value < (*decoded_options)[j].value)
+ (*decoded_options)[j] = *foption;
+ break;
+
case OPT_freg_struct_return:
case OPT_fpcc_struct_return:
for (j = 0; j < *decoded_options_count; ++j)
@@ -578,6 +590,7 @@ run_gcc (unsigned argc, char *argv[])
case OPT_fgnu_tm:
case OPT_freg_struct_return:
case OPT_fpcc_struct_return:
+ case OPT_ffp_contract_:
break;
default:
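
The OPT_ffp_contract_ case added above merges according to the ordering FP_CONTRACT_OFF < FP_CONTRACT_ON < FP_CONTRACT_FAST, so when two object files disagree the numerically smaller, more conservative mode wins. A self-contained model of just that rule (the enum here is an illustrative stand-in, not the definition from GCC's flag headers):

  #include <stdio.h>

  enum fp_contract { FP_CONTRACT_OFF, FP_CONTRACT_ON, FP_CONTRACT_FAST };

  /* Conservative merge: keep the mode that permits fewer contractions,
     i.e. the numerically smaller value.  */
  static enum fp_contract
  merge_fp_contract (enum fp_contract a, enum fp_contract b)
  {
    return a < b ? a : b;
  }

  int
  main (void)
  {
    /* One TU built with -ffp-contract=fast, another with =on:
       the merged link-time setting is =on.  */
    printf ("%d\n", (int) merge_fp_contract (FP_CONTRACT_FAST, FP_CONTRACT_ON));
    return 0;
  }
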
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 266d00ff1e6..1b83f22046a 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -2,6 +2,13 @@
* lto-partition.c lto-symtab.c lto.c Adjust.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * lto-lang.c: Include stringpool.h.
+ Include stor-layout.h.
+ * lto-partition.c: Include gcc-symtab.h.
+ * lto.c: Include stor-layout.h.
+
2013-10-31 David Malcolm <dmalcolm@redhat.com>
Automated part of renaming of symtab_node_base to symtab_node.
diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c
index b087cc81b04..77896adc907 100644
--- a/gcc/lto/lto-lang.c
+++ b/gcc/lto/lto-lang.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "target.h"
#include "langhooks.h"
#include "langhooks-def.h"
diff --git a/gcc/lto/lto-partition.c b/gcc/lto/lto-partition.c
index e0d020d30ec..42d78307ded 100644
--- a/gcc/lto/lto-partition.c
+++ b/gcc/lto/lto-partition.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "toplev.h"
#include "tree.h"
+#include "gcc-symtab.h"
#include "gimple.h"
#include "tm.h"
#include "cgraph.h"
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 1788b03eee6..c43fe8476ec 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "opts.h"
#include "toplev.h"
#include "tree.h"
+#include "stor-layout.h"
#include "diagnostic-core.h"
#include "tm.h"
#include "cgraph.h"
diff --git a/gcc/mcf.c b/gcc/mcf.c
index 52020b8c2f8..e709f2ac2c6 100644
--- a/gcc/mcf.c
+++ b/gcc/mcf.c
@@ -471,12 +471,14 @@ create_fixup_graph (fixup_graph_type *fixup_graph)
int fnum_edges;
/* Each basic_block will be split into 2 during vertex transformation. */
- int fnum_vertices_after_transform = 2 * n_basic_blocks;
- int fnum_edges_after_transform = n_edges + n_basic_blocks;
+ int fnum_vertices_after_transform = 2 * n_basic_blocks_for_fn (cfun);
+ int fnum_edges_after_transform =
+ n_edges_for_fn (cfun) + n_basic_blocks_for_fn (cfun);
/* Count the new SOURCE and EXIT vertices to be added. */
int fmax_num_vertices =
- fnum_vertices_after_transform + n_edges + n_basic_blocks + 2;
+ (fnum_vertices_after_transform + n_edges_for_fn (cfun)
+ + n_basic_blocks_for_fn (cfun) + 2);
/* In create_fixup_graph: Each basic block and edge can be split into 3
edges. Number of balance edges = n_basic_blocks. So after
@@ -486,10 +488,11 @@ create_fixup_graph (fixup_graph_type *fixup_graph)
max_edges = 2 * (4 * n_basic_blocks + 3 * n_edges)
= 8 * n_basic_blocks + 6 * n_edges
< 8 * n_basic_blocks + 8 * n_edges. */
- int fmax_num_edges = 8 * (n_basic_blocks + n_edges);
+ int fmax_num_edges = 8 * (n_basic_blocks_for_fn (cfun) +
+ n_edges_for_fn (cfun));
/* Initial num of vertices in the fixup graph. */
- fixup_graph->num_vertices = n_basic_blocks;
+ fixup_graph->num_vertices = n_basic_blocks_for_fn (cfun);
/* Fixup graph vertex list. */
fixup_graph->vertex_list =
@@ -505,10 +508,11 @@ create_fixup_graph (fixup_graph_type *fixup_graph)
/* Compute constants b, k_pos, k_neg used in the cost function calculation.
b = sqrt(avg_vertex_weight(cfg)); k_pos = b; k_neg = 50b. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
total_vertex_weight += bb->count;
- sqrt_avg_vertex_weight = mcf_sqrt (total_vertex_weight / n_basic_blocks);
+ sqrt_avg_vertex_weight = mcf_sqrt (total_vertex_weight /
+ n_basic_blocks_for_fn (cfun));
k_pos = K_POS (sqrt_avg_vertex_weight);
k_neg = K_NEG (sqrt_avg_vertex_weight);
@@ -519,7 +523,7 @@ create_fixup_graph (fixup_graph_type *fixup_graph)
if (dump_file)
fprintf (dump_file, "\nVertex transformation:\n");
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
/* v'->v'': index1->(index1+1). */
i = 2 * bb->index;
@@ -1121,7 +1125,8 @@ adjust_cfg_counts (fixup_graph_type *fixup_graph)
if (dump_file)
fprintf (dump_file, "\nadjust_cfg_counts():\n");
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
i = 2 * bb->index;
@@ -1234,8 +1239,10 @@ adjust_cfg_counts (fixup_graph_type *fixup_graph)
}
}
- ENTRY_BLOCK_PTR->count = sum_edge_counts (ENTRY_BLOCK_PTR->succs);
- EXIT_BLOCK_PTR->count = sum_edge_counts (EXIT_BLOCK_PTR->preds);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
+ sum_edge_counts (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
+ sum_edge_counts (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
/* Compute edge probabilities. */
FOR_ALL_BB (bb)
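
The sizing comment in create_fixup_graph earlier in this file's diff compresses a short derivation; with nbb = n_basic_blocks_for_fn (cfun) and ne = n_edges_for_fn (cfun) it reads max_edges = 2 * (4*nbb + 3*ne) = 8*nbb + 6*ne, which never exceeds the 8 * (nbb + ne) slots actually allocated. A throwaway check of that inequality, for illustration only:

  #include <assert.h>

  int
  main (void)
  {
    /* Hypothetical block/edge counts; any non-negative pair satisfies
       2 * (4*nbb + 3*ne) = 8*nbb + 6*ne <= 8 * (nbb + ne).  */
    for (int nbb = 0; nbb <= 64; nbb++)
      for (int ne = 0; ne <= 64; ne++)
        assert (2 * (4 * nbb + 3 * ne) <= 8 * (nbb + ne));
    return 0;
  }
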
diff --git a/gcc/mode-switching.c b/gcc/mode-switching.c
index d54f32ca071..ed45094c395 100644
--- a/gcc/mode-switching.c
+++ b/gcc/mode-switching.c
@@ -211,7 +211,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
fallthrough edge; there can be at most one, but there could be
none at all, e.g. when exit is called. */
pre_exit = 0;
- FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (eg->flags & EDGE_FALLTHRU)
{
basic_block src_bb = eg->src;
@@ -221,7 +221,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
/* If this function returns a value at the end, we have to
insert the final mode switch before the return value copy
to its hard register. */
- if (EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 1
+ if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
&& NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
&& GET_CODE (PATTERN (last_insn)) == USE
&& GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
@@ -492,7 +492,7 @@ optimize_mode_switching (void)
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
/* Split the edge from the entry block, so that we can note that
there NORMAL_MODE is supplied. */
- post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index 599c025f57d..f3130449909 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -1308,7 +1308,7 @@ canon_loop (struct loop *loop)
/* Avoid annoying special cases of edges going to exit
block. */
- FOR_EACH_EDGE (e, i, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, i, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if ((e->flags & EDGE_FALLTHRU) && (EDGE_COUNT (e->src->succs) > 1))
split_edge (e);
@@ -1351,7 +1351,6 @@ sms_schedule (void)
ddg_ptr *g_arr, g;
int * node_order;
int maxii, max_asap;
- loop_iterator li;
partial_schedule_ptr ps;
basic_block bb = NULL;
struct loop *loop;
@@ -1395,7 +1394,7 @@ sms_schedule (void)
/* Build DDGs for all the relevant loops and hold them in G_ARR
indexed by the loop index. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
rtx head, tail;
rtx count_reg;
@@ -1406,7 +1405,7 @@ sms_schedule (void)
if (dump_file)
fprintf (dump_file, "SMS reached max limit... \n");
- FOR_EACH_LOOP_BREAK (li);
+ break;
}
if (dump_file)
@@ -1533,7 +1532,7 @@ sms_schedule (void)
}
/* We don't want to perform SMS on new loops - created by versioning. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
rtx head, tail;
rtx count_reg, count_init;
@@ -3345,7 +3344,7 @@ rest_of_handle_sms (void)
/* Finalize layout changes. */
FOR_EACH_BB (bb)
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
free_dominance_info (CDI_DOMINATORS);
cfg_layout_finalize ();
diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog
index 881ac6ed341..000ba9e71b1 100644
--- a/gcc/objc/ChangeLog
+++ b/gcc/objc/ChangeLog
@@ -12,6 +12,19 @@
* objc-act.c: Include only gimplify.h and gimple.h as needed.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * objc-act.c: Include stringpool.h.
+ Include stor-layout.h.
+ Include attribs.h.
+ * objc-encoding.c: Include stringpool.h.
+ Include stor-layout.h.
+ * objc-gnu-runtime-abi-01.c: Include stringpool.h.
+ * objc-next-runtime-abi-01.c:
+ Include stringpool.h.
+ * objc-next-runtime-abi-02.c: Include stringpool.h.
+ * objc-runtime-shared-support.c: Include stringpool.h.
+
2013-11-12 Andrew MacLeod <amacleod@redhat.com>
* objc-act.c: Include gimplify.h.
diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c
index 05f7ebaca8d..7e9f8577a0e 100644
--- a/gcc/objc/objc-act.c
+++ b/gcc/objc/objc-act.c
@@ -23,6 +23,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "attribs.h"
#ifdef OBJCPLUS
#include "cp/cp-tree.h"
diff --git a/gcc/objc/objc-encoding.c b/gcc/objc/objc-encoding.c
index 532725114a2..61d722a77b0 100644
--- a/gcc/objc/objc-encoding.c
+++ b/gcc/objc/objc-encoding.c
@@ -21,6 +21,8 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#ifdef OBJCPLUS
#include "cp/cp-tree.h"
diff --git a/gcc/objc/objc-gnu-runtime-abi-01.c b/gcc/objc/objc-gnu-runtime-abi-01.c
index f0116831e25..ef2e033b6dd 100644
--- a/gcc/objc/objc-gnu-runtime-abi-01.c
+++ b/gcc/objc/objc-gnu-runtime-abi-01.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#ifdef OBJCPLUS
#include "cp/cp-tree.h"
diff --git a/gcc/objc/objc-next-runtime-abi-01.c b/gcc/objc/objc-next-runtime-abi-01.c
index 000256d8311..9c7bf4529b1 100644
--- a/gcc/objc/objc-next-runtime-abi-01.c
+++ b/gcc/objc/objc-next-runtime-abi-01.c
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stringpool.h"
#ifdef OBJCPLUS
#include "cp/cp-tree.h"
diff --git a/gcc/objc/objc-next-runtime-abi-02.c b/gcc/objc/objc-next-runtime-abi-02.c
index 885047705d4..c7215a86682 100644
--- a/gcc/objc/objc-next-runtime-abi-02.c
+++ b/gcc/objc/objc-next-runtime-abi-02.c
@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#ifdef OBJCPLUS
#include "cp/cp-tree.h"
diff --git a/gcc/objc/objc-runtime-shared-support.c b/gcc/objc/objc-runtime-shared-support.c
index 45efb780c28..9278b39e11b 100644
--- a/gcc/objc/objc-runtime-shared-support.c
+++ b/gcc/objc/objc-runtime-shared-support.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#ifdef OBJCPLUS
#include "cp/cp-tree.h"
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 8a3988c4948..17bf1dad0b7 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -26,6 +26,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "rtl.h"
#include "gimple.h"
#include "gimplify.h"
@@ -43,6 +45,7 @@ along with GCC; see the file COPYING3. If not see
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
@@ -8230,7 +8233,7 @@ build_omp_regions (void)
{
gcc_assert (root_omp_region == NULL);
calculate_dominance_info (CDI_DOMINATORS);
- build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
+ build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 54164124cf1..7400b493640 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -29,6 +29,9 @@ along with GCC; see the file COPYING3. If not see
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "varasm.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
@@ -8226,6 +8229,10 @@ maybe_gen_insn (enum insn_code icode, unsigned int nops,
return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
ops[3].value, ops[4].value, ops[5].value,
ops[6].value, ops[7].value);
+ case 9:
+ return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
+ ops[3].value, ops[4].value, ops[5].value,
+ ops[6].value, ops[7].value, ops[8].value);
}
gcc_unreachable ();
}
diff --git a/gcc/optabs.h b/gcc/optabs.h
index 4de4409342d..6a5ec19a539 100644
--- a/gcc/optabs.h
+++ b/gcc/optabs.h
@@ -551,5 +551,6 @@ extern void gen_satfract_conv_libfunc (convert_optab, const char *,
extern void gen_satfractuns_conv_libfunc (convert_optab, const char *,
enum machine_mode,
enum machine_mode);
+extern void init_tree_optimization_optabs (tree);
#endif /* GCC_OPTABS_H */
diff --git a/gcc/opts.c b/gcc/opts.c
index 3a939ac92b9..cd48c73267b 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -493,6 +493,7 @@ static const struct default_options default_options_table[] =
{ OPT_LEVELS_2_PLUS, OPT_fvect_cost_model_, NULL, VECT_COST_MODEL_CHEAP },
{ OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_foptimize_strlen, NULL, 1 },
{ OPT_LEVELS_2_PLUS, OPT_fhoist_adjacent_loads, NULL, 1 },
+ { OPT_LEVELS_2_PLUS, OPT_fipa_sem_equality, NULL, 1 },
{ OPT_LEVELS_2_PLUS, OPT_fisolate_erroneous_paths, NULL, 1 },
/* -O3 optimizations. */
@@ -737,9 +738,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
&& opts->x_flag_reorder_blocks_and_partition
&& (ui_except == UI_SJLJ || ui_except >= UI_TARGET))
{
- inform (loc,
- "-freorder-blocks-and-partition does not work "
- "with exceptions on this architecture");
+ if (opts_set->x_flag_reorder_blocks_and_partition)
+ inform (loc,
+ "-freorder-blocks-and-partition does not work "
+ "with exceptions on this architecture");
opts->x_flag_reorder_blocks_and_partition = 0;
opts->x_flag_reorder_blocks = 1;
}
@@ -752,9 +754,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
&& opts->x_flag_reorder_blocks_and_partition
&& (ui_except == UI_SJLJ || ui_except >= UI_TARGET))
{
- inform (loc,
- "-freorder-blocks-and-partition does not support "
- "unwind info on this architecture");
+ if (opts_set->x_flag_reorder_blocks_and_partition)
+ inform (loc,
+ "-freorder-blocks-and-partition does not support "
+ "unwind info on this architecture");
opts->x_flag_reorder_blocks_and_partition = 0;
opts->x_flag_reorder_blocks = 1;
}
@@ -769,9 +772,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
&& targetm_common.unwind_tables_default
&& (ui_except == UI_SJLJ || ui_except >= UI_TARGET))))
{
- inform (loc,
- "-freorder-blocks-and-partition does not work "
- "on this architecture");
+ if (opts_set->x_flag_reorder_blocks_and_partition)
+ inform (loc,
+ "-freorder-blocks-and-partition does not work "
+ "on this architecture");
opts->x_flag_reorder_blocks_and_partition = 0;
opts->x_flag_reorder_blocks = 1;
}
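
These finish_options hunks all apply the same policy: only tell the user about the fallback when -freorder-blocks-and-partition was requested explicitly, which is what the opts_set copy of the option state records; a default-enabled flag is dropped silently. A small stand-alone model of that explicit-versus-implicit check (the structs are illustrative, not GCC's generated options machinery):

  #include <stdbool.h>
  #include <stdio.h>

  struct options     { bool reorder_blocks_and_partition; };
  struct options_set { bool reorder_blocks_and_partition; }; /* given explicitly?  */

  static void
  maybe_disable_partitioning (struct options *opts,
                              const struct options_set *opts_set,
                              bool target_supports_it)
  {
    if (opts->reorder_blocks_and_partition && !target_supports_it)
      {
        if (opts_set->reorder_blocks_and_partition)
          fprintf (stderr, "note: -freorder-blocks-and-partition does not "
                   "work on this architecture\n");
        opts->reorder_blocks_and_partition = false; /* fall back either way */
      }
  }

  int
  main (void)
  {
    struct options opts = { true };          /* flag enabled by a default */
    struct options_set opts_set = { false }; /* but not given on the command line */
    maybe_disable_partitioning (&opts, &opts_set, false); /* silent fallback */
    return 0;
  }
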
@@ -806,8 +810,12 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
error_at (loc, "LTO support has not been enabled in this configuration");
#endif
if (!opts->x_flag_fat_lto_objects && !HAVE_LTO_PLUGIN)
- error_at (loc, "-fno-fat-lto-objects are supported only with linker plugin.");
-}
+ {
+ if (opts_set->x_flag_fat_lto_objects)
+ error_at (loc, "-fno-fat-lto-objects are supported only with linker plugin.");
+ opts->x_flag_fat_lto_objects = 1;
+ }
+ }
if ((opts->x_flag_lto_partition_balanced != 0) + (opts->x_flag_lto_partition_1to1 != 0)
+ (opts->x_flag_lto_partition_none != 0) >= 1)
{
@@ -1446,6 +1454,7 @@ common_handle_option (struct gcc_options *opts,
{ "unreachable", SANITIZE_UNREACHABLE,
sizeof "unreachable" - 1 },
{ "vla-bound", SANITIZE_VLA, sizeof "vla-bound" - 1 },
+ { "null", SANITIZE_NULL, sizeof "null" - 1 },
{ NULL, 0, 0 }
};
const char *comma;
@@ -1487,6 +1496,10 @@ common_handle_option (struct gcc_options *opts,
p = comma + 1;
}
+ /* When instrumenting the pointers, we don't want to remove
+ the null pointer checks. */
+ if (flag_sanitize & SANITIZE_NULL)
+ opts->x_flag_delete_null_pointer_checks = 0;
break;
}
diff --git a/gcc/pass_manager.h b/gcc/pass_manager.h
index 77d78eb11f5..9a71e9c7cbf 100644
--- a/gcc/pass_manager.h
+++ b/gcc/pass_manager.h
@@ -29,7 +29,6 @@ struct register_pass_info;
DEF_PASS_LIST (all_lowering_passes) \
DEF_PASS_LIST (all_small_ipa_passes) \
DEF_PASS_LIST (all_regular_ipa_passes) \
- DEF_PASS_LIST (all_lto_gen_passes) \
DEF_PASS_LIST (all_passes)
#define DEF_PASS_LIST(LIST) PASS_LIST_NO_##LIST,
@@ -82,7 +81,6 @@ public:
opt_pass *all_small_ipa_passes;
opt_pass *all_lowering_passes;
opt_pass *all_regular_ipa_passes;
- opt_pass *all_lto_gen_passes;
opt_pass *all_late_ipa_passes;
/* A map from static pass id to optimization pass. */
diff --git a/gcc/passes.c b/gcc/passes.c
index f45ed0aed1c..fee1513b404 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "hash-table.h"
#include "input.h"
#include "tree.h"
+#include "varasm.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
@@ -63,6 +64,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-into-ssa.h"
@@ -907,7 +909,6 @@ pass_manager::dump_passes () const
dump_pass_list (all_lowering_passes, 1);
dump_pass_list (all_small_ipa_passes, 1);
dump_pass_list (all_regular_ipa_passes, 1);
- dump_pass_list (all_lto_gen_passes, 1);
dump_pass_list (all_late_ipa_passes, 1);
dump_pass_list (all_passes, 1);
@@ -1426,8 +1427,6 @@ pass_manager::register_pass (struct register_pass_info *pass_info)
if (!success || all_instances)
success |= position_pass (pass_info, &all_regular_ipa_passes);
if (!success || all_instances)
- success |= position_pass (pass_info, &all_lto_gen_passes);
- if (!success || all_instances)
success |= position_pass (pass_info, &all_late_ipa_passes);
if (!success || all_instances)
success |= position_pass (pass_info, &all_passes);
@@ -1498,7 +1497,7 @@ pass_manager::operator new (size_t sz)
pass_manager::pass_manager (context *ctxt)
: all_passes (NULL), all_small_ipa_passes (NULL), all_lowering_passes (NULL),
- all_regular_ipa_passes (NULL), all_lto_gen_passes (NULL),
+ all_regular_ipa_passes (NULL),
all_late_ipa_passes (NULL), passes_by_id (NULL), passes_by_id_size (0),
m_ctxt (ctxt)
{
@@ -1553,9 +1552,6 @@ pass_manager::pass_manager (context *ctxt)
register_dump_files (all_regular_ipa_passes,
PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh
| PROP_cfg);
- register_dump_files (all_lto_gen_passes,
- PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh
- | PROP_cfg);
register_dump_files (all_late_ipa_passes,
PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh
| PROP_cfg);
@@ -2274,6 +2270,18 @@ execute_pass_list (struct opt_pass *pass)
while (pass);
}
+/* Write out all LTO data. */
+static void
+write_lto (void)
+{
+ timevar_push (TV_IPA_LTO_GIMPLE_OUT);
+ lto_output ();
+ timevar_pop (TV_IPA_LTO_GIMPLE_OUT);
+ timevar_push (TV_IPA_LTO_DECL_OUT);
+ produce_asm_for_decls ();
+ timevar_pop (TV_IPA_LTO_DECL_OUT);
+}
+
/* Same as execute_pass_list but assume that subpasses of IPA passes
are local passes. If SET is not NULL, write out summaries of only
those node in SET. */
@@ -2328,7 +2336,8 @@ ipa_write_summaries_1 (lto_symtab_encoder_t encoder)
gcc_assert (!flag_wpa);
ipa_write_summaries_2 (passes->all_regular_ipa_passes, state);
- ipa_write_summaries_2 (passes->all_lto_gen_passes, state);
+
+ write_lto ();
gcc_assert (lto_get_out_decl_state () == state);
lto_pop_out_decl_state ();
@@ -2461,7 +2470,8 @@ ipa_write_optimization_summaries (lto_symtab_encoder_t encoder)
gcc_assert (flag_wpa);
pass_manager *passes = g->get_passes ();
ipa_write_optimization_summaries_1 (passes->all_regular_ipa_passes, state);
- ipa_write_optimization_summaries_1 (passes->all_lto_gen_passes, state);
+
+ write_lto ();
gcc_assert (lto_get_out_decl_state () == state);
lto_pop_out_decl_state ();
@@ -2509,14 +2519,13 @@ ipa_read_summaries_1 (struct opt_pass *pass)
}
-/* Read all the summaries for all_regular_ipa_passes and all_lto_gen_passes. */
+/* Read all the summaries for all_regular_ipa_passes. */
void
ipa_read_summaries (void)
{
pass_manager *passes = g->get_passes ();
ipa_read_summaries_1 (passes->all_regular_ipa_passes);
- ipa_read_summaries_1 (passes->all_lto_gen_passes);
}
/* Same as execute_pass_list but assume that subpasses of IPA passes
@@ -2559,14 +2568,13 @@ ipa_read_optimization_summaries_1 (struct opt_pass *pass)
}
}
-/* Read all the summaries for all_regular_ipa_passes and all_lto_gen_passes. */
+/* Read all the summaries for all_regular_ipa_passes. */
void
ipa_read_optimization_summaries (void)
{
pass_manager *passes = g->get_passes ();
ipa_read_optimization_summaries_1 (passes->all_regular_ipa_passes);
- ipa_read_optimization_summaries_1 (passes->all_lto_gen_passes);
}
/* Same as execute_pass_list but assume that subpasses of IPA passes
diff --git a/gcc/passes.def b/gcc/passes.def
index 49faf257b51..a9411fa0baa 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -55,6 +55,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_init_datastructures);
NEXT_PASS (pass_build_ssa);
+ NEXT_PASS (pass_ubsan);
NEXT_PASS (pass_early_warn_uninitialized);
NEXT_PASS (pass_rebuild_cgraph_edges);
NEXT_PASS (pass_inline_parameters);
@@ -111,11 +112,6 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_ipa_reference);
TERMINATE_PASS_LIST ()
- INSERT_PASSES_AFTER (all_lto_gen_passes)
- NEXT_PASS (pass_ipa_lto_gimple_out);
- NEXT_PASS (pass_ipa_lto_finish_out); /* This must be the last LTO pass. */
- TERMINATE_PASS_LIST ()
-
/* Simple IPA passes executed after the regular passes. In WHOPR mode the
passes are executed after partitioning and thus see just parts of the
compiled unit. */
@@ -309,6 +305,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_lower_complex_O0);
NEXT_PASS (pass_asan_O0);
NEXT_PASS (pass_tsan_O0);
+ NEXT_PASS (pass_sanopt);
NEXT_PASS (pass_cleanup_eh);
NEXT_PASS (pass_lower_resx);
NEXT_PASS (pass_nrv);
@@ -387,6 +384,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_regrename);
NEXT_PASS (pass_cprop_hardreg);
NEXT_PASS (pass_fast_rtl_dce);
+ NEXT_PASS (pass_duplicate_computed_gotos);
NEXT_PASS (pass_reorder_blocks);
NEXT_PASS (pass_branch_target_load_optimize2);
NEXT_PASS (pass_leaf_regs);
@@ -398,7 +396,6 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_stack_regs_run);
POP_INSERT_PASSES ()
NEXT_PASS (pass_compute_alignments);
- NEXT_PASS (pass_duplicate_computed_gotos);
NEXT_PASS (pass_variable_tracking);
NEXT_PASS (pass_free_cfg);
NEXT_PASS (pass_machine_reorg);
diff --git a/gcc/plugin.def b/gcc/plugin.def
index c4ca61be14a..25a645854a7 100644
--- a/gcc/plugin.def
+++ b/gcc/plugin.def
@@ -92,6 +92,12 @@ DEFEVENT (PLUGIN_EARLY_GIMPLE_PASSES_END)
/* Called when a pass is first instantiated. */
DEFEVENT (PLUGIN_NEW_PASS)
+/* Called when a file is #include-d or entered through a #line directive.
+ Could happen many times. The event data is the included file path,
+ as a const char* pointer. */
+DEFEVENT (PLUGIN_INCLUDE_FILE)
+
+
/* After the hard-coded events above, plugins can dynamically allocate events
at run time.
PLUGIN_EVENT_FIRST_DYNAMIC only appears as last enum element. */
diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c
index 941007f5220..9ce17e50793 100644
--- a/gcc/postreload-gcse.c
+++ b/gcc/postreload-gcse.c
@@ -1158,12 +1158,12 @@ eliminate_partially_redundant_loads (void)
/* Note we start at block 1. */
- if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
FOR_BB_BETWEEN (bb,
- ENTRY_BLOCK_PTR->next_bb->next_bb,
- EXIT_BLOCK_PTR,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun),
next_bb)
{
/* Don't try anything on basic blocks with strange predecessors. */
diff --git a/gcc/predict.c b/gcc/predict.c
index 182345f0eb0..afe78c6411e 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "calls.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
@@ -128,11 +129,11 @@ maybe_hot_frequency_p (struct function *fun, int freq)
if (profile_status_for_function (fun) == PROFILE_ABSENT)
return true;
if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
- && freq < (ENTRY_BLOCK_PTR_FOR_FUNCTION (fun)->frequency * 2 / 3))
+ && freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency * 2 / 3))
return false;
if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
return false;
- if (freq < (ENTRY_BLOCK_PTR_FOR_FUNCTION (fun)->frequency
+ if (freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency
/ PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
return false;
return true;
@@ -250,24 +251,27 @@ probably_never_executed (struct function *fun,
return false;
if (!frequency)
return true;
- if (!ENTRY_BLOCK_PTR->frequency)
+ if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
return false;
- if (ENTRY_BLOCK_PTR->count)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
{
gcov_type computed_count;
/* Check for possibility of overflow, in which case entry bb count
is large enough to do the division first without losing much
precision. */
- if (ENTRY_BLOCK_PTR->count < REG_BR_PROB_BASE * REG_BR_PROB_BASE)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count < REG_BR_PROB_BASE *
+ REG_BR_PROB_BASE)
{
gcov_type scaled_count
- = frequency * ENTRY_BLOCK_PTR->count * unlikely_count_fraction;
- computed_count = RDIV (scaled_count, ENTRY_BLOCK_PTR->frequency);
+ = frequency * ENTRY_BLOCK_PTR_FOR_FN (cfun)->count *
+ unlikely_count_fraction;
+ computed_count = RDIV (scaled_count,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
}
else
{
- computed_count = RDIV (ENTRY_BLOCK_PTR->count,
- ENTRY_BLOCK_PTR->frequency);
+ computed_count = RDIV (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
computed_count *= frequency * unlikely_count_fraction;
}
if (computed_count >= profile_info->runs)
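
The reshuffle in probably_never_executed above is about 64-bit overflow rather than behaviour: once the entry-block count reaches REG_BR_PROB_BASE squared, multiplying it further by the block frequency and the unlikely fraction could wrap, so the division by the entry frequency is done first at a small cost in precision. A stand-alone model of the two orderings (the type and constant are stand-ins, not the real profile code):

  #include <stdint.h>
  #include <stdio.h>

  typedef int64_t gcov_type;
  #define REG_BR_PROB_BASE 10000  /* GCC's value; assumed here */

  static gcov_type
  scaled_count (gcov_type entry_count, gcov_type entry_freq,
                gcov_type frequency, gcov_type fraction)
  {
    if (entry_count < (gcov_type) REG_BR_PROB_BASE * REG_BR_PROB_BASE)
      /* Small counts: multiply first, divide once, keeping precision.  */
      return frequency * entry_count * fraction / entry_freq;
    /* Huge counts: divide first so the later multiplications cannot wrap.  */
    return entry_count / entry_freq * frequency * fraction;
  }

  int
  main (void)
  {
    printf ("%lld\n", (long long) scaled_count (2000000, 500, 30, 20));
    return 0;
  }
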
@@ -612,7 +616,8 @@ void
gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
{
gcc_assert (profile_status != PROFILE_GUESSED);
- if ((e->src != ENTRY_BLOCK_PTR && EDGE_COUNT (e->src->succs) > 1)
+ if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun) && EDGE_COUNT (e->src->succs) >
+ 1)
&& flag_guess_branch_prob && optimize)
{
struct edge_prediction *i = XNEW (struct edge_prediction);
@@ -1495,12 +1500,11 @@ predict_extra_loop_exits (edge exit_edge)
static void
predict_loops (void)
{
- loop_iterator li;
struct loop *loop;
/* Try to predict out blocks in a loop that are not part of a
natural loop. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
basic_block bb, *bbs;
unsigned j, n_exits;
@@ -2152,7 +2156,7 @@ apply_return_prediction (void)
enum prediction direction;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
return_stmt = last_stmt (e->src);
if (return_stmt
@@ -2200,7 +2204,7 @@ tree_bb_level_predictions (void)
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (!(e->flags & (EDGE_ABNORMAL | EDGE_FAKE | EDGE_EH)))
{
has_return_edges = true;
@@ -2268,7 +2272,7 @@ tree_estimate_probability_bb (basic_block bb)
FOR_EACH_EDGE (e, ei, bb->succs)
{
/* Predict edges to user labels with attributes. */
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
gimple_stmt_iterator gi;
for (gi = gsi_start_bb (e->dest); !gsi_end_p (gi); gsi_next (&gi))
@@ -2306,9 +2310,9 @@ tree_estimate_probability_bb (basic_block bb)
return_block:
return_stmt. */
if (e->dest != bb->next_bb
- && e->dest != EXIT_BLOCK_PTR
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_succ_p (e->dest)
- && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR
+ && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (last = last_stmt (e->dest)) != NULL
&& gimple_code (last) == GIMPLE_RETURN)
{
@@ -2332,7 +2336,7 @@ tree_estimate_probability_bb (basic_block bb)
/* Look for block we are guarding (ie we dominate it,
but it doesn't postdominate us). */
- if (e->dest != EXIT_BLOCK_PTR && e->dest != bb
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
&& dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
&& !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
{
@@ -2594,7 +2598,7 @@ propagate_freq (basic_block head, bitmap tovisit)
}
BLOCK_INFO (bb)->npredecessors = count;
/* When function never returns, we will never process exit block. */
- if (!count && bb == EXIT_BLOCK_PTR)
+ if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->count = bb->frequency = 0;
}
@@ -2744,7 +2748,7 @@ estimate_loops (void)
{
bitmap_set_bit (tovisit, bb->index);
}
- propagate_freq (ENTRY_BLOCK_PTR, tovisit);
+ propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
BITMAP_FREE (tovisit);
}
@@ -2874,14 +2878,14 @@ counts_to_freqs (void)
/* Don't overwrite the estimated frequencies when the profile for
the function is missing. We may drop this function PROFILE_GUESSED
later in drop_profile (). */
- if (!ENTRY_BLOCK_PTR->count)
+ if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
return 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
true_count_max = MAX (bb->count, true_count_max);
count_max = MAX (true_count_max, 1);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
return true_count_max;
@@ -2906,11 +2910,11 @@ expensive_function_p (int threshold)
/* Frequencies are out of range. This either means that function contains
internal loop executing more than BB_FREQ_MAX times or profile feedback
is available and function has not been executed at all. */
- if (ENTRY_BLOCK_PTR->frequency == 0)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency == 0)
return true;
/* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
- limit = ENTRY_BLOCK_PTR->frequency * threshold;
+ limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
FOR_EACH_BB (bb)
{
rtx insn;
@@ -2955,12 +2959,13 @@ estimate_bb_frequencies (bool force)
mark_dfs_back_edges ();
- single_succ_edge (ENTRY_BLOCK_PTR)->probability = REG_BR_PROB_BASE;
+ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
+ REG_BR_PROB_BASE;
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
alloc_aux_for_edges (sizeof (struct edge_info_def));
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
@@ -2984,7 +2989,7 @@ estimate_bb_frequencies (bool force)
memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max));
sreal_div (&freq_max, &real_bb_freq_max, &freq_max);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
sreal tmp;
@@ -3168,7 +3173,7 @@ rebuild_frequencies (void)
max counts. */
gcov_type count_max = 0;
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
count_max = MAX (bb->count, count_max);
if (profile_status == PROFILE_GUESSED
diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
index 043f0f797f5..956290cf111 100644
--- a/gcc/print-rtl.c
+++ b/gcc/print-rtl.c
@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
generator programs. */
#ifndef GENERATOR_FILE
#include "tree.h"
+#include "print-tree.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "basic-block.h"
diff --git a/gcc/print-rtl.h b/gcc/print-rtl.h
new file mode 100644
index 00000000000..3cb28d656fb
--- /dev/null
+++ b/gcc/print-rtl.h
@@ -0,0 +1,27 @@
+/* Print RTL for GCC.
+ Copyright (C) 1987-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PRINT_RTL_H
+#define GCC_PRINT_RTL_H
+
+#ifdef BUFSIZ
+extern void print_rtl (FILE *, const_rtx);
+#endif
+
+#endif // GCC_PRINT_RTL_H
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index f9e03424c36..649ff4f64da 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -23,6 +23,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
+#include "print-rtl.h"
+#include "stor-layout.h"
#include "ggc.h"
#include "langhooks.h"
#include "tree-iterator.h"
diff --git a/gcc/print-tree.h b/gcc/print-tree.h
new file mode 100644
index 00000000000..7d1a5c82bdc
--- /dev/null
+++ b/gcc/print-tree.h
@@ -0,0 +1,46 @@
+/* Declarations for printing trees in human readable form
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PRINT_TREE_H
+#define GCC_PRINT_TREE_H
+
+extern void debug_tree (tree);
+extern void debug_raw (const tree_node &ref);
+extern void debug_raw (const tree_node *ptr);
+extern void debug (const tree_node &ref);
+extern void debug (const tree_node *ptr);
+extern void debug_verbose (const tree_node &ref);
+extern void debug_verbose (const tree_node *ptr);
+extern void debug_head (const tree_node &ref);
+extern void debug_head (const tree_node *ptr);
+extern void debug_body (const tree_node &ref);
+extern void debug_body (const tree_node *ptr);
+extern void debug_vec_tree (vec<tree, va_gc> *);
+extern void debug (vec<tree, va_gc> &ref);
+extern void debug (vec<tree, va_gc> *ptr);
+extern void debug_raw (vec<tree, va_gc> &ref);
+extern void debug_raw (vec<tree, va_gc> *ptr);
+#ifdef BUFSIZ
+extern void dump_addr (FILE*, const char *, const void *);
+extern void print_node (FILE *, const char *, tree, int);
+extern void print_node_brief (FILE *, const char *, const_tree, int);
+extern void indent_to (FILE *, int);
+#endif
+
+#endif // GCC_PRINT_TREE_H
diff --git a/gcc/profile.c b/gcc/profile.c
index 5f73b2ca462..85671b30bc4 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -117,7 +117,7 @@ instrument_edges (struct edge_list *el)
int num_edges = NUM_EDGES (el);
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
@@ -192,7 +192,8 @@ instrument_values (histogram_values values)
case HIST_TYPE_TIME_PROFILE:
{
- basic_block bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ basic_block bb =
+ split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gimple_stmt_iterator gsi = gsi_start_bb (bb);
gimple_gen_time_profiler (t, 0, gsi);
@@ -272,7 +273,7 @@ get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
gcov_type *counts;
/* Count the edges to be (possibly) instrumented. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
@@ -332,7 +333,7 @@ correct_negative_edge_counts (void)
edge e;
edge_iterator ei;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
@@ -383,7 +384,8 @@ is_inconsistent (void)
inconsistent = true;
}
if (bb->count != sum_edge_counts (bb->succs) &&
- ! (find_edge (bb, EXIT_BLOCK_PTR) != NULL && block_ends_with_call_p (bb)))
+ ! (find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun)) != NULL
+ && block_ends_with_call_p (bb)))
{
if (dump_file)
{
@@ -408,7 +410,7 @@ static void
set_bb_counts (void)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
bb->count = sum_edge_counts (bb->succs);
gcc_assert (bb->count >= 0);
@@ -427,7 +429,7 @@ read_profile_edge_counts (gcov_type *exec_counts)
/* The first count in the .da file is the number of times that the function
was entered. This is the exec_count for block zero. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
@@ -491,7 +493,7 @@ compute_frequency_overlap (void)
int overlap = 0;
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
count_total += bb->count;
freq_total += bb->frequency;
@@ -500,7 +502,7 @@ compute_frequency_overlap (void)
if (count_total == 0 || freq_total == 0)
return 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
overlap += MIN (bb->count * OVERLAP_BASE / count_total,
bb->frequency * OVERLAP_BASE / freq_total);
@@ -528,11 +530,6 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
/* Very simple sanity checks so we catch bugs in our profiling code. */
if (!profile_info)
return;
- if (profile_info->run_max * profile_info->runs < profile_info->sum_max)
- {
- error ("corrupted profile info: run_max * runs < sum_max");
- exec_counts = NULL;
- }
if (profile_info->sum_all < profile_info->sum_max)
{
@@ -542,7 +539,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
/* Attach extra info block to each bb. */
alloc_aux_for_blocks (sizeof (struct bb_info));
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
@@ -556,8 +553,8 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
}
/* Avoid predicting entry on exit nodes. */
- BB_INFO (EXIT_BLOCK_PTR)->succ_count = 2;
- BB_INFO (ENTRY_BLOCK_PTR)->pred_count = 2;
+ BB_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->succ_count = 2;
+ BB_INFO (ENTRY_BLOCK_PTR_FOR_FN (cfun))->pred_count = 2;
num_edges = read_profile_edge_counts (exec_counts);
@@ -587,7 +584,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
{
passes++;
changes = 0;
- FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb)
+ FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, prev_bb)
{
struct bb_info *bi = BB_INFO (bb);
if (! bi->count_valid)
@@ -729,7 +726,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
hist_br_prob[i] = 0;
num_branches = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
@@ -748,9 +745,9 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
already present. We get negative frequency from the entry
point. */
if ((e->count < 0
- && e->dest == EXIT_BLOCK_PTR)
+ && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
|| (e->count > bb->count
- && e->dest != EXIT_BLOCK_PTR))
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
{
if (block_ends_with_call_p (bb))
e->count = e->count < 0 ? 0 : bb->count;
@@ -1069,17 +1066,17 @@ branch_prob (void)
ne->goto_locus = e->goto_locus;
}
if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->dest != EXIT_BLOCK_PTR)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
need_exit_edge = 1;
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
have_exit_edge = 1;
}
FOR_EACH_EDGE (e, ei, bb->preds)
{
if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->src != ENTRY_BLOCK_PTR)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
need_entry_edge = 1;
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
have_entry_edge = 1;
}
@@ -1088,14 +1085,14 @@ branch_prob (void)
if (dump_file)
fprintf (dump_file, "Adding fake exit edge to bb %i\n",
bb->index);
- make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
if (need_entry_edge && !have_entry_edge)
{
if (dump_file)
fprintf (dump_file, "Adding fake entry edge to bb %i\n",
bb->index);
- make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FAKE);
/* Avoid bbs that have both fake entry edge and also some
exit edge. One of those edges wouldn't be added to the
spanning tree, but we can't instrument any of them. */
@@ -1151,7 +1148,8 @@ branch_prob (void)
/* Mark edges we've replaced by fake edges above as ignored. */
if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
EDGE_INFO (e)->ignore = 1;
ignored_edges++;
@@ -1182,9 +1180,9 @@ branch_prob (void)
num_instrumented++;
}
- total_num_blocks += n_basic_blocks;
+ total_num_blocks += n_basic_blocks_for_fn (cfun);
if (dump_file)
- fprintf (dump_file, "%d basic blocks\n", n_basic_blocks);
+ fprintf (dump_file, "%d basic blocks\n", n_basic_blocks_for_fn (cfun));
total_num_edges += num_edges;
if (dump_file)
@@ -1213,12 +1211,13 @@ branch_prob (void)
/* Basic block flags */
offset = gcov_write_tag (GCOV_TAG_BLOCKS);
- for (i = 0; i != (unsigned) (n_basic_blocks); i++)
+ for (i = 0; i != (unsigned) (n_basic_blocks_for_fn (cfun)); i++)
gcov_write_unsigned (0);
gcov_write_length (offset);
/* Arcs */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
@@ -1262,7 +1261,7 @@ branch_prob (void)
gimple_stmt_iterator gsi;
gcov_position_t offset = 0;
- if (bb == ENTRY_BLOCK_PTR->next_bb)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
{
expanded_location curr_location =
expand_location (DECL_SOURCE_LOCATION (current_function_decl));
@@ -1386,11 +1385,11 @@ find_spanning_tree (struct edge_list *el)
basic_block bb;
/* We use aux field for standard union-find algorithm. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
bb->aux = bb;
/* Add fake edge exit to entry we can't instrument. */
- union_groups (EXIT_BLOCK_PTR, ENTRY_BLOCK_PTR);
+ union_groups (EXIT_BLOCK_PTR_FOR_FN (cfun), ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* First add all abnormal edges to the tree unless they form a cycle. Also
add all edges to EXIT_BLOCK_PTR to avoid inserting profiling code behind
@@ -1399,7 +1398,7 @@ find_spanning_tree (struct edge_list *el)
{
edge e = INDEX_EDGE (el, i);
if (((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_FAKE))
- || e->dest == EXIT_BLOCK_PTR)
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
&& !EDGE_INFO (e)->ignore
&& (find_group (e->src) != find_group (e->dest)))
{
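Note on the recurring substitution in this file (and throughout the patch): the implicit-cfun macros ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR and n_basic_blocks are replaced by accessors that take the function explicitly. The sketch below is illustrative only; the x_* field names and the hypothetical helper count_normal_blocks are assumptions recalled from the usual GCC layout, not something this patch guarantees.

/* Sketch only, not part of the patch: the new accessors are thin wrappers
   over the per-function CFG, roughly

     #define ENTRY_BLOCK_PTR_FOR_FN(FN)  ((FN)->cfg->x_entry_block_ptr)
     #define EXIT_BLOCK_PTR_FOR_FN(FN)   ((FN)->cfg->x_exit_block_ptr)
     #define n_basic_blocks_for_fn(FN)   ((FN)->cfg->x_n_basic_blocks)

   so a loop that used to bake in cfun can be written against any function.  */

static int
count_normal_blocks (struct function *fn)
{
  basic_block bb;
  int n = 0;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (fn), NULL, next_bb)
    if (bb != ENTRY_BLOCK_PTR_FOR_FN (fn)
        && bb != EXIT_BLOCK_PTR_FOR_FN (fn))
      n++;

  return n;   /* equals n_basic_blocks_for_fn (fn) - NUM_FIXED_BLOCKS */
}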
diff --git a/gcc/realmpfr.c b/gcc/realmpfr.c
index 34f77120c5d..b5c11206d6c 100644
--- a/gcc/realmpfr.c
+++ b/gcc/realmpfr.c
@@ -22,6 +22,7 @@
#include "coretypes.h"
#include "realmpfr.h"
#include "tree.h" /* For TYPE_MODE in real_from_mpfr. */
+#include "stor-layout.h"
/* Convert from REAL_VALUE_TYPE to MPFR. The caller is responsible
for initializing and clearing the MPFR parameter. */
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
index 1917c46fe96..6aad46684d6 100644
--- a/gcc/reg-stack.c
+++ b/gcc/reg-stack.c
@@ -154,6 +154,7 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
#include "rtl-error.h"
#include "tm_p.h"
#include "function.h"
@@ -2648,7 +2649,7 @@ convert_regs_entry (void)
Note that we are inserting converted code here. This code is
never seen by the convert_regs pass. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
{
basic_block block = e->dest;
block_info bi = BLOCK_INFO (block);
@@ -2692,7 +2693,7 @@ convert_regs_exit (void)
value_reg_high = END_HARD_REGNO (retvalue) - 1;
}
- output_stack = &BLOCK_INFO (EXIT_BLOCK_PTR)->stack_in;
+ output_stack = &BLOCK_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->stack_in;
if (value_reg_low == -1)
output_stack->top = -1;
else
@@ -2846,7 +2847,7 @@ compensate_edges (void)
starting_stack_p = false;
FOR_EACH_BB (bb)
- if (bb != ENTRY_BLOCK_PTR)
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
edge e;
edge_iterator ei;
@@ -3080,7 +3081,7 @@ convert_regs_2 (basic_block block)
is only processed after all its predecessors. The number of predecessors
of every block has already been computed. */
- stack = XNEWVEC (basic_block, n_basic_blocks);
+ stack = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
sp = stack;
*sp++ = block;
@@ -3140,14 +3141,14 @@ convert_regs (void)
/* Construct the desired stack for function exit. */
convert_regs_exit ();
- BLOCK_INFO (EXIT_BLOCK_PTR)->done = 1;
+ BLOCK_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->done = 1;
/* ??? Future: process inner loops first, and give them arbitrary
initial stacks which emit_swap_insn can modify. This ought to
prevent double fxch that often appears at the head of a loop. */
/* Process all blocks reachable from all entry points. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
cfg_altered |= convert_regs_2 (e->dest);
/* ??? Process all unreachable blocks. Though there's no excuse
@@ -3220,7 +3221,7 @@ reg_to_stack (void)
FOR_EACH_EDGE (e, ei, bb->preds)
if (!(e->flags & EDGE_DFS_BACK)
- && e->src != ENTRY_BLOCK_PTR)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
bi->predecessors++;
/* Set current register status at last instruction `uninitialized'. */
diff --git a/gcc/regrename.c b/gcc/regrename.c
index 5b2c85799bb..5e86fa5a61a 100644
--- a/gcc/regrename.c
+++ b/gcc/regrename.c
@@ -672,7 +672,7 @@ regrename_analyze (bitmap bb_mask)
n_bbs = pre_and_rev_post_order_compute (NULL, inverse_postorder, false);
/* Gather some information about the blocks in this function. */
- rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks);
+ rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
i = 0;
FOR_EACH_BB (bb)
{
diff --git a/gcc/regs.h b/gcc/regs.h
index b5fa3f3995f..9bf426cd175 100644
--- a/gcc/regs.h
+++ b/gcc/regs.h
@@ -137,7 +137,7 @@ extern size_t reg_info_p_size;
frequency. */
#define REG_FREQ_FROM_BB(bb) (optimize_size \
|| (flag_branch_probabilities \
- && !ENTRY_BLOCK_PTR->count) \
+ && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count) \
? REG_FREQ_MAX \
: ((bb)->frequency * REG_FREQ_MAX / BB_FREQ_MAX)\
? ((bb)->frequency * REG_FREQ_MAX / BB_FREQ_MAX)\
diff --git a/gcc/reload.c b/gcc/reload.c
index b69660d16af..96619f67820 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -1615,7 +1615,7 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc,
&& reg_mentioned_p (XEXP (note, 0), in)
/* Check that a former pseudo is valid; see find_dummy_reload. */
&& (ORIGINAL_REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER
- || (! bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
+ || (! bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
ORIGINAL_REGNO (XEXP (note, 0)))
&& hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] == 1))
&& ! refers_to_regno_for_reload_p (regno,
@@ -1939,7 +1939,7 @@ combine_reloads (void)
&& !fixed_regs[regno]
/* Check that a former pseudo is valid; see find_dummy_reload. */
&& (ORIGINAL_REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER
- || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
+ || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
ORIGINAL_REGNO (XEXP (note, 0)))
&& hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] == 1)))
{
@@ -2098,7 +2098,7 @@ find_dummy_reload (rtx real_in, rtx real_out, rtx *inloc, rtx *outloc,
can ignore the conflict). We must never introduce writes
to such hardregs, as they would clobber the other live
pseudo. See PR 20973. */
- || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
+ || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
ORIGINAL_REGNO (in))
/* Similarly, only do this if we can be sure that the death
note is still valid. global can assign some hardreg to
diff --git a/gcc/reload1.c b/gcc/reload1.c
index a40e16b12c3..6864ec1667f 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -611,14 +611,14 @@ has_nonexceptional_receiver (void)
return true;
/* First determine which blocks can reach exit via normal paths. */
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
FOR_EACH_BB (bb)
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
- EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
- *tos++ = EXIT_BLOCK_PTR;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
+ *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
diff --git a/gcc/reorg.c b/gcc/reorg.c
index a87979db293..dc20de46bee 100644
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -3643,7 +3643,7 @@ dbr_schedule (rtx first)
/* If the current function has no insns other than the prologue and
epilogue, then do not try to fill any delay slots. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
/* Find the highest INSN_UID and allocate and initialize our map from
diff --git a/gcc/resource.c b/gcc/resource.c
index 367181289df..4609c3ad963 100644
--- a/gcc/resource.c
+++ b/gcc/resource.c
@@ -147,7 +147,7 @@ find_basic_block (rtx insn, int search_limit)
/* The start of the function. */
else if (insn == 0)
- return ENTRY_BLOCK_PTR->next_bb->index;
+ return ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->index;
/* See if any of the upcoming CODE_LABELs start a basic block. If we reach
anything other than a CODE_LABEL or note, we can't find this code. */
@@ -966,7 +966,7 @@ mark_target_live_regs (rtx insns, rtx target, struct resources *res)
/* Get starting and ending insn, handling the case where each might
be a SEQUENCE. */
- start_insn = (b == ENTRY_BLOCK_PTR->next_bb->index ?
+ start_insn = (b == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->index ?
insns : BB_HEAD (BASIC_BLOCK (b)));
stop_insn = target;
diff --git a/gcc/sanitizer.def b/gcc/sanitizer.def
index 0f45e9eead4..9c59778f9f4 100644
--- a/gcc/sanitizer.def
+++ b/gcc/sanitizer.def
@@ -301,3 +301,7 @@ DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE,
"__ubsan_handle_vla_bound_not_positive",
BT_FN_VOID_PTR_PTR,
ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH,
+ "__ubsan_handle_type_mismatch",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 8496014a72b..287b826cfc6 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -3963,7 +3963,7 @@ sched_deps_init (bool global_p)
{
/* Average number of insns in the basic block.
'+ 1' is used to make it nonzero. */
- int insns_in_block = sched_max_luid / n_basic_blocks + 1;
+ int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
init_deps_data_vector ();
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index b70e071a7f1..955501a9547 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -625,7 +625,7 @@ schedule_ebbs (void)
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
if (profile_info && flag_branch_probabilities)
@@ -648,7 +648,7 @@ schedule_ebbs (void)
{
edge e;
tail = BB_END (bb);
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| LABEL_P (BB_HEAD (bb->next_bb)))
break;
e = find_fallthru_edge (bb->succs);
@@ -683,7 +683,7 @@ ebb_add_block (basic_block bb, basic_block after)
/* Recovery blocks are always bounded by BARRIERS,
therefore, they always form single block EBB,
therefore, we can use rec->index to identify such EBBs. */
- if (after == EXIT_BLOCK_PTR)
+ if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (&dont_calc_deps, bb->index);
else if (after == last_bb)
last_bb = bb;
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 33112eef075..070404c4245 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -945,14 +945,15 @@ extern vec<haifa_deps_insn_data_def> h_d_i_d;
/* INSN is a speculation check that will simply reexecute the speculatively
scheduled instruction if the speculation fails. */
#define IS_SPECULATION_SIMPLE_CHECK_P(INSN) \
- (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR)
+ (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR_FOR_FN (cfun))
/* INSN is a speculation check that will branch to RECOVERY_BLOCK if the
speculation fails. Insns in that block will reexecute the speculatively
scheduled code and then will return immediately after INSN thus preserving
semantics of the program. */
#define IS_SPECULATION_BRANCHY_CHECK_P(INSN) \
- (RECOVERY_BLOCK (INSN) != NULL && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR)
+ (RECOVERY_BLOCK (INSN) != NULL \
+ && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR_FOR_FN (cfun))
/* Dep status (aka ds_t) of the link encapsulates all information for a given
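For readers of the two macros just above: a hedged illustration of how a caller might tell the two kinds of speculation checks apart. speculation_check_kind is a hypothetical helper, not part of this patch.

/* Hypothetical helper, for illustration only.  */
static const char *
speculation_check_kind (rtx insn)
{
  if (IS_SPECULATION_SIMPLE_CHECK_P (insn))
    /* The check simply re-executes the speculatively scheduled insn.  */
    return "simple check";
  if (IS_SPECULATION_BRANCHY_CHECK_P (insn))
    /* The check branches to RECOVERY_BLOCK (insn) when speculation fails.  */
    return "branchy check";
  return "not a speculation check";
}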
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index b2a7dbd4a94..1663e2fd95d 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -495,7 +495,7 @@ find_single_block_region (bool ebbs_p)
BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
i++;
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| LABEL_P (BB_HEAD (bb->next_bb)))
break;
@@ -643,7 +643,7 @@ haifa_find_rgns (void)
/* Allocate and initialize variables for the first traversal. */
max_hdr = XNEWVEC (int, last_basic_block);
dfs_nr = XCNEWVEC (int, last_basic_block);
- stack = XNEWVEC (edge_iterator, n_edges);
+ stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));
inner = sbitmap_alloc (last_basic_block);
bitmap_ones (inner);
@@ -665,7 +665,7 @@ haifa_find_rgns (void)
/* DFS traversal to find inner loops in the cfg. */
- current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
+ current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs);
sp = -1;
while (1)
@@ -793,7 +793,7 @@ haifa_find_rgns (void)
  /* Second traversal: find reducible inner loops and topologically sort
     blocks of each region. */
- queue = XNEWVEC (int, n_basic_blocks);
+ queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
if (extend_regions_p)
@@ -840,7 +840,7 @@ haifa_find_rgns (void)
/* If we exited the loop early, then I is the header of
a non-reducible loop and we should quit processing it
now. */
- if (jbb != EXIT_BLOCK_PTR)
+ if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* I is a header of an inner loop, or block 0 in a subroutine
@@ -858,7 +858,7 @@ haifa_find_rgns (void)
/* Decrease degree of all I's successors for topological
ordering. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
--degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
@@ -875,7 +875,7 @@ haifa_find_rgns (void)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
if (single_succ_p (jbb)
- && single_succ (jbb) == EXIT_BLOCK_PTR)
+ && single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
queue[++tail] = jbb->index;
bitmap_set_bit (in_queue, jbb->index);
@@ -893,7 +893,7 @@ haifa_find_rgns (void)
FOR_EACH_EDGE (e, ei, bb->preds)
{
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
node = e->src->index;
@@ -954,7 +954,7 @@ haifa_find_rgns (void)
/* See discussion above about nodes not marked as in
this loop during the initial DFS traversal. */
- if (e->src == ENTRY_BLOCK_PTR
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| max_hdr[node] != loop_head)
{
tail = -1;
@@ -1006,7 +1006,7 @@ haifa_find_rgns (void)
queue[head] = queue[tail--];
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
--degree[e->dest->index];
}
else
@@ -1026,7 +1026,7 @@ haifa_find_rgns (void)
This may provide several smaller regions instead
of one too_large region. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (extended_rgn_header, e->dest->index);
}
}
@@ -1153,7 +1153,7 @@ void
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
- int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;
+ int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
@@ -1305,7 +1305,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
BLOCK_TO_BB (bbn) = 0;
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
degree[e->dest->index]--;
if (!large)
@@ -1362,7 +1362,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
idx++;
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
degree[e->dest->index]--;
}
}
@@ -1426,7 +1426,7 @@ compute_dom_prob_ps (int bb)
edge out_edge;
edge_iterator out_ei;
- if (in_edge->src == ENTRY_BLOCK_PTR)
+ if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
pred_bb = BLOCK_TO_BB (in_edge->src->index);
@@ -2663,7 +2663,7 @@ propagate_deps (int bb, struct deps_desc *pred_deps)
FOR_EACH_EDGE (e, ei, block->succs)
{
/* Only bbs "below" bb, in the same region, are interesting. */
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
|| BLOCK_TO_BB (e->dest->index) <= bb)
continue;
@@ -3115,7 +3115,7 @@ sched_rgn_init (bool single_blocks_p)
/* Compute regions for scheduling. */
if (single_blocks_p
- || n_basic_blocks == NUM_FIXED_BLOCKS + 1
+ || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
|| !flag_schedule_interblock
|| is_cfg_nonregular ())
{
@@ -3139,7 +3139,7 @@ sched_rgn_init (bool single_blocks_p)
free_dominance_info (CDI_DOMINATORS);
}
- gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
+ gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));
RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
RGN_NR_BLOCKS (nr_regions - 1));
@@ -3375,7 +3375,7 @@ schedule_insns (void)
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
rgn_setup_common_sched_info ();
@@ -3421,8 +3421,8 @@ rgn_add_remove_insn (rtx insn, int remove_p)
void
extend_regions (void)
{
- rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
- rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
+ rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
+ rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks_for_fn (cfun));
block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
}
@@ -3454,10 +3454,11 @@ rgn_add_block (basic_block bb, basic_block after)
extend_regions ();
bitmap_set_bit (&not_in_df, bb->index);
- if (after == 0 || after == EXIT_BLOCK_PTR)
+ if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rgn_make_new_region_out_of_new_block (bb);
- RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
+ RGN_DONT_CALC_DEPS (nr_regions - 1) = (after
+ == EXIT_BLOCK_PTR_FOR_FN (cfun));
}
else
{
diff --git a/gcc/sdbout.c b/gcc/sdbout.c
index f10869922d7..8af0bc5af5b 100644
--- a/gcc/sdbout.c
+++ b/gcc/sdbout.c
@@ -45,6 +45,8 @@ AT&T C compiler. From the example below I would conclude the following:
#include "tm.h"
#include "debug.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
#include "ggc.h"
#include "vec.h"
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 7d436986084..0db84e64d20 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -3649,7 +3649,7 @@ sel_recompute_toporder (void)
int i, n, rgn;
int *postorder, n_blocks;
- postorder = XALLOCAVEC (int, n_basic_blocks);
+ postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
n_blocks = post_order_compute (postorder, false, false);
rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
@@ -3682,7 +3682,7 @@ maybe_tidy_empty_bb (basic_block bb)
successors. Otherwise remove it. */
if (!sel_bb_empty_p (bb)
|| (single_succ_p (bb)
- && single_succ (bb) == EXIT_BLOCK_PTR
+ && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (!single_pred_p (bb)
|| !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
|| EDGE_COUNT (bb->preds) == 0
@@ -3853,7 +3853,7 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
&& EDGE_COUNT (xbb->succs) == 1
&& (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
/* When successor is an EXIT block, it may not be the next block. */
- && single_succ (xbb) != EXIT_BLOCK_PTR
+ && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
/* And unconditional jump in previous basic block leads to
next basic block of XBB and this jump can be safely removed. */
&& in_current_region_p (xbb->prev_bb)
@@ -4325,7 +4325,7 @@ init_lv_sets (void)
init_lv_set (bb);
/* Don't forget EXIT_BLOCK. */
- init_lv_set (EXIT_BLOCK_PTR);
+ init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Release lv set of HEAD. */
@@ -4346,7 +4346,7 @@ free_lv_sets (void)
basic_block bb;
/* Don't forget EXIT_BLOCK. */
- free_lv_set (EXIT_BLOCK_PTR);
+ free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Free LV sets. */
FOR_EACH_BB (bb)
@@ -4524,7 +4524,7 @@ sel_bb_head (basic_block bb)
{
insn_t head;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
gcc_assert (exit_insn != NULL_RTX);
head = exit_insn;
@@ -4557,7 +4557,7 @@ sel_bb_end (basic_block bb)
if (sel_bb_empty_p (bb))
return NULL_RTX;
- gcc_assert (bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
return BB_END (bb);
}
@@ -4852,7 +4852,7 @@ bb_ends_ebb_p (basic_block bb)
basic_block next_bb = bb_next_bb (bb);
edge e;
- if (next_bb == EXIT_BLOCK_PTR
+ if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bitmap_bit_p (forced_ebb_heads, next_bb->index)
|| (LABEL_P (BB_HEAD (next_bb))
/* NB: LABEL_NUSES () is not maintained outside of jump.c.
@@ -4912,10 +4912,10 @@ recompute_rev_top_order (void)
rev_top_order_index_len);
}
- postorder = XNEWVEC (int, n_basic_blocks);
+ postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
n_blocks = post_order_compute (postorder, true, false);
- gcc_assert (n_basic_blocks == n_blocks);
+ gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);
/* Build reverse function: for each basic block with BB->INDEX == K
rev_top_order_index[K] is it's reverse topological sort number. */
@@ -5538,7 +5538,7 @@ sel_create_recovery_block (insn_t orig_insn)
recovery_block = sched_create_recovery_block (&before_recovery);
if (before_recovery)
- copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
+ copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (sel_bb_empty_p (recovery_block));
sched_create_recovery_edges (first_bb, recovery_block, second_bb);
@@ -5821,7 +5821,7 @@ setup_nop_and_exit_insns (void)
emit_insn (nop_pattern);
exit_insn = get_insns ();
end_sequence ();
- set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
+ set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Free special insns used in the scheduler. */
@@ -6201,11 +6201,10 @@ make_regions_from_the_rest (void)
/* Free data structures used in pipelining of loops. */
void sel_finish_pipelining (void)
{
- loop_iterator li;
struct loop *loop;
/* Release aux fields so we don't free them later by mistake. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
loop->aux = NULL;
loop_optimizer_finalize ();
@@ -6227,11 +6226,10 @@ sel_find_rgns (void)
if (current_loops)
{
loop_p loop;
- loop_iterator li;
- FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
- ? LI_FROM_INNERMOST
- : LI_ONLY_INNERMOST))
+ FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
+ ? LI_FROM_INNERMOST
+ : LI_ONLY_INNERMOST))
make_regions_from_loop_nest (loop);
}
@@ -6398,7 +6396,7 @@ sel_remove_loop_preheader (void)
If it is so - delete this jump and clear data sets of its
basic block if it becomes empty. */
if (next_bb->prev_bb == prev_bb
- && prev_bb != ENTRY_BLOCK_PTR
+ && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& bb_has_removable_jump_to_p (prev_bb, next_bb))
{
redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 486159dd262..ff99e519cf9 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -1024,7 +1024,7 @@ inner_loop_header_p (basic_block bb)
if (!current_loop_nest)
return false;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
inner_loop = bb->loop_father;
@@ -1050,7 +1050,7 @@ get_loop_exit_edges_unique_dests (const struct loop *loop)
vec<edge> edges = vNULL;
struct loop_exit *exit;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& current_loops->state & LOOPS_HAVE_RECORDED_EXITS);
for (exit = loop->exits->next; exit->e; exit = exit->next)
@@ -1083,7 +1083,7 @@ sel_bb_empty_or_nop_p (basic_block bb)
if (!INSN_NOP_P (first))
return false;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
last = sel_bb_end (bb);
@@ -1204,7 +1204,7 @@ _succ_iter_start (insn_t *succp, insn_t insn, int flags)
i.current_exit = -1;
i.loop_exits.create (0);
- if (bb != EXIT_BLOCK_PTR && BB_END (bb) != insn)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun) && BB_END (bb) != insn)
{
i.bb_end = false;
@@ -1308,7 +1308,7 @@ _succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
{
basic_block bb = ip->e2->dest;
- if (bb == EXIT_BLOCK_PTR || bb == after_recovery)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == after_recovery)
*succp = exit_insn;
else
{
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 08fdc772292..1e3fcf0da5a 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -4551,7 +4551,8 @@ find_block_for_bookkeeping (edge e1, edge e2, bool lax)
edge e;
/* Loop over edges from E1 to E2, inclusive. */
- for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR; e = EDGE_SUCC (e->dest, 0))
+ for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun); e =
+ EDGE_SUCC (e->dest, 0))
{
if (EDGE_COUNT (e->dest->preds) == 2)
{
@@ -4642,7 +4643,7 @@ create_block_for_bookkeeping (edge e1, edge e2)
if (DEBUG_INSN_P (insn)
&& single_succ_p (new_bb)
&& (succ = single_succ (new_bb))
- && succ != EXIT_BLOCK_PTR
+ && succ != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
{
while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
@@ -7764,7 +7765,7 @@ run_selective_scheduling (void)
{
int rgn;
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
sel_global_init ();
diff --git a/gcc/sese.c b/gcc/sese.c
index d05b14afbb8..2fe77392bd4 100644
--- a/gcc/sese.c
+++ b/gcc/sese.c
@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index f4598a6b1ab..44b500a38bb 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "varasm.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -321,7 +322,7 @@ delegitimize_mem_from_attrs (rtx x)
{
offset += bitpos / BITS_PER_UNIT;
if (toffset)
- offset += TREE_INT_CST_LOW (toffset);
+ offset += tree_to_shwi (toffset);
}
break;
}
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 8184015f132..f7d5b4b945b 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -30,6 +30,8 @@ along with GCC; see the file COPYING3. If not see
#include "rtl.h"
#include "hard-reg-set.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "flags.h"
#include "except.h"
diff --git a/gcc/stmt.h b/gcc/stmt.h
new file mode 100644
index 00000000000..514be23a239
--- /dev/null
+++ b/gcc/stmt.h
@@ -0,0 +1,34 @@
+/* Declarations and data structures for stmt.c.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STMT_H
+#define GCC_STMT_H
+
+extern void expand_label (tree);
+extern bool parse_output_constraint (const char **, int, int, int,
+ bool *, bool *, bool *);
+extern bool parse_input_constraint (const char **, int, int, int, int,
+ const char * const *, bool *, bool *);
+extern tree resolve_asm_operand_names (tree, tree, tree, tree);
+#ifdef HARD_CONST
+/* Silly ifdef to avoid having all includers depend on hard-reg-set.h. */
+extern tree tree_overlaps_hard_reg_set (tree, HARD_REG_SET *);
+#endif
+
+#endif // GCC_STMT_H
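The HARD_CONST guard in the new header means tree_overlaps_hard_reg_set is only visible to files that include hard-reg-set.h first. A minimal include-order sketch under that assumption; asm_clobbers_reg_p is a hypothetical caller, not code from this patch.

/* Hypothetical includer, illustration only.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hard-reg-set.h"   /* defines HARD_CONST ... */
#include "tree.h"
#include "stmt.h"           /* ... so stmt.h now declares tree_overlaps_hard_reg_set */

static bool
asm_clobbers_reg_p (tree clobbered_decls, HARD_REG_SET *set)
{
  /* Non-NULL result means some decl in CLOBBERED_DECLS overlaps SET.  */
  return tree_overlaps_hard_reg_set (clobbered_decls, set) != NULL_TREE;
}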
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index bb4b6d9b82b..fe50ca5769b 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -23,6 +23,10 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "varasm.h"
+#include "print-tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
diff --git a/gcc/stor-layout.h b/gcc/stor-layout.h
new file mode 100644
index 00000000000..2be020dc2f3
--- /dev/null
+++ b/gcc/stor-layout.h
@@ -0,0 +1,115 @@
+/* Definitions and declarations for stor-layout.c.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STOR_LAYOUT_H
+#define GCC_STOR_LAYOUT_H
+
+extern void set_min_and_max_values_for_integral_type (tree, int, signop);
+extern void fixup_signed_type (tree);
+extern void internal_reference_types (void);
+extern unsigned int update_alignment_for_field (record_layout_info, tree,
+ unsigned int);
+extern record_layout_info start_record_layout (tree);
+extern tree bit_from_pos (tree, tree);
+extern tree byte_from_pos (tree, tree);
+extern void pos_from_bit (tree *, tree *, unsigned int, tree);
+extern void normalize_offset (tree *, tree *, unsigned int);
+extern tree rli_size_unit_so_far (record_layout_info);
+extern tree rli_size_so_far (record_layout_info);
+extern void normalize_rli (record_layout_info);
+extern void place_field (record_layout_info, tree);
+extern void compute_record_mode (tree);
+extern void finish_record_layout (record_layout_info, int);
+extern unsigned int element_precision (const_tree);
+extern void finalize_size_functions (void);
+extern void fixup_unsigned_type (tree);
+extern void initialize_sizetypes (void);
+
+/* Finish up a builtin RECORD_TYPE. Give it a name and provide its
+ fields. Optionally specify an alignment, and then lay it out. */
+extern void finish_builtin_struct (tree, const char *, tree, tree);
+
+/* Given a VAR_DECL, PARM_DECL, RESULT_DECL or FIELD_DECL node,
+ calculates the DECL_SIZE, DECL_SIZE_UNIT, DECL_ALIGN and DECL_MODE
+ fields. Call this only once for any given decl node.
+
+ Second argument is the boundary that this field can be assumed to
+ be starting at (in bits). Zero means it can be assumed aligned
+ on any boundary that may be needed. */
+extern void layout_decl (tree, unsigned);
+
+/* Given a ..._TYPE node, calculate the TYPE_SIZE, TYPE_SIZE_UNIT,
+ TYPE_ALIGN and TYPE_MODE fields. If called more than once on one
+ node, does nothing except for the first time. */
+extern void layout_type (tree);
+
+/* Construct various nodes representing fract or accum data types. */
+extern tree make_fract_type (int, int, int);
+extern tree make_accum_type (int, int, int);
+
+#define make_signed_fract_type(P) make_fract_type (P, 0, 0)
+#define make_unsigned_fract_type(P) make_fract_type (P, 1, 0)
+#define make_sat_signed_fract_type(P) make_fract_type (P, 0, 1)
+#define make_sat_unsigned_fract_type(P) make_fract_type (P, 1, 1)
+#define make_signed_accum_type(P) make_accum_type (P, 0, 0)
+#define make_unsigned_accum_type(P) make_accum_type (P, 1, 0)
+#define make_sat_signed_accum_type(P) make_accum_type (P, 0, 1)
+#define make_sat_unsigned_accum_type(P) make_accum_type (P, 1, 1)
+
+#define make_or_reuse_signed_fract_type(P) \
+ make_or_reuse_fract_type (P, 0, 0)
+#define make_or_reuse_unsigned_fract_type(P) \
+ make_or_reuse_fract_type (P, 1, 0)
+#define make_or_reuse_sat_signed_fract_type(P) \
+ make_or_reuse_fract_type (P, 0, 1)
+#define make_or_reuse_sat_unsigned_fract_type(P) \
+ make_or_reuse_fract_type (P, 1, 1)
+#define make_or_reuse_signed_accum_type(P) \
+ make_or_reuse_accum_type (P, 0, 0)
+#define make_or_reuse_unsigned_accum_type(P) \
+ make_or_reuse_accum_type (P, 1, 0)
+#define make_or_reuse_sat_signed_accum_type(P) \
+ make_or_reuse_accum_type (P, 0, 1)
+#define make_or_reuse_sat_unsigned_accum_type(P) \
+ make_or_reuse_accum_type (P, 1, 1)
+
+extern tree make_signed_type (int);
+extern tree make_unsigned_type (int);
+
+/* Return the mode for data of a given size SIZE and mode class CLASS.
+ If LIMIT is nonzero, then don't use modes bigger than MAX_FIXED_MODE_SIZE.
+ The value is BLKmode if no other mode is found. This is like
+ mode_for_size, but is passed a tree. */
+extern enum machine_mode mode_for_size_tree (const_tree, enum mode_class, int);
+
+/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
+ a previous call to layout_decl and calls it again. */
+extern void relayout_decl (tree);
+
+/* variable_size (EXP) is like save_expr (EXP) except that it
+ is for the special case of something that is part of a
+ variable size for a data type. It makes special arrangements
+ to compute the value at the right time when the data type
+ belongs to a function parameter. */
+extern tree variable_size (tree);
+
+/* Vector types need to check target flags to determine type. */
+extern enum machine_mode vector_type_mode (const_tree);
+
+#endif // GCC_STOR_LAYOUT_H
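A minimal usage sketch for the layout entry points declared above; build_pair_type and the field names are hypothetical and not part of the patch.

/* Hypothetical example: build a two-field record and let stor-layout
   compute its size, alignment and mode.  */
static tree
build_pair_type (void)
{
  tree t = make_node (RECORD_TYPE);
  tree f1 = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
                        get_identifier ("first"), integer_type_node);
  tree f2 = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
                        get_identifier ("second"), integer_type_node);

  DECL_CONTEXT (f1) = t;
  DECL_CONTEXT (f2) = t;
  DECL_CHAIN (f1) = f2;
  TYPE_FIELDS (t) = f1;

  layout_type (t);   /* fills TYPE_SIZE, TYPE_SIZE_UNIT, TYPE_ALIGN, TYPE_MODE */
  return t;
}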
diff --git a/gcc/store-motion.c b/gcc/store-motion.c
index 68f293c6252..378d6c7e8ba 100644
--- a/gcc/store-motion.c
+++ b/gcc/store-motion.c
@@ -805,7 +805,7 @@ insert_store (struct st_expr * expr, edge e)
/* If tmp is NULL, we found an insertion on every edge, blank the
insertion vector for these edges, and insert at the start of the BB. */
- if (!tmp && bb != EXIT_BLOCK_PTR)
+ if (!tmp && bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
FOR_EACH_EDGE (tmp, ei, e->dest->preds)
{
@@ -848,7 +848,7 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
rtx last, insn, note;
rtx mem = smexpr->pattern;
- stack = XNEWVEC (edge_iterator, n_basic_blocks);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun));
sp = 0;
ei = ei_start (bb->succs);
@@ -869,7 +869,7 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
}
bb = act->dest;
- if (bb == EXIT_BLOCK_PTR
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bitmap_bit_p (visited, bb->index))
{
if (!ei_end_p (ei))
@@ -1208,7 +1208,7 @@ one_store_motion_pass (void)
if (dump_file)
{
fprintf (dump_file, "STORE_MOTION of %s, %d basic blocks, ",
- current_function_name (), n_basic_blocks);
+ current_function_name (), n_basic_blocks_for_fn (cfun));
fprintf (dump_file, "%d insns deleted, %d insns created\n",
n_stores_deleted, n_stores_created);
}
diff --git a/gcc/stringpool.h b/gcc/stringpool.h
new file mode 100644
index 00000000000..55592aa3957
--- /dev/null
+++ b/gcc/stringpool.h
@@ -0,0 +1,43 @@
+/* Declarations and definitions for stringpool.c.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STRINGPOOL_H
+#define GCC_STRINGPOOL_H
+
+/* Return the (unique) IDENTIFIER_NODE node for a given name.
+ The name is supplied as a char *. */
+extern tree get_identifier (const char *);
+
+/* If an identifier with the name TEXT (a null-terminated string) has
+ previously been referred to, return that node; otherwise return
+ NULL_TREE. */
+extern tree maybe_get_identifier (const char *);
+
+/* Identical to get_identifier, except that the length is assumed
+ known. */
+extern tree get_identifier_with_length (const char *, size_t);
+
+#if GCC_VERSION >= 3000
+#define get_identifier(str) \
+ (__builtin_constant_p (str) \
+ ? get_identifier_with_length ((str), strlen (str)) \
+ : get_identifier (str))
+#endif
+
+#endif // GCC_STRINGPOOL_H
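The macro at the end of the header is a compile-time fast path: when the argument is a string literal, __builtin_constant_p is true, strlen folds to a constant, and the call is routed through get_identifier_with_length. A small sketch of both paths; the two helper functions are hypothetical callers, not code from this patch.

/* Hypothetical callers, illustration only.  */

static tree
main_identifier (void)
{
  /* Literal argument: with GCC >= 3.0 the macro above rewrites this into
     get_identifier_with_length ("main", strlen ("main")), and strlen of a
     literal folds to a compile-time constant.  */
  return get_identifier ("main");
}

static bool
identifier_known_p (const char *name)
{
  /* Non-literal argument: maybe_get_identifier returns NULL_TREE when the
     string was never interned, without creating a new IDENTIFIER_NODE.  */
  return maybe_get_identifier (name) != NULL_TREE;
}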
diff --git a/gcc/symtab.c b/gcc/symtab.c
index 9426f75399d..851264d954a 100644
--- a/gcc/symtab.c
+++ b/gcc/symtab.c
@@ -22,7 +22,12 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "rtl.h"
#include "tree.h"
+#include "print-tree.h"
+#include "varasm.h"
+#include "function.h"
+#include "emit-rtl.h"
#include "gimple.h"
#include "tree-inline.h"
#include "langhooks.h"
@@ -32,7 +37,6 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic.h"
#include "timevar.h"
#include "lto-streamer.h"
-#include "rtl.h"
#include "output.h"
const char * const ld_plugin_symbol_resolution_names[]=
diff --git a/gcc/system.h b/gcc/system.h
index e3fbce6ff42..adc97036bc9 100644
--- a/gcc/system.h
+++ b/gcc/system.h
@@ -1023,7 +1023,7 @@ helper_const_non_const_cast (const char *p)
#define CONST_CAST_TREE(X) CONST_CAST (union tree_node *, (X))
#define CONST_CAST_RTX(X) CONST_CAST (struct rtx_def *, (X))
#define CONST_CAST_BB(X) CONST_CAST (struct basic_block_def *, (X))
-#define CONST_CAST_GIMPLE(X) CONST_CAST (union gimple_statement_d *, (X))
+#define CONST_CAST_GIMPLE(X) CONST_CAST (struct gimple_statement_base *, (X))
/* Activate certain diagnostics as warnings (not errors via the
-Werror flag). */
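The CONST_CAST_GIMPLE change tracks the switch of the gimple statement representation from union gimple_statement_d to a struct hierarchy rooted at gimple_statement_base. A hedged usage sketch; legacy_touch_stmt is a placeholder declaration, not a real GCC function.

/* Illustration only.  */
extern void legacy_touch_stmt (gimple);   /* hypothetical non-const API */

static void
touch_stmt (const_gimple stmt)
{
  /* CONST_CAST_GIMPLE drops the const qualifier without tripping cast
     warnings; the underlying statement must of course really be writable.  */
  legacy_touch_stmt (CONST_CAST_GIMPLE (stmt));
}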
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 677f25513ff..5c13fc33910 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -53,6 +53,8 @@ along with GCC; see the file COPYING3. If not see
#include "machmode.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "expr.h"
#include "output.h"
#include "diagnostic-core.h"
@@ -70,6 +72,7 @@ along with GCC; see the file COPYING3. If not see
#include "opts.h"
#include "gimple.h"
#include "gimplify.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-alias.h"
#include "insn-codes.h"
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index e5e9ecbe46a..606e4de6979 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,125 @@
+2013-11-20 Dominik Vogt <vogt@linux.vnet.ibm.com>
+
+ * gcc.target/s390/htm-1.c: Rename to ...
+ * gcc.target/s390/htm-builtins-compile-1.c: ... this
+ one.
+ * gcc.target/s390/htm-xl-intrin-1.c: Rename to ...
+ * gcc.target/s390/htm-builtins-compile-3.c: ... this one.
+ * gcc.target/s390/htm-builtins-compile-2.c: New testcase.
+ * gcc.target/s390/htm-builtins-1.c: New testcase.
+ * gcc.target/s390/htm-builtins-2.c: New testcase.
+ * gcc.target/s390/s390.exp: Add check for htm machine.
+
+2013-11-19 Joshua J Cogliati <jrincayc@yahoo.com>
+
+ PR c/53001
+ * c-c++-common/Wfloat-conversion.c: Copies relevant
+ tests from c-c++-common/Wconversion-real.c,
+ gcc.dg/Wconversion-real-integer.c and gcc.dg/pr35635.c into
+ new testcase for conversions that are warned about by
+ -Wfloat-conversion.
+
+2013-11-19 Martin Jambor <mjambor@suse.cz>
+
+ PR rtl-optimization/59099
+ * gcc.target/i386/pr59099.c: New test.
+
+2013-11-19 Sriraman Tallam <tmsriram@google.com>
+
+ * gcc.dg/tree-prof/cold_partition_label.c: New testcase.
+
+2013-11-19 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
+
+ * gcc.target/powerpc/ppc64-abi-2.c (MAKE_SLOT): New macro to
+ construct parameter slot value in endian-independent way.
+ (fcevv, fciievv, fcvevv): Use it.
+
+2013-11-19 Jan Hubicka <jh@suse.cz>
+
+ * ipa/devirt9.C: Fix previous change.
+
+2013-11-19 Cesar Philippidis <cesar@codesourcery.com>
+
+ * gcc.c-torture/execute/20101011-1.c (__aarch64__):
+ Remove defined(__linux__).
+
+2013-11-19 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59164
+ * gcc.dg/torture/pr59164.c: New testcase.
+
+2013-11-19 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/58956
+ * gcc.dg/torture/pr58956.c: New testcase.
+
+2013-11-19 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/null-1.c: New test.
+ * c-c++-common/ubsan/null-2.c: New test.
+ * c-c++-common/ubsan/null-3.c: New test.
+ * c-c++-common/ubsan/null-4.c: New test.
+ * c-c++-common/ubsan/null-5.c: New test.
+ * c-c++-common/ubsan/null-6.c: New test.
+ * c-c++-common/ubsan/null-7.c: New test.
+ * c-c++-common/ubsan/null-8.c: New test.
+ * c-c++-common/ubsan/null-9.c: New test.
+ * c-c++-common/ubsan/null-10.c: New test.
+ * c-c++-common/ubsan/null-11.c: New test.
+ * gcc.dg/ubsan/c99-shift-2.c: Adjust dg-output.
+ * c-c++-common/ubsan/shift-1.c: Likewise.
+ * c-c++-common/ubsan/div-by-zero-3.c: Likewise.
+
+2013-11-19 Uros Bizjak <ubizjak@gmail.com>
+
+ * gcc.dg/c11-complex-1.c: Use dg-add-options ieee.
+
+2013-11-19 Jan Hubicka <jh@suse.cz>
+
+ * ipa/devirt9.C: Verify that the optimization happens already before
+ whole-program.
+
+2013-11-19 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/57517
+ * gfortran.fortran-torture/compile/pr57517.f90: New testcase.
+ * gcc.dg/torture/pr57517.c: Likewise.
+
+2013-11-19 Jan Hubicka <jh@suse.cz>
+
+ * gcc.target/i386/memcpy-3.c: New testcase.
+
+2013-11-18 Jan Hubicka <jh@suse.cz>
+ Uros Bizjak <ubizjak@gmail.com>
+
+ PR middle-end/59175
+ * gcc.target/i386/memcpy-2.c: Fix template;
+ add +1 so the testcase passes at 32bit.
+
+2013-11-18 Dominique d'Humieres <dominiq@lps.ens.fr>
+
+ * c-c++-common/cilk-plus/PS/reduction-3.c: Use stdlib.h.
+ Remove spurious FIXME.
+
+2013-11-18 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * c-c++-common/cilk-plus/PS/body.c: Add fopenmp effective target check.
+
+2013-11-18 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/53473
+ * g++.dg/cpp0x/constexpr-noexcept7.C: New.
+
+2013-11-18 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/59125
+ PR tree-optimization/54570
+ * gcc.dg/builtin-object-size-8.c: Un-xfail.
+ * gcc.dg/builtin-object-size-14.c: New testcase.
+ * gcc.dg/strlenopt-14gf.c: Adjust.
+ * gcc.dg/strlenopt-1f.c: Likewise.
+ * gcc.dg/strlenopt-4gf.c: Likewise.
+
2013-11-18 Eric Botcazou <ebotcazou@adacore.com>
* gnat.dg/volatile11.adb: New test.
@@ -108,6 +230,11 @@
* g++.dg/cpp0x/nsdmi-template3.C: New.
* g++.dg/cpp0x/nsdmi-template4.C: Likewise.
+2013-11-14 Diego Novillo <dnovillo@google.com>
+
+ * gcc.dg/plugin/selfassign.c: Include stringpool.h.
+ * gcc.dg/plugin/start_unit_plugin.c: Likewise.
+
2013-11-14 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
* gcc.target/powerpc/ppc64-abi-1.c (stack_frame_t): Remove
@@ -1083,8 +1210,8 @@
* gcc.dg/vmx/gcc-bug-i.c: Add little endian variant.
* gcc.dg/vmx/eg-5.c: Likewise.
-2013-10-28 Claudiu Zissulescu <claziss@synopsys.com>
- Joern Rennecke <joern.rennecke@embecosm.com>
+2013-10-28 Claudiu Zissulescu <claziss@synopsys.com>
+ Joern Rennecke <joern.rennecke@embecosm.com>
* gcc.target/arc/jump-around-jump.c: New test.
diff --git a/gcc/testsuite/c-c++-common/Wfloat-conversion.c b/gcc/testsuite/c-c++-common/Wfloat-conversion.c
new file mode 100644
index 00000000000..e872755902d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/Wfloat-conversion.c
@@ -0,0 +1,58 @@
+/* Test for -Wfloat-conversion diagnostics for floating-point conversions. */
+
+/* { dg-do compile } */
+/* { dg-options "-std=c99 -Wfloat-conversion" { target c } } */
+/* { dg-options "-Wfloat-conversion" { target c++ } } */
+/* { dg-require-effective-target large_double } */
+/* { dg-require-effective-target int32plus } */
+/* { dg-require-effective-target double64plus } */
+#include <limits.h>
+
+float vfloat;
+double vdouble;
+long double vlongdouble;
+int bar;
+
+void fsi (signed int x);
+void fui (unsigned int x);
+void ffloat (float f);
+void fdouble (double d);
+void flongdouble (long double ld);
+
+void h (void)
+{
+ unsigned int ui = 3;
+ int si = 3;
+ unsigned char uc = 3;
+ signed char sc = 3;
+ float f = 0;
+ double d = 0;
+ long double ld = 0;
+
+ ffloat (3.1); /* { dg-warning "conversion to 'float' alters 'double' constant value" } */
+ vfloat = 3.1; /* { dg-warning "conversion to 'float' alters 'double' constant value" } */
+ ffloat (3.1L); /* { dg-warning "conversion to 'float' alters 'long double' constant value" } */
+ vfloat = 3.1L; /* { dg-warning "conversion to 'float' alters 'long double' constant value" } */
+ fdouble (3.1L); /* { dg-warning "conversion to 'double' alters 'long double' constant value" "" { target large_long_double } } */
+ vdouble = 3.1L; /* { dg-warning "conversion to 'double' alters 'long double' constant value" "" { target large_long_double } } */
+ ffloat (vdouble); /* { dg-warning "conversion to 'float' from 'double' may alter its value" } */
+ vfloat = vdouble; /* { dg-warning "conversion to 'float' from 'double' may alter its value" } */
+ ffloat (vlongdouble); /* { dg-warning "conversion to 'float' from 'long double' may alter its value" } */
+ vfloat = vlongdouble; /* { dg-warning "conversion to 'float' from 'long double' may alter its value" } */
+ fdouble (vlongdouble); /* { dg-warning "conversion to 'double' from 'long double' may alter its value" "" { target large_long_double } } */
+ vdouble = vlongdouble; /* { dg-warning "conversion to 'double' from 'long double' may alter its value" "" { target large_long_double } } */
+
+ fsi (3.1f); /* { dg-warning "conversion to 'int' alters 'float' constant value" } */
+ si = 3.1f; /* { dg-warning "conversion to 'int' alters 'float' constant value" } */
+ fsi (3.1); /* { dg-warning "conversion to 'int' alters 'double' constant value" } */
+ si = 3.1; /* { dg-warning "conversion to 'int' alters 'double' constant value" } */
+ fsi (d); /* { dg-warning "conversion to 'int' from 'double' may alter its value" } */
+ si = d; /* { dg-warning "conversion to 'int' from 'double' may alter its value" } */
+ ffloat (INT_MAX); /* { dg-warning "conversion to 'float' alters 'int' constant value" } */
+ vfloat = INT_MAX; /* { dg-warning "conversion to 'float' alters 'int' constant value" } */
+ ffloat (16777217); /* { dg-warning "conversion to 'float' alters 'int' constant value" } */
+ vfloat = 16777217; /* { dg-warning "conversion to 'float' alters 'int' constant value" } */
+
+ sc = bar != 0 ? 2.1 : 10; /* { dg-warning "conversion to 'signed char' alters 'double' constant value" } */
+ uc = bar != 0 ? 2.1 : 10; /* { dg-warning "conversion to 'unsigned char' alters 'double' constant value" } */
+}
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/PS/body.c b/gcc/testsuite/c-c++-common/cilk-plus/PS/body.c
index 9b10041d669..82c0a0c20bf 100644
--- a/gcc/testsuite/c-c++-common/cilk-plus/PS/body.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/PS/body.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
/* { dg-options "-fcilkplus -fopenmp" } */
+/* { dg-require-effective-target fopenmp } */
int *a, *b, c;
void *jmpbuf[10];
diff --git a/gcc/testsuite/c-c++-common/cilk-plus/PS/reduction-3.c b/gcc/testsuite/c-c++-common/cilk-plus/PS/reduction-3.c
index 26822d633ff..35cb904a540 100644
--- a/gcc/testsuite/c-c++-common/cilk-plus/PS/reduction-3.c
+++ b/gcc/testsuite/c-c++-common/cilk-plus/PS/reduction-3.c
@@ -1,13 +1,11 @@
/* { dg-do run } */
/* { dg-options "-O3 -fcilkplus" } */
-/* FIXME: This test has been xfailed until reductions are fixed. */
-
#define N 256
#if HAVE_IO
#include <stdio.h>
#endif
-#include <malloc.h>
+#include <stdlib.h>
int
reduction_simd (int *a)
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c
index 719e6c98634..f3ee23bd021 100644
--- a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c
@@ -16,6 +16,6 @@ main (void)
return 0;
}
-/* { dg-output "division of -2147483648 by -1 cannot be represented in type int(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type int(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type int(\n|\r\n|\r)" } */
+/* { dg-output "division of -2147483648 by -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-1.c b/gcc/testsuite/c-c++-common/ubsan/null-1.c
new file mode 100644
index 00000000000..887dfdcdb9f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-1.c
@@ -0,0 +1,13 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int
+main (void)
+{
+ int *p = 0;
+ return *p;
+}
+
+/* { dg-output "load of null pointer of type 'int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-10.c b/gcc/testsuite/c-c++-common/ubsan/null-10.c
new file mode 100644
index 00000000000..267ab1f321c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-10.c
@@ -0,0 +1,14 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int
+main (void)
+{
+ short *p = 0, *u;
+ *(u + *p) = 23;
+ return 0;
+}
+
+/* { dg-output "load of null pointer of type 'short int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-11.c b/gcc/testsuite/c-c++-common/ubsan/null-11.c
new file mode 100644
index 00000000000..83e65af7e86
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-11.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+struct S {
+ int i;
+};
+
+int
+main (void)
+{
+ struct S **s = 0;
+ return (*s)->i;
+}
+
+/* { dg-output "load of null pointer of type 'struct S \\*'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-2.c b/gcc/testsuite/c-c++-common/ubsan/null-2.c
new file mode 100644
index 00000000000..c5303ea97a7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-2.c
@@ -0,0 +1,13 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int
+main (void)
+{
+ int ***ppp = 0;
+ return ***ppp;
+}
+
+/* { dg-output "load of null pointer of type 'int \\*\\*'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-3.c b/gcc/testsuite/c-c++-common/ubsan/null-3.c
new file mode 100644
index 00000000000..0beb20cfbd0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-3.c
@@ -0,0 +1,19 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int
+foo (int *p)
+{
+ return *p;
+}
+
+int
+main (void)
+{
+ int **p = 0;
+ return foo (*p);
+}
+
+/* { dg-output "load of null pointer of type 'int \\*'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-4.c b/gcc/testsuite/c-c++-common/ubsan/null-4.c
new file mode 100644
index 00000000000..b5f03ed0b60
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-4.c
@@ -0,0 +1,15 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int
+main (void)
+{
+ _Complex double *p = 0;
+ if (p[0])
+ return 42;
+ return 0;
+}
+
+/* { dg-output "load of null pointer of type 'complex double'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-5.c b/gcc/testsuite/c-c++-common/ubsan/null-5.c
new file mode 100644
index 00000000000..f6db4744446
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-5.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+typedef volatile const _Complex float *T;
+
+int
+main (void)
+{
+ T t = 0;
+ if (*t)
+ return 42;
+ return 0;
+}
+
+/* { dg-output "load of null pointer of type 'volatile const complex float'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-6.c b/gcc/testsuite/c-c++-common/ubsan/null-6.c
new file mode 100644
index 00000000000..705635c0d84
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-6.c
@@ -0,0 +1,14 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int
+main (void)
+{
+ unsigned long int *p = 0;
+ *p = 42;
+ return 0;
+}
+
+/* { dg-output "store to null pointer of type 'long unsigned int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-7.c b/gcc/testsuite/c-c++-common/ubsan/null-7.c
new file mode 100644
index 00000000000..1d8216a3d2f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-7.c
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int *
+gao (void)
+{
+ return 0;
+}
+
+int
+main (void)
+{
+ return *gao ();
+}
+
+/* { dg-output "load of null pointer of type 'int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-8.c b/gcc/testsuite/c-c++-common/ubsan/null-8.c
new file mode 100644
index 00000000000..2cf3939ca5e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-8.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+struct S {
+ int i;
+};
+
+int
+main (void)
+{
+ struct S *s = 0;
+ return s->i;
+}
+
+/* { dg-output "member access within null pointer of type 'struct S'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-9.c b/gcc/testsuite/c-c++-common/ubsan/null-9.c
new file mode 100644
index 00000000000..7fabbeca4c7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/null-9.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=null -w" } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+union U {
+ int i;
+};
+
+int
+main (void)
+{
+ union U *u = 0;
+ return u->i;
+}
+
+/* { dg-output "member access within null pointer of type 'union U'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-1.c b/gcc/testsuite/c-c++-common/ubsan/shift-1.c
index 48cf3cd7bff..0928ff7a102 100644
--- a/gcc/testsuite/c-c++-common/ubsan/shift-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-1.c
@@ -23,9 +23,9 @@ main (void)
return 0;
}
-/* { dg-output "shift exponent 152 is too large for \[^\n\r]*-bit type int(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 153 is too large for \[^\n\r]*-bit type int(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 154 is too large for \[^\n\r]*-bit type int(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 524 is too large for \[^\n\r]*-bit type long long unsigned int(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 370 is too large for \[^\n\r]*-bit type int(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 402 is too large for \[^\n\r]*-bit type long int(\n|\r\n|\r)" } */
+/* { dg-output "shift exponent 152 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 153 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 154 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 524 is too large for \[^\n\r]*-bit type 'long long unsigned int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 370 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 402 is too large for \[^\n\r]*-bit type 'long int'(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-noexcept7.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-noexcept7.C
new file mode 100644
index 00000000000..2a70d7bb529
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-noexcept7.C
@@ -0,0 +1,9 @@
+// PR c++/53473
+// { dg-do compile { target c++11 } }
+
+template<typename T> struct A
+{
+ static constexpr T foo() noexcept { return 0; }
+};
+
+template<> constexpr int A<int>::foo() noexcept { return 0; }
diff --git a/gcc/testsuite/g++.dg/ipa/devirt-9.C b/gcc/testsuite/g++.dg/ipa/devirt-9.C
index 5be458cbb41..7fd0bf5f5cd 100644
--- a/gcc/testsuite/g++.dg/ipa/devirt-9.C
+++ b/gcc/testsuite/g++.dg/ipa/devirt-9.C
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-ipa-inline" } */
+/* { dg-options "-O2 -fdump-ipa-whole-program" } */
double foo ();
struct B
{
@@ -27,5 +27,7 @@ bar ()
static C c;
c.c1 (60, (int) foo ());
}
-/* { dg-final { scan-ipa-dump "Discovered a virtual call to a known target" "inline" } } */
-/* { dg-final { cleanup-ipa-dump "inline" } } */
+/* We optimize out this call just after early passes. Unfortunately
+ this unreachable removal is not logged in the dump file. */
+/* { dg-final { scan-ipa-dump-not "OBJ_TYPE_REF" "whole-program" } } */
+/* { dg-final { cleanup-ipa-dump "whole-program" } } */
diff --git a/gcc/testsuite/gcc.c-torture/execute/20101011-1.c b/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
index bd54318a263..56b48ca72bf 100644
--- a/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+++ b/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
@@ -12,10 +12,9 @@
#elif defined (__sh__)
/* On SH division by zero does not trap. */
# define DO_TEST 0
-#elif defined (__aarch64__) && !defined(__linux__)
- /* AArch64 divisions do trap by default, but libgloss targets do not
- intercept the trap and raise a SIGFPE. So restrict the test to
- AArch64 systems that use the Linux kernel. */
+#elif defined (__aarch64__)
+ /* On AArch64 integer division by zero does not trap. */
+# define DO_TEST 0
#elif defined (__TMS320C6X__)
/* On TI C6X division by zero does not trap. */
# define DO_TEST 0
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-14.c b/gcc/testsuite/gcc.dg/builtin-object-size-14.c
new file mode 100644
index 00000000000..085011eda52
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-14.c
@@ -0,0 +1,28 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+extern void abort (void);
+extern char *strncpy(char *, const char *, __SIZE_TYPE__);
+
+union u {
+ struct {
+ char vi[8];
+ char pi[16];
+ };
+ char all[8+16+4];
+};
+
+void __attribute__((noinline,noclone))
+f(union u *u)
+{
+ char vi[8+1];
+ __builtin_strncpy(vi, u->vi, sizeof(u->vi));
+ if (__builtin_object_size (u->all, 1) != -1)
+ abort ();
+}
+int main()
+{
+ union u u;
+ f (&u);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-8.c b/gcc/testsuite/gcc.dg/builtin-object-size-8.c
index 7af64d3ab7a..f2d88f9d591 100644
--- a/gcc/testsuite/gcc.dg/builtin-object-size-8.c
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-8.c
@@ -1,4 +1,4 @@
-/* { dg-do run { xfail *-*-* } } */
+/* { dg-do run } */
/* { dg-options "-O2" } */
typedef __SIZE_TYPE__ size_t;
diff --git a/gcc/testsuite/gcc.dg/c11-complex-1.c b/gcc/testsuite/gcc.dg/c11-complex-1.c
index e2d3c460f40..4acb2bcc221 100644
--- a/gcc/testsuite/gcc.dg/c11-complex-1.c
+++ b/gcc/testsuite/gcc.dg/c11-complex-1.c
@@ -1,6 +1,7 @@
/* Test complex divide does not have the bug identified in N1496. */
/* { dg-do run } */
/* { dg-options "-std=c11 -pedantic-errors" } */
+/* { dg-add-options ieee } */
extern void abort (void);
extern void exit (int);
diff --git a/gcc/testsuite/gcc.dg/plugin/selfassign.c b/gcc/testsuite/gcc.dg/plugin/selfassign.c
index 2498153a273..cdab74a19ae 100644
--- a/gcc/testsuite/gcc.dg/plugin/selfassign.c
+++ b/gcc/testsuite/gcc.dg/plugin/selfassign.c
@@ -8,6 +8,7 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "toplev.h"
#include "basic-block.h"
#include "gimple.h"
diff --git a/gcc/testsuite/gcc.dg/plugin/start_unit_plugin.c b/gcc/testsuite/gcc.dg/plugin/start_unit_plugin.c
index 257aad85a8a..39f44626a55 100644
--- a/gcc/testsuite/gcc.dg/plugin/start_unit_plugin.c
+++ b/gcc/testsuite/gcc.dg/plugin/start_unit_plugin.c
@@ -11,6 +11,7 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
#include "toplev.h"
#include "basic-block.h"
#include "gimple.h"
diff --git a/gcc/testsuite/gcc.dg/strlenopt-14gf.c b/gcc/testsuite/gcc.dg/strlenopt-14gf.c
index 6e5c9b08e16..8b78538d41d 100644
--- a/gcc/testsuite/gcc.dg/strlenopt-14gf.c
+++ b/gcc/testsuite/gcc.dg/strlenopt-14gf.c
@@ -11,14 +11,14 @@
memcpy. */
/* { dg-final { scan-tree-dump-times "strlen \\(" 4 "strlen" } } */
/* { dg-final { scan-tree-dump-times "__memcpy_chk \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "__mempcpy_chk \\(" 2 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "__mempcpy_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "__strcpy_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "__strcat_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strchr \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "__stpcpy_chk \\(" 3 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "memcpy \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "mempcpy \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "__stpcpy_chk \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "memcpy \\(" 1 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "mempcpy \\(" 2 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strcpy \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strcat \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "stpcpy \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "stpcpy \\(" 2 "strlen" } } */
/* { dg-final { cleanup-tree-dump "strlen" } } */
diff --git a/gcc/testsuite/gcc.dg/strlenopt-1f.c b/gcc/testsuite/gcc.dg/strlenopt-1f.c
index e0a2c926ca2..50c5f91306a 100644
--- a/gcc/testsuite/gcc.dg/strlenopt-1f.c
+++ b/gcc/testsuite/gcc.dg/strlenopt-1f.c
@@ -6,12 +6,12 @@
#include "strlenopt-1.c"
/* { dg-final { scan-tree-dump-times "strlen \\(" 2 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "__memcpy_chk \\(" 4 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "__memcpy_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "__strcpy_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "__strcat_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strchr \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "__stpcpy_chk \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "memcpy \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "memcpy \\(" 4 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strcpy \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strcat \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "stpcpy \\(" 0 "strlen" } } */
diff --git a/gcc/testsuite/gcc.dg/strlenopt-4gf.c b/gcc/testsuite/gcc.dg/strlenopt-4gf.c
index 743066f2eb0..e1762366e45 100644
--- a/gcc/testsuite/gcc.dg/strlenopt-4gf.c
+++ b/gcc/testsuite/gcc.dg/strlenopt-4gf.c
@@ -7,13 +7,13 @@
#include "strlenopt-4.c"
/* { dg-final { scan-tree-dump-times "strlen \\(" 1 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "__memcpy_chk \\(" 4 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "__strcpy_chk \\(" 1 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "__memcpy_chk \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "__strcpy_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "__strcat_chk \\(" 0 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strchr \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "__stpcpy_chk \\(" 5 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "memcpy \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "strcpy \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "__stpcpy_chk \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "memcpy \\(" 4 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "strcpy \\(" 1 "strlen" } } */
/* { dg-final { scan-tree-dump-times "strcat \\(" 0 "strlen" } } */
-/* { dg-final { scan-tree-dump-times "stpcpy \\(" 0 "strlen" } } */
+/* { dg-final { scan-tree-dump-times "stpcpy \\(" 5 "strlen" } } */
/* { dg-final { cleanup-tree-dump "strlen" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr57517.c b/gcc/testsuite/gcc.dg/torture/pr57517.c
new file mode 100644
index 00000000000..2422d8ee64a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr57517.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+
+int x[1024], y[1024], z[1024], w[1024];
+void foo (void)
+{
+ int i;
+ for (i = 1; i < 1024; ++i)
+ {
+ int a = x[i];
+ int b = y[i];
+ int c = x[i-1];
+ int d = y[i-1];
+ if (w[i])
+ z[i] = (a + b) + (c + d);
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr58956.c b/gcc/testsuite/gcc.dg/torture/pr58956.c
new file mode 100644
index 00000000000..7576ba7fb5c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr58956.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+
+extern void abort (void);
+
+struct S
+{
+ int f0;
+} a = {1}, b, g, *c = &b, **f = &c;
+
+int *d, **e = &d, h;
+
+struct S
+foo ()
+{
+ *e = &h;
+ if (!d)
+ __builtin_unreachable ();
+ *f = &g;
+ return a;
+}
+
+int
+main ()
+{
+ struct S *i = c;
+ *i = foo ();
+ if (b.f0 != 1)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr59164.c b/gcc/testsuite/gcc.dg/torture/pr59164.c
new file mode 100644
index 00000000000..1ec69610c21
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr59164.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+
+int a, d, e;
+long b[10];
+int c[10][8];
+
+int fn1(p1)
+{
+ return 1 >> p1;
+}
+
+void fn2(void)
+{
+ int f;
+ for (a=1; a <= 4; a++)
+ {
+ f = fn1(0 < c[a][0]);
+ if (f || d)
+ e = b[a] = 1;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tree-prof/cold_partition_label.c b/gcc/testsuite/gcc.dg/tree-prof/cold_partition_label.c
new file mode 100644
index 00000000000..9dc75668e8d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-prof/cold_partition_label.c
@@ -0,0 +1,36 @@
+/* Test case to check if function foo gets split and the cold function
+ gets a label. */
+/* { dg-require-effective-target freorder } */
+/* { dg-options "-O2 -freorder-blocks-and-partition --save-temps" } */
+
+#define SIZE 10000
+
+const char *sarr[SIZE];
+const char *buf_hot;
+const char *buf_cold;
+
+__attribute__((noinline))
+void
+foo (int path)
+{
+ int i;
+ if (path)
+ {
+ for (i = 0; i < SIZE; i++)
+ sarr[i] = buf_hot;
+ }
+ else
+ {
+ for (i = 0; i < SIZE; i++)
+ sarr[i] = buf_cold;
+ }
+}
+
+int
+main (int argc, char *argv[])
+{
+ buf_hot = "hello";
+ buf_cold = "world";
+ foo (argc);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/c99-shift-2.c b/gcc/testsuite/gcc.dg/ubsan/c99-shift-2.c
index 7dceb585739..c6662dc43bc 100644
--- a/gcc/testsuite/gcc.dg/ubsan/c99-shift-2.c
+++ b/gcc/testsuite/gcc.dg/ubsan/c99-shift-2.c
@@ -7,4 +7,4 @@ main (void)
int a = 1;
a <<= 31;
}
-/* { dg-output "left shift of 1 by 31 places cannot be represented in type int" } */
+/* { dg-output "left shift of 1 by 31 places cannot be represented in type 'int'" } */
diff --git a/gcc/testsuite/gcc.target/i386/memcpy-2.c b/gcc/testsuite/gcc.target/i386/memcpy-2.c
index fe53f2653a0..56cdd56fae4 100644
--- a/gcc/testsuite/gcc.target/i386/memcpy-2.c
+++ b/gcc/testsuite/gcc.target/i386/memcpy-2.c
@@ -1,11 +1,11 @@
/* { dg-do compile } */
/* { dg-options "-O2" } */
-/* Memcpy should be inlined because block size is known. */
-/* { dg-final { scan-assembler-not "memcpy" } } */
void *a;
void *b;
t(unsigned int c)
{
if (c<10)
- memcpy (a,b,c);
+ __builtin_memcpy (a,b,c+1);
}
+/* Memcpy should be inlined because block size is known. */
+/* { dg-final { scan-assembler-not "(jmp|call)\[\\t \]*memcpy" } } */
diff --git a/gcc/testsuite/gcc.target/i386/memcpy-3.c b/gcc/testsuite/gcc.target/i386/memcpy-3.c
new file mode 100644
index 00000000000..b9ea9c28e23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/memcpy-3.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+void *a;
+void *b;
+t(int c)
+{
+ if (c<10)
+ __builtin_memcpy (a,b,c);
+}
+/* Memcpy should be inlined because block size is known. */
+/* { dg-final { scan-assembler-not "(jmp|call)\[\\t \]*memcpy" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr59099.c b/gcc/testsuite/gcc.target/i386/pr59099.c
new file mode 100644
index 00000000000..7dc12ff3f7c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr59099.c
@@ -0,0 +1,76 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -fPIC -m32" } */
+
+void (*pfn)(void);
+
+struct s
+{
+ void** q;
+ int h;
+ int t;
+ int s;
+};
+
+
+void* f (struct s *, struct s *) __attribute__ ((noinline, regparm(1)));
+
+void*
+__attribute__ ((regparm(1)))
+f (struct s *p, struct s *p2)
+{
+ void *gp, *gp1;
+ int t, h, s, t2, h2, c, i;
+
+ if (p2->h == p2->t)
+ return 0;
+
+ (*pfn) ();
+
+ h = p->h;
+ t = p->t;
+ s = p->s;
+
+ h2 = p2->h;
+ t2 = p2->t;
+
+ gp = p2->q[h2++];
+
+ c = (t2 - h2) / 2;
+ for (i = 0; i != c; i++)
+ {
+ if (t == h || (h == 0 && t == s - 1))
+ break;
+ gp1 = p2->q[h2++];
+ p->q[t++] = gp1;
+ if (t == s)
+ t = 0;
+ }
+
+ p2->h = h2;
+ return gp;
+}
+
+static void gn () { }
+
+int
+main()
+{
+ struct s s1, s2;
+ void *q[10];
+
+ pfn = gn;
+
+ s1.q = q;
+ s1.h = 0;
+ s1.t = 2;
+ s1.s = 4;
+
+ s2.q = q;
+ s2.h = 0;
+ s2.t = 4;
+ s2.s = 2;
+
+ f (&s1, &s2);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/ppc64-abi-2.c b/gcc/testsuite/gcc.target/powerpc/ppc64-abi-2.c
index fdb781554c6..e4825973b11 100644
--- a/gcc/testsuite/gcc.target/powerpc/ppc64-abi-2.c
+++ b/gcc/testsuite/gcc.target/powerpc/ppc64-abi-2.c
@@ -121,6 +121,12 @@ typedef union
vector int v;
} vector_int_t;
+#ifdef __LITTLE_ENDIAN__
+#define MAKE_SLOT(x, y) ((long)x | ((long)y << 32))
+#else
+#define MAKE_SLOT(x, y) ((long)y | ((long)x << 32))
+#endif
+
/* Parameter passing.
s : gpr 3
v : vpr 2
@@ -228,8 +234,8 @@ fcevv (char *s, ...)
sp = __builtin_frame_address(0);
sp = sp->backchain;
- if (sp->slot[2].l != 0x100000002ULL
- || sp->slot[4].l != 0x500000006ULL)
+ if (sp->slot[2].l != MAKE_SLOT (1, 2)
+ || sp->slot[4].l != MAKE_SLOT (5, 6))
abort();
}
@@ -270,8 +276,8 @@ fciievv (char *s, int i, int j, ...)
sp = __builtin_frame_address(0);
sp = sp->backchain;
- if (sp->slot[4].l != 0x100000002ULL
- || sp->slot[6].l != 0x500000006ULL)
+ if (sp->slot[4].l != MAKE_SLOT (1, 2)
+ || sp->slot[6].l != MAKE_SLOT (5, 6))
abort();
}
@@ -298,8 +304,8 @@ fcvevv (char *s, vector int x, ...)
sp = __builtin_frame_address(0);
sp = sp->backchain;
- if (sp->slot[4].l != 0x100000002ULL
- || sp->slot[6].l != 0x500000006ULL)
+ if (sp->slot[4].l != MAKE_SLOT (1, 2)
+ || sp->slot[6].l != MAKE_SLOT (5, 6))
abort();
}
diff --git a/gcc/testsuite/gcc.target/s390/htm-1.c b/gcc/testsuite/gcc.target/s390/htm-1.c
deleted file mode 100644
index 245ba2c7ef9..00000000000
--- a/gcc/testsuite/gcc.target/s390/htm-1.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/* This checks the availability of the low-level builtins introduced
- for transactional execution. */
-
-/* { dg-do compile } */
-/* { dg-options "-O3 -march=zEC12 -mzarch" } */
-
-#include <stdint.h>
-#include <htmintrin.h>
-
-int global = 0;
-uint64_t g;
-struct __htm_tdb global_tdb;
-
-int
-foo (struct __htm_tdb* tdb, int reg, int *mem, uint64_t *mem64)
-{
-
- int cc;
- int n;
-
- cc = __builtin_tbegin (0);
- cc = __builtin_tbegin (tdb);
- cc = __builtin_tbegin (&global_tdb);
-
- cc = __builtin_tbegin_nofloat (0);
- cc = __builtin_tbegin_nofloat (&global_tdb);
-
- cc = __builtin_tbegin_retry (0, 42);
- cc = __builtin_tbegin_retry (0, reg);
- cc = __builtin_tbegin_retry (0, *mem);
- cc = __builtin_tbegin_retry (0, global);
- cc = __builtin_tbegin_retry (tdb, 42);
- cc = __builtin_tbegin_retry (&global_tdb, 42);
-
- cc = __builtin_tbegin_retry_nofloat (0, 42);
- cc = __builtin_tbegin_retry_nofloat (0, reg);
- cc = __builtin_tbegin_retry_nofloat (0, *mem);
- cc = __builtin_tbegin_retry_nofloat (0, global);
- cc = __builtin_tbegin_retry_nofloat (&global_tdb, 42);
-
- __builtin_tbeginc ();
-
- n = __builtin_tx_nesting_depth();
-
- __builtin_non_tx_store(&g, 23);
- __builtin_non_tx_store(mem64, 23);
- __builtin_non_tx_store(&g, reg);
- __builtin_non_tx_store(&g, *mem);
- __builtin_non_tx_store(&g, global);
-
- __builtin_tabort (42 + 255);
- __builtin_tabort (reg);
- /* { dg-final { scan-assembler-times "tabort\t255" 1 } } */
- __builtin_tabort (reg + 255);
- __builtin_tabort (*mem);
- __builtin_tabort (global);
- /* Here global + 255 gets reloaded into a reg. Better would be to
- just reload global or *mem and get the +255 for free as address
- arithmetic. */
- __builtin_tabort (*mem + 255);
- __builtin_tabort (global + 255);
-
- __builtin_tend();
-
- __builtin_tx_assist (23);
- __builtin_tx_assist (reg);
- __builtin_tx_assist (*mem);
- __builtin_tx_assist (global);
-}
-
-/* Make sure the tdb NULL argument ends up as immediate value in the
- instruction. */
-/* { dg-final { scan-assembler-times "tbegin\t0," 10 } } */
diff --git a/gcc/testsuite/gcc.target/s390/htm-builtins-1.c b/gcc/testsuite/gcc.target/s390/htm-builtins-1.c
new file mode 100644
index 00000000000..c90490faa59
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/htm-builtins-1.c
@@ -0,0 +1,1073 @@
+/* Functional tests of the htm __builtin_... intrinsics. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target htm } */
+/* { dg-options "-O3 -march=zEC12 -mzarch" } */
+
+/* ---------------------------- included header files ---------------------- */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <htmintrin.h>
+
+/* ---------------------------- local definitions -------------------------- */
+
+#define DEFAULT_MAX_REPETITIONS 5
+#define DEFAULT_REQUIRED_QUORUM ((DEFAULT_MAX_REPETITIONS) - 1)
+#define NUM_WARMUP_RUNS 10
+
+/* ---------------------------- local macros ------------------------------- */
+
+#define TEST_DF_REP(name) \
+ { #name, name, DEFAULT_MAX_REPETITIONS, DEFAULT_REQUIRED_QUORUM }
+#define TEST_NO_REP(name) { #name, name, 1, 1 }
+
+/* ---------------------------- local types -------------------------------- */
+
+typedef int (*test_func_t)(void);
+
+typedef struct
+{
+ const char *name;
+ test_func_t test_func;
+ int max_repetitions;
+ int required_quorum;
+} test_table_entry_t;
+
+/* ---------------------------- local variables ---------------------------- */
+
+__attribute__ ((aligned(256))) static struct __htm_tdb local_tdb256;
+static struct __htm_tdb local_tdb;
+static int do_dump_tdb = 0;
+
+/* ---------------------------- exported variables (globals) --------------- */
+
+__attribute__ ((aligned(256))) struct
+{
+ float float_1;
+ float float_2;
+ float float_3;
+} global = { 1.0, 2.5, 0.0 };
+
+__attribute__ ((aligned(256))) struct
+{
+ volatile uint64_t c1;
+ volatile uint64_t c2;
+ volatile uint64_t c3;
+} counters = { 0, 0, 0 };
+
+/* ---------------------------- local helper functions --------------------- */
+
+static void dump_tdb (struct __htm_tdb *tdb)
+{
+ unsigned char *p;
+ int i;
+ int j;
+
+ if (do_dump_tdb == 0)
+ {
+ return;
+ }
+ p = (unsigned char *)tdb;
+ for (i = 0; i < 16; i++)
+ {
+ fprintf (stderr, "0x%02x ", i * 16);
+ for (j = 0; j < 16; j++)
+ {
+ fprintf (stderr, "%02x", (int)p[i * 16 + j]);
+ if (j < 15)
+ {
+ fprintf (stderr, " ");
+ }
+ if (j == 7)
+ {
+ fprintf (stderr, " ");
+ }
+ }
+ fprintf (stderr, "\n");
+ }
+
+ return;
+}
+
+/* ---------------------------- local test functions ----------------------- */
+
+/* Check values of the constants defined in htmintrin.h. */
+static int test_constants (void)
+{
+ if (_HTM_TBEGIN_STARTED != 0)
+ {
+ return 100 * _HTM_TBEGIN_STARTED + 1;
+ }
+ if (_HTM_TBEGIN_INDETERMINATE != 1)
+ {
+ return 100 * _HTM_TBEGIN_INDETERMINATE + 2;
+ }
+ if (_HTM_TBEGIN_TRANSIENT != 2)
+ {
+ return 100 * _HTM_TBEGIN_TRANSIENT + 3;
+ }
+ if (_HTM_TBEGIN_PERSISTENT != 3)
+ {
+ return 100 * _HTM_TBEGIN_PERSISTENT + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_ntstg_tend (void)
+{
+ int rc;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ if ((rc = __builtin_tbegin ((void *)0)) == 0)
+ {
+ __builtin_non_tx_store ((uint64_t *)&counters.c1, 1);
+ counters.c2 = 2;
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 5;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 2)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ }
+ else
+ {
+ return 100 * rc + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_ntstg_tabort (void)
+{
+ float f;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ f = 0;
+ if (__builtin_tbegin ((void *)0) == 0)
+ {
+ __builtin_non_tx_store ((uint64_t *)&counters.c1, 1);
+ counters.c2 = 2;
+ f = 1;
+ __builtin_tabort (256);
+ return 1;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 0)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ if (f != 0)
+ {
+ return 100 * f + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_nofloat (void)
+{
+ int rc;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ if ((rc = __builtin_tbegin_nofloat ((void *)0)) == 0)
+ {
+ __builtin_non_tx_store ((uint64_t *)&counters.c1, 1);
+ counters.c2 = 2;
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 5;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 2)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ }
+ else
+ {
+ return 100 * rc + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_retry (void)
+{
+ int rc;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ counters.c3 = 0;
+ if ((rc = __builtin_tbegin_retry ((void *)0, 5)) == 0)
+ {
+ int do_abort;
+
+ do_abort = (counters.c1 == 0) ? 1 : 0;
+ __builtin_non_tx_store (
+ (uint64_t *)&counters.c1, counters.c1 + 1);
+ if (do_abort == 1)
+ {
+ __builtin_tabort (256);
+ }
+ counters.c2 = counters.c2 + 10;
+ __builtin_non_tx_store ((uint64_t *)&counters.c3, 3);
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 5;
+ }
+ if (counters.c1 != 2)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 10)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ if (counters.c3 != 3)
+ {
+ return 100 * counters.c3 + 6;
+ }
+ }
+ else
+ {
+ return 100 * rc + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_retry_nofloat (void)
+{
+ int rc;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ counters.c3 = 0;
+ if ((rc = __builtin_tbegin_retry_nofloat ((void *)0, 5)) == 0)
+ {
+ int do_abort;
+
+ do_abort = (counters.c1 == 0) ? 1 : 0;
+ __builtin_non_tx_store (
+ (uint64_t *)&counters.c1, counters.c1 + 1);
+ if (do_abort == 1)
+ {
+ __builtin_tabort (256);
+ }
+ counters.c2 = counters.c2 + 10;
+ __builtin_non_tx_store ((uint64_t *)&counters.c3, 3);
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 5;
+ }
+ if (counters.c1 != 2)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 10)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ if (counters.c3 != 3)
+ {
+ return 100 * counters.c3 + 6;
+ }
+ }
+ else
+ {
+ return 100 * rc + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_aborts (void)
+{
+ float f;
+ int rc;
+
+ f = 77;
+ if ((rc = __builtin_tbegin ((void *)0)) == 0)
+ {
+ f = 88;
+ __builtin_tabort (256);
+ return 2;
+ }
+ else if (rc != 2)
+ {
+ return 3;
+ }
+ if (f != 77)
+ {
+ return 4;
+ }
+ f = 66;
+ if ((rc = __builtin_tbegin ((void *)0)) == 0)
+ {
+ f = 99;
+ __builtin_tabort (257);
+ return 5;
+ }
+ else if (rc != 3)
+ {
+ return 100 * rc + 6;
+ }
+ if (f != 66)
+ {
+ return 100 * f + 7;
+ }
+ if ((rc = __builtin_tbegin ((void *)0)) == 0)
+ {
+ global.float_3 = global.float_1 + global.float_2;
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 8;
+ }
+ }
+ else
+ {
+ return 100 * rc + 9;
+ }
+ if (global.float_3 != global.float_1 + global.float_2)
+ {
+ return 100 * rc + 10;
+ }
+
+ return 0;
+}
+
+static __attribute__((noinline)) void indirect_abort(int abort_code)
+{
+ __builtin_tabort (abort_code);
+
+ return;
+}
+
+static int test_tbegin_indirect_aborts (void)
+{
+ float f;
+ int rc;
+
+ f = 77;
+ if ((rc = __builtin_tbegin ((void *)0)) == 0)
+ {
+ f = 88;
+ indirect_abort(256);
+ return 2;
+ }
+ else if (rc != 2)
+ {
+ return 100 * rc + 3;
+ }
+ if (f != 77)
+ {
+ return 100 * rc + 4;
+ }
+ f = 66;
+ if ((rc = __builtin_tbegin ((void *)0)) == 0)
+ {
+ f = 99;
+ indirect_abort(257);
+ return 5;
+ }
+ else if (rc != 3)
+ {
+ return 100 * rc + 6;
+ }
+ if (f != 66)
+ {
+ return 100 * f + 7;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_nofloat_aborts (void)
+{
+ int rc;
+
+ if ((rc = __builtin_tbegin_nofloat ((void *)0)) == 0)
+ {
+ __builtin_tabort (256);
+ return 2;
+ }
+ if ((rc = __builtin_tbegin_nofloat ((void *)0)) == 0)
+ {
+ __builtin_tabort (257);
+ return 1005;
+ }
+ else if (rc != 3)
+ {
+ return 1000 * rc + 6;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_nofloat_indirect_aborts (void)
+{
+ int rc;
+
+ if ((rc = __builtin_tbegin_nofloat ((void *)0)) == 0)
+ {
+ indirect_abort (256);
+ return 2;
+ }
+ if ((rc = __builtin_tbegin_nofloat ((void *)0)) == 0)
+ {
+ indirect_abort (257);
+ return 1005;
+ }
+ else if (rc != 3)
+ {
+ return 1000 * rc + 6;
+ }
+
+ return 0;
+}
+
+static
+int _test_tbegin_retry_aborts (int retries, uint64_t abort_code)
+{
+ int rc;
+
+ counters.c1 = 0;
+ if ((rc = __builtin_tbegin_retry ((void *)0, retries)) == 0)
+ {
+ __builtin_non_tx_store ((uint64_t *)&counters.c1, counters.c1 + 1);
+ __builtin_tabort (abort_code);
+ return 2;
+ }
+ else
+ {
+ if ((abort_code & 1) == 0)
+ {
+ if (rc != 2)
+ {
+ return 100 * rc + 2003;
+ }
+ else if (counters.c1 != (uint64_t)retries + 1)
+ {
+ return 1000 * counters.c1 + 100 * retries + 4;
+ }
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 100 * rc + 3005;
+ }
+ else if (counters.c1 != 1)
+ {
+ return 1000 * counters.c1 + 100 * retries + 6;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int test_tbegin_retry_aborts (void)
+{
+ int rc;
+ int retries;
+
+ for (retries = 1; retries <= 3; retries++)
+ {
+ rc = _test_tbegin_retry_aborts (retries, 256);
+ if (rc != 0)
+ {
+ return 10000 + rc;
+ }
+ }
+ for (retries = 1; retries <= 3; retries++)
+ {
+ rc = _test_tbegin_retry_aborts (retries, 257);
+ if (rc != 0)
+ {
+ return 20000 + rc;
+ }
+ }
+ if ((rc = __builtin_tbegin_retry ((void *)0, 5)) == 0)
+ {
+ global.float_3 = global.float_1 + global.float_2;
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 30000 + 100 * rc + 6;
+ }
+ }
+ else
+ {
+ return 30000 + 100 * rc + 7;
+ }
+
+ return 0;
+}
+
+static int _test_tbegin_retry_nofloat_aborts (int retries, uint64_t abort_code)
+{
+ int rc;
+
+ counters.c1 = 0;
+ if ((rc = __builtin_tbegin_retry_nofloat ((void *)0, retries)) == 0)
+ {
+ __builtin_non_tx_store ((uint64_t *)&counters.c1, counters.c1 + 1);
+ __builtin_tabort (abort_code);
+ return 2;
+ }
+ else
+ {
+ if ((abort_code & 1) == 0)
+ {
+ if (rc != 2)
+ {
+ return 100 * rc + 2003;
+ }
+ else if (counters.c1 != (uint64_t)retries + 1)
+ {
+ return 1000 * counters.c1 + 100 * retries + 4;
+ }
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 100 * rc + 3005;
+ }
+ else if (counters.c1 != 1)
+ {
+ return 1000 * counters.c1 + 100 * retries + 6;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int test_tbegin_retry_nofloat_aborts (void)
+{
+ int rc;
+ int retries;
+
+ for (retries = 1; retries <= 3; retries++)
+ {
+ rc = _test_tbegin_retry_nofloat_aborts (retries, 256);
+ if (rc != 0)
+ {
+ return 10 * retries + rc;
+ }
+ }
+ for (retries = 1; retries <= 3; retries++)
+ {
+ rc = _test_tbegin_retry_nofloat_aborts (retries, 257);
+ if (rc != 0)
+ {
+ return 10000 + 10 * retries + rc;
+ }
+ }
+
+ return 0;
+}
+
+static int test_tbegin_tdb (void)
+{
+ int rc;
+
+ local_tdb.format = 0;
+ if ((rc = __builtin_tbegin (&local_tdb)) == 0)
+ {
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 1;
+ }
+ if (local_tdb.format != 0)
+ {
+ dump_tdb (&local_tdb);
+ return 100 * local_tdb.format + 2;
+ }
+ }
+ else
+ {
+ return 100 * rc + 3;
+ }
+ local_tdb.format = 0;
+ if ((rc = __builtin_tbegin (&local_tdb)) == 0)
+ {
+ __builtin_tabort (257);
+ return 4;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 100 * rc + 5;
+ }
+ if (local_tdb.format != 1)
+ {
+ dump_tdb (&local_tdb);
+ return 100 * local_tdb.format + 6;
+ }
+ }
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin (&local_tdb256)) == 0)
+ {
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 1100 * rc + 1;
+ }
+ if (local_tdb256.format != 0)
+ {
+ dump_tdb (&local_tdb256);
+ return 1100 * local_tdb256.format + 2;
+ }
+ }
+ else
+ {
+ return 1100 * rc + 3;
+ }
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin (&local_tdb256)) == 0)
+ {
+ __builtin_tabort (257);
+ return 2004;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 2100 * rc + 5;
+ }
+ if (local_tdb256.format != 1)
+ {
+ dump_tdb (&local_tdb256);
+ return 2100 * local_tdb256.format + 6;
+ }
+ }
+
+ return 0;
+}
+
+static int test_tbegin_nofloat_tdb (void)
+{
+ int rc;
+
+ local_tdb.format = 0;
+ if ((rc = __builtin_tbegin_nofloat (&local_tdb)) == 0)
+ {
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 1;
+ }
+ if (local_tdb.format != 0)
+ {
+ dump_tdb (&local_tdb);
+ return 100 * local_tdb.format + 2;
+ }
+ }
+ else
+ {
+ return 3;
+ }
+ local_tdb.format = 0;
+ if ((rc = __builtin_tbegin_nofloat (&local_tdb)) == 0)
+ {
+ __builtin_tabort (257);
+ return 4;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 100 * rc + 5;
+ }
+ if (local_tdb.format != 1)
+ {
+ dump_tdb (&local_tdb);
+ return 100 * local_tdb.format + 6;
+ }
+ }
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin_nofloat (&local_tdb256)) == 0)
+ {
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 1100 * rc + 1;
+ }
+ if (local_tdb256.format != 0)
+ {
+ dump_tdb (&local_tdb256);
+ return 1100 * local_tdb256.format + 2;
+ }
+ }
+ else
+ {
+ return 1003;
+ }
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin_nofloat (&local_tdb256)) == 0)
+ {
+ __builtin_tabort (257);
+ return 2004;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 2100 * rc + 5;
+ }
+ if (local_tdb256.format != 1)
+ {
+ dump_tdb (&local_tdb256);
+ return 2100 * local_tdb256.format + 6;
+ }
+ }
+
+ return 0;
+}
+
+static int test_tbegin_retry_tdb (void)
+{
+ int rc;
+
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin_retry (&local_tdb256, 2)) == 0)
+ {
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 1100 * rc + 1;
+ }
+ if (local_tdb256.format != 0)
+ {
+ dump_tdb (&local_tdb256);
+ return 1100 * local_tdb256.format + 2;
+ }
+ }
+ else
+ {
+ return 1003;
+ }
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin_retry (&local_tdb256, 2)) == 0)
+ {
+ __builtin_tabort (257);
+ return 2004;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 2100 * rc + 5;
+ }
+ if (local_tdb256.format != 1)
+ {
+ dump_tdb (&local_tdb256);
+ return 2100 * local_tdb256.format + 6;
+ }
+ }
+
+ return 0;
+}
+
+static int test_tbegin_retry_nofloat_tdb (void)
+{
+ int rc;
+
+ local_tdb.format = 0;
+ if ((rc = __builtin_tbegin_retry_nofloat (&local_tdb, 2)) == 0)
+ {
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 100 * rc + 1;
+ }
+ if (local_tdb.format != 0)
+ {
+ dump_tdb (&local_tdb);
+ return 100 * local_tdb.format + 2;
+ }
+ }
+ else
+ {
+ return 100 * rc + 3;
+ }
+ local_tdb.format = 0;
+ if ((rc = __builtin_tbegin_retry_nofloat (&local_tdb, 2)) == 0)
+ {
+ __builtin_tabort (257);
+ return 4;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 100 * rc + 5;
+ }
+ if (local_tdb.format != 1)
+ {
+ dump_tdb (&local_tdb);
+ return 100 * local_tdb.format + 6;
+ }
+ }
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin_retry_nofloat (&local_tdb256, 2)) == 0)
+ {
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 1100 * rc + 1;
+ }
+ if (local_tdb256.format != 0)
+ {
+ dump_tdb (&local_tdb256);
+ return 1100 * local_tdb256.format + 2;
+ }
+ }
+ else
+ {
+ return 1100 * rc + 3;
+ }
+ local_tdb256.format = 0;
+ if ((rc = __builtin_tbegin_retry_nofloat (&local_tdb256, 2)) == 0)
+ {
+ __builtin_tabort (257);
+ return 2004;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 2100 * rc + 5;
+ }
+ if (local_tdb256.format != 1)
+ {
+ dump_tdb (&local_tdb256);
+ return 2100 * local_tdb256.format + 6;
+ }
+ }
+
+ return 0;
+}
+
+static int test_etnd (void)
+{
+ int rc;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ counters.c3 = 0;
+ if ((rc = __builtin_tbegin ((void *)0)) == 0)
+ {
+ counters.c1 = __builtin_tx_nesting_depth ();
+ if (__builtin_tbegin ((void *)0) == 0)
+ {
+ counters.c2 = __builtin_tx_nesting_depth ();
+ if (__builtin_tbegin ((void *)0) == 0)
+ {
+ counters.c3 = __builtin_tx_nesting_depth ();
+ __builtin_tend ();
+ }
+ __builtin_tend ();
+ }
+ __builtin_tend ();
+ }
+ else
+ {
+ return 100 * rc + 1;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 2)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ if (counters.c3 != 3)
+ {
+ return 100 * counters.c3 + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbeginc (void)
+{
+ int rc;
+
+ counters.c1 = 0;
+ __builtin_tbeginc ();
+ counters.c1 = 1;
+ rc = __builtin_tend ();
+ if (rc != 0)
+ {
+ return 10000 * rc + 1;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100000 * counters.c1 + 3;
+ }
+
+ return 0;
+}
+
+/* ---------------------------- local testing framework functions ---------- */
+
+static int run_one_test (const test_table_entry_t *test_entry)
+{
+ int do_print_passes;
+ int succeeded;
+ int rc;
+ int i;
+
+ /* Warmup run to get all necessary data and instruction pages into the page
+ * tables. */
+ {
+ int run;
+
+ do_dump_tdb = 0;
+ for (run = 0; run < NUM_WARMUP_RUNS; run++)
+ {
+ test_entry->test_func ();
+ }
+ do_dump_tdb = 1;
+ }
+ do_print_passes = (
+ test_entry->required_quorum != 1 ||
+ test_entry->max_repetitions != 1);
+ printf ("RRR RUN %s\n", test_entry->name);
+ if (do_print_passes == 1)
+ {
+ printf (
+ " (requires %d successful out of %d runs)\n",
+ test_entry->required_quorum,
+ test_entry->max_repetitions);
+ }
+ succeeded = 0;
+ rc = 0;
+ for (rc = 0, i = 0; i < test_entry->max_repetitions; i++)
+ {
+ if (do_print_passes == 1)
+ {
+ if (i == 0)
+ {
+ printf (" ");
+ }
+ else
+ {
+ printf (",");
+ }
+ }
+ rc = test_entry->test_func ();
+ if (rc == 0)
+ {
+ if (do_print_passes == 1)
+ {
+ printf (" success");
+ }
+ succeeded++;
+ if (succeeded >= test_entry->required_quorum)
+ {
+ break;
+ }
+ }
+ else
+ {
+ printf (" failed (rc = %d)", rc);
+ }
+ }
+ if (do_print_passes == 1 || rc != 0)
+ {
+ printf ("\n");
+ }
+ if (succeeded >= test_entry->required_quorum)
+ {
+ printf ("+++ OK %s\n", test_entry->name);
+
+ return 0;
+ }
+ else
+ {
+ printf ("--- FAIL %s\n", test_entry->name);
+
+ return (rc != 0) ? rc : -1;
+ }
+}
+
+static int run_all_tests (const test_table_entry_t *test_table)
+{
+ const test_table_entry_t *test;
+ int rc;
+
+ for (
+ rc = 0, test = &test_table[0];
+ test->test_func != NULL && rc == 0; test++)
+ {
+ rc = run_one_test (test);
+ }
+
+ return rc;
+}
+
+/* ---------------------------- interface functions ------------------------ */
+
+int main (void)
+{
+ const test_table_entry_t test_table[] = {
+ TEST_NO_REP (test_constants),
+ TEST_DF_REP (test_tbegin_ntstg_tend),
+ TEST_DF_REP (test_tbegin_ntstg_tabort),
+ TEST_DF_REP (test_tbegin_nofloat),
+ TEST_NO_REP (test_tbegin_retry),
+ TEST_NO_REP (test_tbegin_retry_nofloat),
+ TEST_DF_REP (test_tbegin_aborts),
+ TEST_DF_REP (test_tbegin_indirect_aborts),
+ TEST_DF_REP (test_tbegin_nofloat_aborts),
+ TEST_DF_REP (test_tbegin_nofloat_indirect_aborts),
+ TEST_NO_REP (test_tbegin_retry_aborts),
+ TEST_NO_REP (test_tbegin_retry_nofloat_aborts),
+ TEST_DF_REP (test_tbegin_tdb),
+ TEST_DF_REP (test_tbegin_nofloat_tdb),
+ TEST_NO_REP (test_tbegin_retry_tdb),
+ TEST_NO_REP (test_tbegin_retry_nofloat_tdb),
+ TEST_DF_REP (test_etnd),
+ TEST_DF_REP (test_tbeginc),
+ { (void *)0, 0, 0 }
+ };
+
+ {
+ int rc;
+
+ rc = run_all_tests (test_table);
+
+ return rc;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/s390/htm-builtins-2.c b/gcc/testsuite/gcc.target/s390/htm-builtins-2.c
new file mode 100644
index 00000000000..15b0d12ae92
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/htm-builtins-2.c
@@ -0,0 +1,682 @@
+/* Functional tests of the htm __TM_... macros. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target htm } */
+/* { dg-options "-O3 -march=zEC12 -mzarch" } */
+
+/* ---------------------------- included header files ---------------------- */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <htmxlintrin.h>
+
+/* ---------------------------- local definitions -------------------------- */
+
+#define DEFAULT_MAX_REPETITIONS 5
+#define DEFAULT_REQUIRED_QUORUM ((DEFAULT_MAX_REPETITIONS) - 1)
+#define DEFAULT_ABORT_ADDRESS (0x12345678u)
+
+/* ---------------------------- local macros ------------------------------- */
+
+#define TEST_DF_REP(name) \
+ { #name, name, DEFAULT_MAX_REPETITIONS, DEFAULT_REQUIRED_QUORUM }
+#define TEST_NO_REP(name) { #name, name, 1, 1 }
+
+/* ---------------------------- local types -------------------------------- */
+
+typedef int (*test_func_t)(void);
+
+typedef struct
+{
+ const char *name;
+ test_func_t test_func;
+ int max_repetitions;
+ int required_quorum;
+} test_table_entry_t;
+
+typedef enum
+{
+ ABORT_T_SYSTEM = 0,
+ ABORT_T_USER = 1,
+} abort_user_t;
+
+typedef enum
+{
+ ABORT_T_NONE = 0,
+ ABORT_T_ILLEGAL,
+ ABORT_T_FOOTPRINT_EXCEEDED,
+ ABORT_T_NESTED_TOO_DEEP,
+ ABORT_T_CONFLICT,
+
+ ABORT_T_INVALID_ABORT_CODE
+} abort_t;
+
+/* ---------------------------- local variables ---------------------------- */
+
+__attribute__ ((aligned(256))) static struct __htm_tdb local_tdb256;
+static struct __htm_tdb local_tdb;
+
+static abort_t const abort_classes[] =
+{
+ ABORT_T_INVALID_ABORT_CODE,
+ ABORT_T_NONE,
+ ABORT_T_NONE,
+ ABORT_T_NONE,
+
+ ABORT_T_ILLEGAL,
+ ABORT_T_NONE,
+ ABORT_T_NONE,
+ ABORT_T_FOOTPRINT_EXCEEDED,
+
+ ABORT_T_FOOTPRINT_EXCEEDED,
+ ABORT_T_CONFLICT,
+ ABORT_T_CONFLICT,
+ ABORT_T_ILLEGAL,
+
+ ABORT_T_NONE,
+ ABORT_T_NESTED_TOO_DEEP,
+ ABORT_T_NONE,
+ ABORT_T_NONE,
+
+ ABORT_T_NONE
+};
+
+static size_t num_abort_classes = sizeof(abort_classes) / sizeof(abort_t);
+
+/* ---------------------------- exported variables (globals) --------------- */
+
+int global_int = 0;
+uint64_t global_u64 = 0;
+float global_float_1 = 1.0;
+float global_float_2 = 2.5;
+float global_float_3 = 0.0;
+__attribute__ ((aligned(256))) struct
+{
+ volatile uint64_t c1;
+ volatile uint64_t c2;
+ volatile uint64_t c3;
+} counters = { 0, 0, 0 };
+
+/* ---------------------------- local helper functions --------------------- */
+
+static void dump_tdb(struct __htm_tdb *tdb)
+{
+ unsigned char *p;
+ int i;
+ int j;
+
+ p = (unsigned char *)tdb;
+ for (i = 0; i < 16; i++)
+ {
+ fprintf(stderr, "0x%02x ", i * 16);
+ for (j = 0; j < 16; j++)
+ {
+ fprintf(stderr, "%02x", (int)p[i * 16 + j]);
+ if (j < 15)
+ {
+ fprintf(stderr, " ");
+ }
+ if (j == 7)
+ {
+ fprintf(stderr, " ");
+ }
+ }
+ fprintf(stderr, "\n");
+ }
+
+ return;
+}
+
+static void make_fake_tdb(struct __htm_tdb *tdb)
+{
+ memset(tdb, 0, sizeof(*tdb));
+ tdb->format = 1;
+ tdb->nesting_depth = 1;
+ tdb->atia = DEFAULT_ABORT_ADDRESS;
+ tdb->abort_code = 11;
+
+ return;
+}
+
+static int check_abort_code_in_tdb(struct __htm_tdb *tdb, uint64_t abort_code)
+{
+ long expect_rc;
+ long rc;
+
+ if (abort_code != 0)
+ {
+ long addr;
+
+ addr = __TM_failure_address(&local_tdb);
+ if (addr != DEFAULT_ABORT_ADDRESS)
+ {
+ return 11;
+ }
+ }
+ {
+ long long tdb_abort_code;
+
+ tdb_abort_code = __TM_failure_code(tdb);
+ if ((uint64_t)tdb_abort_code != abort_code)
+ {
+ fprintf(
+ stderr, "tm_ac %" PRIu64 ", ac %" PRIu64
+ ", tdb_ac %" PRIu64 "\n",
+ (uint64_t)tdb_abort_code, abort_code,
+ (uint64_t)tdb->abort_code);
+ return 10;
+ }
+ }
+ expect_rc = (abort_code >= 256) ? 1 : 0;
+ rc = __TM_is_user_abort(tdb);
+ if (rc != expect_rc)
+ {
+ fprintf(stderr, "rc %ld, expect_rc %ld\n", rc, expect_rc);
+ return 1;
+ }
+ {
+ unsigned char code;
+
+ code = 0xffu;
+ rc = __TM_is_named_user_abort(tdb, &code);
+ if (rc != expect_rc)
+ {
+ fprintf(
+ stderr, "rc %ld, expect_rc %ld\n", rc,
+ expect_rc);
+ return 2;
+ }
+ if (expect_rc == 1 && code != abort_code - 256)
+ {
+ return 3;
+ }
+ }
+ if (abort_code > (uint64_t)num_abort_classes)
+ {
+ abort_code = (uint64_t)num_abort_classes;
+ }
+ expect_rc = (abort_classes[abort_code] == ABORT_T_ILLEGAL) ? 1 : 0;
+ rc = __TM_is_illegal(tdb);
+ if (rc != expect_rc)
+ {
+ dump_tdb(tdb);
+ fprintf(stderr, "rc %ld, expect_rc %ld\n", rc, expect_rc);
+ return 4;
+ }
+ expect_rc =
+ (abort_classes[abort_code] == ABORT_T_FOOTPRINT_EXCEEDED) ?
+ 1 : 0;
+ rc = __TM_is_footprint_exceeded(tdb);
+ if (rc != expect_rc)
+ {
+ dump_tdb(tdb);
+ fprintf(stderr, "rc %ld, expect_rc %ld\n", rc, expect_rc);
+ return 5;
+ }
+ expect_rc =
+ (abort_classes[abort_code] == ABORT_T_NESTED_TOO_DEEP) ? 1 : 0;
+ rc = __TM_is_nested_too_deep(tdb);
+ if (rc != expect_rc)
+ {
+ dump_tdb(tdb);
+ fprintf(stderr, "rc %ld, expect_rc %ld\n", rc, expect_rc);
+ return 6;
+ }
+ expect_rc = (abort_classes[abort_code] == ABORT_T_CONFLICT) ? 1 : 0;
+ rc = __TM_is_conflict(tdb);
+ if (rc != expect_rc)
+ {
+ dump_tdb(tdb);
+ fprintf(stderr, "rc %ld, expect_rc %ld\n", rc, expect_rc);
+ return 7;
+ }
+
+ return 0;
+}
+
+/* ---------------------------- local test functions ----------------------- */
+
+/* Not a test; make sure that the involved global cachelines are reserved for
+ * writing. */
+static int init_cache(void)
+{
+ make_fake_tdb(&local_tdb);
+ make_fake_tdb(&local_tdb256);
+ global_int = 0;
+ global_u64 = 0;
+ global_float_1 = 1.0;
+ global_float_2 = 2.5;
+ global_float_3 = 0.0;
+ counters.c1 = 0;
+ counters.c2 = 0;
+ counters.c3 = 0;
+
+ return 0;
+}
+
+static int test_abort_classification(void)
+{
+ int i;
+
+ make_fake_tdb(&local_tdb);
+ for (i = 0; i <= 256; i++)
+ {
+ int rc;
+
+ local_tdb.abort_code = (uint64_t)i;
+ rc = check_abort_code_in_tdb(&local_tdb, (uint64_t)i);
+ if (rc != 0)
+ {
+ return 100 * i + rc;
+ }
+ }
+
+ return 0;
+}
+
+static int test_cc_classification(void)
+{
+ long rc;
+
+ rc = __TM_is_failure_persistent(0);
+ if (rc != 0)
+ {
+ return 1;
+ }
+ rc = __TM_is_failure_persistent(1);
+ if (rc != 0)
+ {
+ return 2;
+ }
+ rc = __TM_is_failure_persistent(2);
+ if (rc != 0)
+ {
+ return 3;
+ }
+ rc = __TM_is_failure_persistent(3);
+ if (rc != 1)
+ {
+ return 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_ntstg_tend(void)
+{
+ long rc;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ if ((rc = __TM_simple_begin()) == 0)
+ {
+ __TM_non_transactional_store((uint64_t *)&counters.c1, 1);
+ counters.c2 = 2;
+ rc = __TM_end();
+ if (rc != 0)
+ {
+ return 100 * rc + 5;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 2)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ }
+ else
+ {
+ return 100 * rc + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_ntstg_tabort(void)
+{
+ register float f;
+
+ counters.c1 = 0;
+ counters.c2 = 0;
+ f = 0;
+ if (__TM_simple_begin() == 0)
+ {
+ __TM_non_transactional_store((uint64_t *)&counters.c1, 1);
+ counters.c2 = 2;
+ f = 1;
+ __TM_named_abort(0);
+ return 1;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 0)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ if (f != 0)
+ {
+ return 100 * f + 4;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_aborts(void)
+{
+ float f;
+ long rc;
+
+ f = 77;
+ if ((rc = __TM_simple_begin()) == 0)
+ {
+ f = 88;
+ __TM_abort();
+ return 2;
+ }
+ else if (rc != 2)
+ {
+ return 3;
+ }
+ if (f != 77)
+ {
+ return 4;
+ }
+ f = 66;
+ if ((rc = __TM_simple_begin()) == 0)
+ {
+ f = 99;
+ __TM_named_abort(3);
+ return 5;
+ }
+ else if (rc != 3)
+ {
+ return 100 * rc + 6;
+ }
+ if (f != 66)
+ {
+ return 100 * f + 7;
+ }
+ if ((rc = __TM_simple_begin()) == 0)
+ {
+ global_float_3 = global_float_1 + global_float_2;
+ rc = __TM_end();
+ if (rc != 0)
+ {
+ return 100 * rc + 8;
+ }
+ }
+ else
+ {
+ return 100 * rc + 9;
+ }
+ if (global_float_3 != global_float_1 + global_float_2)
+ {
+ return 100 * rc + 10;
+ }
+
+ return 0;
+}
+
+static int test_tbegin_tdb(void)
+{
+ long rc;
+
+ local_tdb.format = 0;
+ if ((rc = __TM_begin(&local_tdb)) == 0)
+ {
+ rc = __TM_end();
+ if (rc != 0)
+ {
+ return 100 * rc + 1;
+ }
+ if (local_tdb.format != 0)
+ {
+ dump_tdb(&local_tdb);
+ return 100 * local_tdb.format + 2;
+ }
+ }
+ else
+ {
+ return 100 * rc + 3;
+ }
+ local_tdb.format = 0;
+ if ((rc = __TM_begin(&local_tdb)) == 0)
+ {
+ __TM_named_abort(1);
+ return 4;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 100 * rc + 5;
+ }
+ if (local_tdb.format != 1)
+ {
+ dump_tdb(&local_tdb);
+ return 100 * local_tdb.format + 6;
+ }
+ }
+ local_tdb256.format = 0;
+ if ((rc = __TM_begin(&local_tdb256)) == 0)
+ {
+ rc = __TM_end();
+ if (rc != 0)
+ {
+ return 1100 * rc + 1;
+ }
+ if (local_tdb256.format != 0)
+ {
+ dump_tdb(&local_tdb256);
+ return 1100 * local_tdb256.format + 2;
+ }
+ }
+ else
+ {
+ return 1100 * rc + 3;
+ }
+#if 1 /*!!!does not work*/
+ local_tdb256.format = 0;
+ if ((rc = __TM_begin(&local_tdb256)) == 0)
+ {
+ __TM_named_abort(1);
+ return 2004;
+ }
+ else
+ {
+ if (rc != 3)
+ {
+ return 2100 * rc + 5;
+ }
+ if (local_tdb256.format != 1)
+ {
+ dump_tdb(&local_tdb256);
+ return 2100 * local_tdb256.format + 6;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static int test_etnd(void)
+{
+ long rc;
+
+ {
+ long nd;
+
+ make_fake_tdb(&local_tdb);
+ local_tdb.nesting_depth = 0;
+ nd = __TM_nesting_depth(&local_tdb);
+ if (nd != 0)
+ {
+ return 1;
+ }
+ local_tdb.nesting_depth = 7;
+ nd = __TM_nesting_depth(&local_tdb);
+ if (nd != 7)
+ {
+ return 7;
+ }
+ local_tdb.format = 0;
+ nd = __TM_nesting_depth(&local_tdb);
+ if (nd != 0)
+ {
+ return 2;
+ }
+ }
+ counters.c1 = 0;
+ counters.c1 = 0;
+ counters.c2 = 0;
+ counters.c3 = 0;
+ if ((rc = __TM_simple_begin()) == 0)
+ {
+ counters.c1 = __TM_nesting_depth(0);
+ if (__TM_simple_begin() == 0)
+ {
+ counters.c2 = __TM_nesting_depth(0);
+ if (__TM_simple_begin() == 0)
+ {
+ counters.c3 = __TM_nesting_depth(0);
+ __TM_end();
+ }
+ __TM_end();
+ }
+ __TM_end();
+ }
+ else
+ {
+ return 100 * rc + 1;
+ }
+ if (counters.c1 != 1)
+ {
+ return 100 * counters.c1 + 2;
+ }
+ if (counters.c2 != 2)
+ {
+ return 100 * counters.c2 + 3;
+ }
+ if (counters.c3 != 3)
+ {
+ return 100 * counters.c3 + 4;
+ }
+
+ return 0;
+}
+
+/* ---------------------------- local testing framework functions ---------- */
+
+static int run_one_test(const test_table_entry_t *test_entry)
+{
+ int do_print_passes;
+ int succeeded;
+ int rc;
+ int i;
+
+ do_print_passes = (
+ test_entry->required_quorum != 1 ||
+ test_entry->max_repetitions != 1);
+ printf("RRR RUN %s\n", test_entry->name);
+ if (do_print_passes == 1)
+ {
+ printf(
+ " (requires %d successful out of %d runs)\n",
+ test_entry->required_quorum,
+ test_entry->max_repetitions);
+ }
+ succeeded = 0;
+ rc = 0;
+ for (rc = 0, i = 0; i < test_entry->max_repetitions; i++)
+ {
+ if (do_print_passes == 1)
+ {
+ if (i == 0)
+ {
+ printf(" ");
+ }
+ else
+ {
+ printf(",");
+ }
+ }
+ rc = test_entry->test_func();
+ if (rc == 0)
+ {
+ if (do_print_passes == 1)
+ {
+ printf(" success");
+ }
+ succeeded++;
+ if (succeeded >= test_entry->required_quorum)
+ {
+ break;
+ }
+ }
+ else
+ {
+ printf(" failed (rc = %d)", rc);
+ }
+ }
+ if (do_print_passes == 1 || rc != 0)
+ {
+ printf("\n");
+ }
+ if (succeeded >= test_entry->required_quorum)
+ {
+ printf("+++ OK %s\n", test_entry->name);
+
+ return 0;
+ }
+ else
+ {
+ printf("--- FAIL %s\n", test_entry->name);
+
+ return (rc != 0) ? rc : -1;
+ }
+}
+
+static int run_all_tests(const test_table_entry_t *test_table)
+{
+ const test_table_entry_t *test;
+ int rc;
+
+ for (
+ rc = 0, test = &test_table[0];
+ test->test_func != NULL && rc == 0; test++)
+ {
+ rc = run_one_test(test);
+ }
+
+ return rc;
+}
+
+/* ---------------------------- interface functions ------------------------ */
+
+int main(void)
+{
+ const test_table_entry_t test_table[] = {
+ TEST_NO_REP(init_cache),
+ TEST_NO_REP(test_abort_classification),
+ TEST_NO_REP(test_cc_classification),
+ TEST_DF_REP(test_tbegin_ntstg_tend),
+ TEST_DF_REP(test_tbegin_ntstg_tabort),
+ TEST_DF_REP(test_tbegin_aborts),
+ TEST_DF_REP(test_tbegin_tdb),
+ TEST_DF_REP(test_etnd),
+ { (void *)0, 0, 0 }
+ };
+
+ {
+ int rc;
+
+ rc = run_all_tests(test_table);
+
+ return rc;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/s390/htm-builtins-compile-1.c b/gcc/testsuite/gcc.target/s390/htm-builtins-compile-1.c
new file mode 100644
index 00000000000..c1b98e2bba4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/htm-builtins-compile-1.c
@@ -0,0 +1,164 @@
+/* This checks the availability of the low-level builtins introduced
+ for transactional execution. */
+
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=zEC12 -mzarch" } */
+
+#include <stdint.h>
+#include <htmintrin.h>
+
+int global = 0;
+uint64_t g;
+struct __htm_tdb global_tdb;
+
+int
+foo (struct __htm_tdb* tdb, int reg, int *mem, uint64_t *mem64)
+{
+
+ int cc;
+ int n;
+
+ __builtin_tbegin ((void *)0);
+ __builtin_tbegin ((void *)-99999);
+ __builtin_tbegin ((void *)99999);
+ while (__builtin_tbegin ((void *)0) != 0)
+ {
+ }
+ cc = __builtin_tbegin ((void *)0x12345678);
+ cc = __builtin_tbegin (tdb);
+ cc = __builtin_tbegin (&global_tdb);
+ cc = __builtin_tbegin ((void *)(long long)(reg + 0x12345678));
+ cc = __builtin_tbegin ((void *)(long long)(reg));
+
+ __builtin_tbegin_nofloat ((void *)0);
+ __builtin_tbegin_nofloat ((void *)-99999);
+ __builtin_tbegin_nofloat ((void *)99999);
+ cc = __builtin_tbegin_nofloat ((void *)0x12345678);
+ cc = __builtin_tbegin_nofloat (tdb);
+ cc = __builtin_tbegin_nofloat (&global_tdb);
+ cc = __builtin_tbegin_nofloat ((void *)(long long)(reg + 0x12345678));
+ cc = __builtin_tbegin_nofloat ((void *)(long long)(reg));
+
+ __builtin_tbegin_retry ((void *)0, 0);
+ cc = __builtin_tbegin_retry ((void *)0, 1);
+ cc = __builtin_tbegin_retry ((void *)0, -1);
+ cc = __builtin_tbegin_retry ((void *)0, 42);
+ cc = __builtin_tbegin_retry ((void *)0, reg);
+ cc = __builtin_tbegin_retry ((void *)0, *mem);
+ cc = __builtin_tbegin_retry ((void *)0, global);
+ cc = __builtin_tbegin_retry (tdb, 42);
+ cc = __builtin_tbegin_retry (&global_tdb, 42);
+ cc = __builtin_tbegin_retry ((void *)0x12345678, global);
+ cc = __builtin_tbegin_retry (
+ (void *)(long long) (reg + 0x12345678), global + 1);
+ cc = __builtin_tbegin_retry (
+ (void *)(long long)(reg), global - 1);
+
+ __builtin_tbegin_retry_nofloat ((void *)0, 0);
+ cc = __builtin_tbegin_retry_nofloat ((void *)0, 1);
+ cc = __builtin_tbegin_retry_nofloat ((void *)0, -1);
+ cc = __builtin_tbegin_retry_nofloat ((void *)0, 42);
+ cc = __builtin_tbegin_retry_nofloat ((void *)0, reg);
+ cc = __builtin_tbegin_retry_nofloat ((void *)0, *mem);
+ cc = __builtin_tbegin_retry_nofloat ((void *)0, global);
+ cc = __builtin_tbegin_retry_nofloat (tdb, 42);
+ cc = __builtin_tbegin_retry_nofloat (&global_tdb, 42);
+ cc = __builtin_tbegin_retry_nofloat ((void *)0x12345678, global);
+ cc = __builtin_tbegin_retry_nofloat (
+ (void *)(long long) (reg + 0x12345678), global + 1);
+ cc = __builtin_tbegin_retry_nofloat (
+ (void *)(long long)(reg), global - 1);
+
+ __builtin_tbeginc ();
+
+ __builtin_tx_nesting_depth ();
+ n = __builtin_tx_nesting_depth ();
+
+ __builtin_non_tx_store (mem64, 0);
+ {
+ const uint64_t val_var = 0x1122334455667788;
+
+ __builtin_non_tx_store (mem64, val_var);
+ }
+ __builtin_non_tx_store (mem64, (uint64_t)reg);
+ __builtin_non_tx_store (mem64, g);
+ __builtin_non_tx_store ((uint64_t *)0, 0);
+ __builtin_non_tx_store ((uint64_t *)0x12345678, 0);
+ __builtin_non_tx_store (&g, 23);
+ __builtin_non_tx_store (&g, reg);
+ __builtin_non_tx_store (&g, *mem);
+ __builtin_non_tx_store (&g, global);
+
+ __builtin_tend();
+
+ __builtin_tx_assist (0);
+ __builtin_tx_assist (1);
+ __builtin_tx_assist (reg);
+ __builtin_tx_assist (*mem);
+ __builtin_tx_assist (global);
+}
+
+/* The taborts must go into a separate function since they are
+ "noreturn". */
+
+void
+tabort1 ()
+{
+ __builtin_tabort (256);
+}
+
+void
+tabort2 (int reg)
+{
+ __builtin_tabort (reg);
+}
+
+void
+tabort3 (int reg)
+{
+ /* { dg-final { scan-assembler-times "tabort\t255" 1 } } */
+ __builtin_tabort (reg + 255);
+}
+
+void
+tabort4 (int *mem)
+{
+ __builtin_tabort (*mem);
+}
+
+void
+tabort5 ()
+{
+ __builtin_tabort (global);
+}
+
+void
+tabort6 (int *mem)
+{
+ /* Here global + 255 gets reloaded into a reg. Better would be to
+ just reload global or *mem and get the +255 for free as address
+ arithmetic. */
+ __builtin_tabort (*mem + 255);
+}
+
+void
+tabort7 ()
+{
+ __builtin_tabort (global + 255);
+}
+
+void
+tabort8 ()
+{
+ __builtin_tabort (-1);
+}
+
+
+/* Make sure the tdb NULL argument ends up as immediate value in the
+ instruction. */
+/* { dg-final { scan-assembler-times "tbegin\t0," 17 } } */
+/* { dg-final { scan-assembler-times "tbegin\t" 41 } } */
+/* Check number of occurrences of certain instructions. */
+/* { dg-final { scan-assembler-times "tbeginc\t" 1 } } */
+/* { dg-final { scan-assembler-times "tabort\t" 8 } } */
+/* { dg-final { scan-assembler "ppa\t" } } */
diff --git a/gcc/testsuite/gcc.target/s390/htm-builtins-compile-2.c b/gcc/testsuite/gcc.target/s390/htm-builtins-compile-2.c
new file mode 100644
index 00000000000..67d76a6d3d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/htm-builtins-compile-2.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=zEC12 -mzarch" } */
+
+void must_not_compile1 (void)
+{
+ __builtin_tabort (0); /* { dg-error "Invalid transaction abort code:" } */
+}
+
+void must_not_compile2 (void)
+{
+ __builtin_tabort (255); /* { dg-error "Invalid transaction abort code:" } */
+}
diff --git a/gcc/testsuite/gcc.target/s390/htm-xl-intrin-1.c b/gcc/testsuite/gcc.target/s390/htm-builtins-compile-3.c
index 77ceeb7706f..77ceeb7706f 100644
--- a/gcc/testsuite/gcc.target/s390/htm-xl-intrin-1.c
+++ b/gcc/testsuite/gcc.target/s390/htm-builtins-compile-3.c
diff --git a/gcc/testsuite/gcc.target/s390/s390.exp b/gcc/testsuite/gcc.target/s390/s390.exp
index a4a6609cb01..f7f9ad25607 100644
--- a/gcc/testsuite/gcc.target/s390/s390.exp
+++ b/gcc/testsuite/gcc.target/s390/s390.exp
@@ -24,6 +24,19 @@ if ![istarget s390*-*-*] then {
# Load support procs.
load_lib gcc-dg.exp
+# Return 1 if htm (etnd - extract nesting depth) instructions can be
+# compiled and executed.
+proc check_effective_target_htm { } {
+ if { ![check_runtime s390_check_htm [subst {
+ int main (void)
+ {
+ unsigned int nd = 77;
+ asm (".insn rre,0xb2ec0000,%0,0" : "=d" (nd));
+ return nd;
+ }
+ }]] } { return 0 } else { return 1 }
+}
+
# If a testcase doesn't have special options, use these.
global DEFAULT_CFLAGS
if ![info exists DEFAULT_CFLAGS] then {
diff --git a/gcc/testsuite/gfortran.fortran-torture/compile/pr57517.f90 b/gcc/testsuite/gfortran.fortran-torture/compile/pr57517.f90
new file mode 100644
index 00000000000..f32698aa3a6
--- /dev/null
+++ b/gcc/testsuite/gfortran.fortran-torture/compile/pr57517.f90
@@ -0,0 +1,13 @@
+SUBROUTINE cal_helicity (uh, ph, phb, wavg, ims, ime, its, ite)
+ INTEGER, INTENT( IN ) :: ims, ime, its, ite
+ REAL, DIMENSION( ims:ime), INTENT( IN ) :: ph, phb, wavg
+ REAL, DIMENSION( ims:ime), INTENT( INOUT ) :: uh
+ INTEGER :: i
+ REAL :: zu
+ DO i = its, ite
+ zu = (ph(i ) + phb(i)) + (ph(i-1) + phb(i-1))
+ IF (wavg(i) .GT. 0) THEN
+ uh(i) = uh(i) + zu
+ ENDIF
+ END DO
+END SUBROUTINE cal_helicity
diff --git a/gcc/timevar.def b/gcc/timevar.def
index 897f66dd82e..dd590ec385c 100644
--- a/gcc/timevar.def
+++ b/gcc/timevar.def
@@ -261,6 +261,7 @@ DEFTIMEVAR (TV_PLUGIN_INIT , "plugin initialization")
DEFTIMEVAR (TV_PLUGIN_RUN , "plugin execution")
DEFTIMEVAR (TV_GIMPLE_SLSR , "straight-line strength reduction")
DEFTIMEVAR (TV_VTABLE_VERIFICATION , "vtable verification")
+DEFTIMEVAR (TV_TREE_UBSAN , "tree ubsan")
/* Everything else in rest_of_compilation not included above. */
DEFTIMEVAR (TV_EARLY_LOCAL , "early local passes")
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 66477b651dc..5fedcea9002 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -29,6 +29,8 @@ along with GCC; see the file COPYING3. If not see
#include "line-map.h"
#include "input.h"
#include "tree.h"
+#include "varasm.h"
+#include "tree-inline.h"
#include "realmpfr.h" /* For GMP/MPFR/MPC versions, in print_version. */
#include "version.h"
#include "rtl.h"
diff --git a/gcc/tracer.c b/gcc/tracer.c
index 71a9201fd09..1ff89c56b75 100644
--- a/gcc/tracer.c
+++ b/gcc/tracer.c
@@ -227,7 +227,7 @@ static bool
tail_duplicate (void)
{
fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block);
- basic_block *trace = XNEWVEC (basic_block, n_basic_blocks);
+ basic_block *trace = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
int *counts = XNEWVEC (int, last_basic_block);
int ninsns = 0, nduplicated = 0;
gcov_type weighted_insns = 0, traced_insns = 0;
@@ -371,7 +371,7 @@ tracer (void)
{
bool changed;
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return 0;
mark_dfs_back_edges ();
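
The tracer.c hunks are one instance of a renaming applied throughout this merge: macros that implicitly read cfun (n_basic_blocks, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, ...) are replaced by accessors that name the struct function explicitly. A minimal sketch of the idiom (GCC-internal code, assuming basic-block.h and function.h; other_fn is hypothetical):

/* Old: only the current function could be queried.  */
int n = n_basic_blocks;                          /* implicitly uses cfun */

/* New: the function is an explicit argument, so any CFG can be queried.  */
int n_cur   = n_basic_blocks_for_fn (cfun);
int n_other = n_basic_blocks_for_fn (other_fn);  /* hypothetical other function */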
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index 879f37b9177..271f600323b 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -23,6 +23,10 @@
#include "hash-table.h"
#include "tree.h"
#include "gimple.h"
+#include "calls.h"
+#include "function.h"
+#include "rtl.h"
+#include "emit-rtl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
@@ -30,6 +34,7 @@
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
@@ -1945,7 +1950,7 @@ tm_region_init (struct tm_region *region)
vec<tm_region_p> bb_regions = vNULL;
all_tm_regions = region;
- bb = single_succ (ENTRY_BLOCK_PTR);
+ bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* We could store this information in bb->aux, but we may get called
through get_all_tm_blocks() from another pass that may be already
@@ -2011,7 +2016,7 @@ gate_tm_init (void)
struct tm_region *region = (struct tm_region *)
obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
memset (region, 0, sizeof (*region));
- region->entry_block = single_succ (ENTRY_BLOCK_PTR);
+ region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* For a clone, the entire function is the region. But even if
we don't need to record any exit blocks, we may need to
record irrevocable blocks. */
@@ -3628,7 +3633,8 @@ tm_memopt_compute_available (struct tm_region *region,
/* If the out state of this block changed, then we need to add
its successors to the worklist if they are not already in. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
+ if (!AVAIL_IN_WORKLIST_P (e->dest)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
*qin++ = e->dest;
AVAIL_IN_WORKLIST_P (e->dest) = true;
@@ -4534,12 +4540,14 @@ ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
if (for_clone)
{
old_irr = d->irrevocable_blocks_clone;
- queue.quick_push (single_succ (ENTRY_BLOCK_PTR));
+ queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
{
- ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
+ ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ new_irr,
old_irr, NULL);
- ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
+ ret = bitmap_bit_p (new_irr,
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
}
}
else
@@ -5289,7 +5297,8 @@ ipa_tm_transform_clone (struct cgraph_node *node)
calculate_dominance_info (CDI_DOMINATORS);
need_ssa_rename =
- ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
+ ipa_tm_transform_calls (d->clone, NULL,
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
d->irrevocable_blocks_clone);
if (need_ssa_rename)
diff --git a/gcc/trans-mem.h b/gcc/trans-mem.h
index 09f0b4d66e4..d68171fc4ea 100644
--- a/gcc/trans-mem.h
+++ b/gcc/trans-mem.h
@@ -17,6 +17,8 @@
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+#ifndef GCC_TRANS_MEM_H
+#define GCC_TRANS_MEM_H
/* These defines must match the enumerations in libitm.h. */
#define PR_INSTRUMENTEDCODE 0x0001
@@ -37,3 +39,12 @@
extern void compute_transaction_bits (void);
extern bool is_tm_ending (gimple);
+extern tree build_tm_abort_call (location_t, bool);
+extern bool is_tm_safe (const_tree);
+extern bool is_tm_pure (const_tree);
+extern bool is_tm_may_cancel_outer (tree);
+extern bool is_tm_ending_fndecl (tree);
+extern void record_tm_replacement (tree, tree);
+extern void tm_malloc_replacement (tree);
+
+#endif // GCC_TRANS_MEM_H
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index 93d458a38e1..615ae98f1b9 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "expr.h"
#include "tree-pretty-print.h"
#include "pointer-set.h"
#include "tree-affine.h"
diff --git a/gcc/tree-browser.c b/gcc/tree-browser.c
index dad06a5175a..c3483a7b3a7 100644
--- a/gcc/tree-browser.c
+++ b/gcc/tree-browser.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "hash-table.h"
#include "tree.h"
#include "tree-pretty-print.h"
+#include "print-tree.h"
#define TB_OUT_FILE stdout
#define TB_IN_FILE stdin
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
index 58003612e63..4c6aae7c484 100644
--- a/gcc/tree-call-cdce.c
+++ b/gcc/tree-call-cdce.c
@@ -24,11 +24,13 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "basic-block.h"
#include "tree.h"
+#include "stor-layout.h"
#include "gimple-pretty-print.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index adf31244cb0..9ec7b4aa33e 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -24,6 +24,9 @@ along with GCC; see the file COPYING3. If not see
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
+#include "trans-mem.h"
+#include "stor-layout.h"
+#include "print-tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "flags.h"
@@ -39,10 +42,12 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-dump.h"
@@ -175,7 +180,7 @@ init_empty_tree_cfg_for_function (struct function *fn)
/* Initialize the basic block array. */
init_flow (fn);
profile_status_for_function (fn) = PROFILE_ABSENT;
- n_basic_blocks_for_function (fn) = NUM_FIXED_BLOCKS;
+ n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
last_basic_block_for_function (fn) = NUM_FIXED_BLOCKS;
vec_alloc (basic_block_info_for_function (fn), initial_cfg_capacity);
vec_safe_grow_cleared (basic_block_info_for_function (fn),
@@ -187,14 +192,14 @@ init_empty_tree_cfg_for_function (struct function *fn)
initial_cfg_capacity);
SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn));
+ ENTRY_BLOCK_PTR_FOR_FN (fn));
SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn));
+ EXIT_BLOCK_PTR_FOR_FN (fn));
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->next_bb
- = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->prev_bb
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
+ ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
+ = EXIT_BLOCK_PTR_FOR_FN (fn);
+ EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb
+ = ENTRY_BLOCK_PTR_FOR_FN (fn);
}
void
@@ -232,12 +237,12 @@ build_gimple_cfg (gimple_seq seq)
factor_computed_gotos ();
/* Make sure there is always at least one block, even if it's empty. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
- create_empty_bb (ENTRY_BLOCK_PTR);
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
+ create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Adjust the size of the array. */
- if (basic_block_info->length () < (size_t) n_basic_blocks)
- vec_safe_grow_cleared (basic_block_info, n_basic_blocks);
+ if (basic_block_info->length () < (size_t) n_basic_blocks_for_fn (cfun))
+ vec_safe_grow_cleared (basic_block_info, n_basic_blocks_for_fn (cfun));
/* To speed up statement iterator walks, we first purge dead labels. */
cleanup_dead_labels ();
@@ -264,12 +269,11 @@ static void
replace_loop_annotate ()
{
struct loop *loop;
- loop_iterator li;
basic_block bb;
gimple_stmt_iterator gsi;
gimple stmt;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
gsi = gsi_last_bb (loop->header);
stmt = gsi_stmt (gsi);
@@ -516,7 +520,7 @@ make_blocks (gimple_seq seq)
gimple stmt = NULL;
bool start_new_block = true;
bool first_stmt_of_seq = true;
- basic_block bb = ENTRY_BLOCK_PTR;
+ basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
while (!gsi_end_p (i))
{
@@ -607,7 +611,7 @@ create_bb (void *h, void *e, basic_block after)
/* Add the newly created block to the array. */
SET_BASIC_BLOCK (last_basic_block, bb);
- n_basic_blocks++;
+ n_basic_blocks_for_fn (cfun)++;
last_basic_block++;
return bb;
@@ -667,7 +671,8 @@ make_edges (void)
/* Create an edge from entry to the first block with executable
statements in it. */
- make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (NUM_FIXED_BLOCKS), EDGE_FALLTHRU);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), BASIC_BLOCK (NUM_FIXED_BLOCKS),
+ EDGE_FALLTHRU);
/* Traverse the basic block array placing edges. */
FOR_EACH_BB (bb)
@@ -685,7 +690,7 @@ make_edges (void)
fallthru = false;
break;
case GIMPLE_RETURN:
- make_edge (bb, EXIT_BLOCK_PTR, 0);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
fallthru = false;
break;
case GIMPLE_COND:
@@ -717,7 +722,8 @@ make_edges (void)
/* BUILTIN_RETURN is really a return statement. */
if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
- make_edge (bb, EXIT_BLOCK_PTR, 0), fallthru = false;
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0), fallthru =
+ false;
/* Some calls are known not to return. */
else
fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
@@ -1501,7 +1507,7 @@ gimple_can_merge_blocks_p (basic_block a, basic_block b)
if (!single_pred_p (b))
return false;
- if (b == EXIT_BLOCK_PTR)
+ if (b == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
/* If A ends by a statement causing exceptions or something similar, we
@@ -1637,9 +1643,8 @@ replace_uses_by (tree name, tree val)
if (current_loops)
{
struct loop *loop;
- loop_iterator li;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
substitute_in_loop_info (loop, name, val);
}
@@ -2105,7 +2110,8 @@ gimple_dump_cfg (FILE *file, int flags)
{
dump_function_header (file, current_function_decl, flags);
fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
- n_basic_blocks, n_edges, last_basic_block);
+ n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
+ last_basic_block);
brief_dump_cfg (file, flags | TDF_COMMENT);
fprintf (file, "\n");
@@ -2140,9 +2146,9 @@ dump_cfg_stats (FILE *file)
fprintf (file, fmt_str, "", " instances ", "used ");
fprintf (file, "---------------------------------------------------------\n");
- size = n_basic_blocks * sizeof (struct basic_block_def);
+ size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def);
total += size;
- fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks,
+ fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun),
SCALE (size), LABEL (size));
num_edges = 0;
@@ -4847,19 +4853,21 @@ gimple_verify_flow_info (void)
edge e;
edge_iterator ei;
- if (ENTRY_BLOCK_PTR->il.gimple.seq || ENTRY_BLOCK_PTR->il.gimple.phi_nodes)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
+ || ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
{
error ("ENTRY_BLOCK has IL associated with it");
err = 1;
}
- if (EXIT_BLOCK_PTR->il.gimple.seq || EXIT_BLOCK_PTR->il.gimple.phi_nodes)
+ if (EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
+ || EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
{
error ("EXIT_BLOCK has IL associated with it");
err = 1;
}
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e->flags & EDGE_FALLTHRU)
{
error ("fallthru to exit from bb %d", e->src->index);
@@ -5039,7 +5047,7 @@ gimple_verify_flow_info (void)
error ("wrong outgoing edge flags at end of bb %d", bb->index);
err = 1;
}
- if (single_succ (bb) != EXIT_BLOCK_PTR)
+ if (single_succ (bb) != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
error ("return edge does not point to exit in bb %d",
bb->index);
@@ -5279,7 +5287,7 @@ gimple_redirect_edge_and_branch (edge e, basic_block dest)
if (e->flags & EDGE_EH)
return redirect_eh_edge (e, dest);
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
ret = gimple_try_redirect_by_replacing_jump (e, dest);
if (ret)
@@ -5562,7 +5570,7 @@ gimple_duplicate_bb (basic_block bb)
gimple_seq phis = phi_nodes (bb);
gimple phi, stmt, copy;
- new_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
+ new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
/* Copy the PHI nodes. We ignore PHI node arguments here because
the incoming edges have not been setup yet. */
@@ -6899,9 +6907,9 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
FIXME, this is silly. The CFG ought to become a parameter to
these helpers. */
push_cfun (dest_cfun);
- make_edge (ENTRY_BLOCK_PTR, entry_bb, EDGE_FALLTHRU);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), entry_bb, EDGE_FALLTHRU);
if (exit_bb)
- make_edge (exit_bb, EXIT_BLOCK_PTR, 0);
+ make_edge (exit_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
pop_cfun ();
/* Back in the original function, the SESE region has disappeared,
@@ -7030,7 +7038,7 @@ dump_function_to_file (tree fndecl, FILE *file, int flags)
if (!ignore_topmost_bind)
fprintf (file, "{\n");
- if (any_var && n_basic_blocks_for_function (fun))
+ if (any_var && n_basic_blocks_for_fn (fun))
fprintf (file, "\n");
FOR_EACH_BB_FN (bb, fun)
@@ -7245,7 +7253,7 @@ print_loops (FILE *file, int verbosity)
{
basic_block bb;
- bb = ENTRY_BLOCK_PTR;
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
if (bb && bb->loop_father)
print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
}
@@ -7408,13 +7416,14 @@ gimple_flow_call_edges_add (sbitmap blocks)
int last_bb = last_basic_block;
bool check_last_block = false;
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return 0;
if (! blocks)
check_last_block = true;
else
- check_last_block = bitmap_bit_p (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+ check_last_block = bitmap_bit_p (blocks,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
@@ -7430,7 +7439,7 @@ gimple_flow_call_edges_add (sbitmap blocks)
Handle this by adding a dummy instruction in a new last basic block. */
if (check_last_block)
{
- basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+ basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
gimple t = NULL;
@@ -7441,7 +7450,7 @@ gimple_flow_call_edges_add (sbitmap blocks)
{
edge e;
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (e)
{
gsi_insert_on_edge (e, gimple_build_nop ());
@@ -7484,7 +7493,7 @@ gimple_flow_call_edges_add (sbitmap blocks)
#ifdef ENABLE_CHECKING
if (stmt == last_stmt)
{
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
#endif
@@ -7497,7 +7506,7 @@ gimple_flow_call_edges_add (sbitmap blocks)
if (e)
blocks_split++;
}
- make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
gsi_prev (&gsi);
}
@@ -7535,7 +7544,7 @@ remove_edge_and_dominated_blocks (edge e)
}
/* No updating is needed for edges to exit. */
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (cfgcleanup_altered_bbs)
bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
@@ -7575,7 +7584,7 @@ remove_edge_and_dominated_blocks (edge e)
{
FOR_EACH_EDGE (f, ei, bb->succs)
{
- if (f->dest != EXIT_BLOCK_PTR)
+ if (f->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (df, f->dest->index);
}
}
@@ -7926,8 +7935,8 @@ split_critical_edges (void)
gimple_find_edge_insert_loc. */
else if ((!single_pred_p (e->dest)
|| !gimple_seq_empty_p (phi_nodes (e->dest))
- || e->dest == EXIT_BLOCK_PTR)
- && e->src != ENTRY_BLOCK_PTR
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !(e->flags & EDGE_ABNORMAL))
{
gimple_stmt_iterator gsi;
@@ -8051,10 +8060,10 @@ execute_warn_function_return (void)
/* If we have a path to EXIT, then we do return. */
if (TREE_THIS_VOLATILE (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0)
{
location = UNKNOWN_LOCATION;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
last = last_stmt (e->src);
if ((gimple_code (last) == GIMPLE_RETURN
@@ -8071,10 +8080,10 @@ execute_warn_function_return (void)
without returning a value. */
else if (warn_return_type
&& !TREE_NO_WARNING (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0
&& !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl))))
{
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
gimple last = last_stmt (e->src);
if (gimple_code (last) == GIMPLE_RETURN
@@ -8291,13 +8300,15 @@ execute_fixup_cfg (void)
count_scale
= GCOV_COMPUTE_SCALE (cgraph_get_node (current_function_decl)->count,
- ENTRY_BLOCK_PTR->count);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count);
- ENTRY_BLOCK_PTR->count = cgraph_get_node (current_function_decl)->count;
- EXIT_BLOCK_PTR->count = apply_scale (EXIT_BLOCK_PTR->count,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
+ cgraph_get_node (current_function_decl)->count;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
+ apply_scale (EXIT_BLOCK_PTR_FOR_FN (cfun)->count,
count_scale);
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
e->count = apply_scale (e->count, count_scale);
FOR_EACH_BB (bb)
diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c
index e864eed94f8..4e5adc28a06 100644
--- a/gcc/tree-cfgcleanup.c
+++ b/gcc/tree-cfgcleanup.c
@@ -36,8 +36,10 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
@@ -249,14 +251,14 @@ tree_forwarder_block_p (basic_block bb, bool phi_wanted)
Otherwise, BB must have PHI nodes. */
|| gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
/* BB may not be a predecessor of EXIT_BLOCK_PTR. */
- || single_succ (bb) == EXIT_BLOCK_PTR
+ || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
/* Nor should this be an infinite loop. */
|| single_succ (bb) == bb
/* BB may not have an abnormal outgoing edge. */
|| (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
return false;
- gcc_checking_assert (bb != ENTRY_BLOCK_PTR);
+ gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
locus = single_succ_edge (bb)->goto_locus;
@@ -266,7 +268,7 @@ tree_forwarder_block_p (basic_block bb, bool phi_wanted)
edge e;
FOR_EACH_EDGE (e, ei, bb->preds)
- if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
return false;
/* If goto_locus of any of the edges differs, prevent removing
the forwarder block for -O0. */
@@ -905,7 +907,7 @@ remove_forwarder_block_with_phi (basic_block bb)
static unsigned int
merge_phi_nodes (void)
{
- basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
+ basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
basic_block *current = worklist;
basic_block bb;
diff --git a/gcc/tree-complex.c b/gcc/tree-complex.c
index 130674e673f..7bc3458165f 100644
--- a/gcc/tree-complex.c
+++ b/gcc/tree-complex.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "gimple.h"
#include "gimplify.h"
@@ -31,7 +32,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-iterator.h"
@@ -687,7 +690,7 @@ update_complex_assignment (gimple_stmt_iterator *gsi, tree r, tree i)
static void
update_parameter_components (void)
{
- edge entry_edge = single_succ_edge (ENTRY_BLOCK_PTR);
+ edge entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
tree parm;
for (parm = DECL_ARGUMENTS (cfun->decl); parm ; parm = DECL_CHAIN (parm))
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 84d54f735f4..4fc7e8210f1 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -77,6 +77,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "expr.h"
#include "gimple-pretty-print.h"
#include "gimple.h"
#include "gimple-iterator.h"
@@ -4744,10 +4745,9 @@ analyze_all_data_dependences (struct loop *loop)
void
tree_check_data_deps (void)
{
- loop_iterator li;
struct loop *loop_nest;
- FOR_EACH_LOOP (li, loop_nest, 0)
+ FOR_EACH_LOOP (loop_nest, 0)
analyze_all_data_dependences (loop_nest);
}
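
The tree_check_data_deps hunk shows the second mechanical change running through this merge: FOR_EACH_LOOP drops its explicit loop_iterator argument, the iterator now being managed inside the macro. A before/after sketch (GCC-internal, assuming cfgloop.h):

/* Before: the caller declared and passed the iterator.  */
loop_iterator li;
struct loop *loop_nest;
FOR_EACH_LOOP (li, loop_nest, 0)
  analyze_all_data_dependences (loop_nest);

/* After: only the loop variable and the flags remain.  */
struct loop *loop_nest;
FOR_EACH_LOOP (loop_nest, 0)
  analyze_all_data_dependences (loop_nest);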
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index b2084961a3f..5392c23c142 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "hashtab.h"
#include "pointer-set.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
#include "ggc.h"
@@ -38,7 +39,9 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-inline.h"
#include "tree-pass.h"
@@ -478,8 +481,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
&& csize
&& tree_fits_uhwi_p (csize)
&& wi::fits_shwi_p (bit_offset))
- maxsize = tree_to_shwi (csize)
- - bit_offset.to_shwi ();
+ maxsize = tree_to_uhwi (csize) - bit_offset.to_shwi ();
else
maxsize = -1;
}
@@ -609,7 +611,8 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
&& (!wi::fits_shwi_p (bit_offset)
|| !tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp)))
|| (bit_offset.to_shwi () + maxsize
- == (HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp))))))
+ == (HOST_WIDE_INT) tree_to_uhwi
+ (TYPE_SIZE (TREE_TYPE (exp))))))
maxsize = -1;
done:
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 656ba6f5cb3..77786787585 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
+#include "expr.h"
+#include "calls.h"
#include "flags.h"
#include "function.h"
#include "except.h"
@@ -34,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
diff --git a/gcc/tree-emutls.c b/gcc/tree-emutls.c
index 11337c0c127..7f0c0a1981a 100644
--- a/gcc/tree-emutls.c
+++ b/gcc/tree-emutls.c
@@ -21,6 +21,8 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "varasm.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
@@ -29,6 +31,7 @@ along with GCC; see the file COPYING3. If not see
#include "cgraph.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "langhooks.h"
#include "target.h"
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index 81403f25aa0..907b403e43c 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -85,6 +85,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -96,6 +97,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
@@ -916,7 +918,7 @@ get_loop_body_in_if_conv_order (const struct loop *loop)
unsigned int visited_count = 0;
gcc_assert (loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
blocks = XCNEWVEC (basic_block, loop->num_nodes);
visited = BITMAP_ALLOC (NULL);
@@ -1786,7 +1788,6 @@ tree_if_conversion (struct loop *loop)
static unsigned int
main_tree_if_conversion (void)
{
- loop_iterator li;
struct loop *loop;
bool changed = false;
unsigned todo = 0;
@@ -1794,7 +1795,7 @@ main_tree_if_conversion (void)
if (number_of_loops (cfun) <= 1)
return 0;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
if (flag_tree_loop_if_convert == 1
|| flag_tree_loop_if_convert_stores == 1
|| flag_tree_loop_vectorize
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index f14b15b2f26..b95a1885216 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -24,6 +24,8 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "diagnostic-core.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "tree-inline.h"
#include "flags.h"
#include "params.h"
@@ -43,8 +45,10 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "function.h"
@@ -195,7 +199,7 @@ remap_ssa_name (tree name, copy_body_data *id)
if (SSA_NAME_IS_DEFAULT_DEF (name)
&& TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
&& id->entry_bb == NULL
- && single_succ_p (ENTRY_BLOCK_PTR))
+ && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
tree vexpr = make_node (DEBUG_EXPR_DECL);
gimple def_temp;
@@ -214,7 +218,7 @@ remap_ssa_name (tree name, copy_body_data *id)
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (name);
DECL_MODE (vexpr) = DECL_MODE (SSA_NAME_VAR (name));
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
return vexpr;
}
@@ -296,7 +300,8 @@ remap_ssa_name (tree name, copy_body_data *id)
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)
&& (!SSA_NAME_VAR (name)
|| TREE_CODE (SSA_NAME_VAR (name)) != PARM_DECL)
- && (id->entry_bb != EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
+ && (id->entry_bb != EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ 0)->dest
|| EDGE_COUNT (id->entry_bb->preds) != 1))
{
gimple_stmt_iterator gsi = gsi_last_bb (id->entry_bb);
@@ -1972,7 +1977,7 @@ copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb,
/* Return edges do get a FALLTHRU flag when they get inlined. */
if (old_edge->dest->index == EXIT_BLOCK && !old_edge->flags
- && old_edge->dest->aux != EXIT_BLOCK_PTR)
+ && old_edge->dest->aux != EXIT_BLOCK_PTR_FOR_FN (cfun))
flags |= EDGE_FALLTHRU;
new_edge = make_edge (new_bb, (basic_block) old_edge->dest->aux, flags);
new_edge->count = apply_scale (old_edge->count, count_scale);
@@ -2157,10 +2162,10 @@ initialize_cfun (tree new_fndecl, tree callee_fndecl, gcov_type count)
if (!DECL_RESULT (new_fndecl))
DECL_RESULT (new_fndecl) = DECL_RESULT (callee_fndecl);
- if (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count)
+ if (ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count)
count_scale
= GCOV_COMPUTE_SCALE (count,
- ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count);
+ ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count);
else
count_scale = REG_BR_PROB_BASE;
@@ -2196,16 +2201,16 @@ initialize_cfun (tree new_fndecl, tree callee_fndecl, gcov_type count)
init_empty_tree_cfg ();
profile_status_for_function (cfun) = profile_status_for_function (src_cfun);
- ENTRY_BLOCK_PTR->count =
- (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count * count_scale /
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
+ (ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count * count_scale /
REG_BR_PROB_BASE);
- ENTRY_BLOCK_PTR->frequency
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->frequency;
- EXIT_BLOCK_PTR->count =
- (EXIT_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count * count_scale /
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency
+ = ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->frequency;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
+ (EXIT_BLOCK_PTR_FOR_FN (src_cfun)->count * count_scale /
REG_BR_PROB_BASE);
- EXIT_BLOCK_PTR->frequency =
- EXIT_BLOCK_PTR_FOR_FUNCTION (src_cfun)->frequency;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->frequency =
+ EXIT_BLOCK_PTR_FOR_FN (src_cfun)->frequency;
if (src_cfun->eh)
init_eh_for_function ();
@@ -2404,7 +2409,7 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
before inlining, using the guessed edge frequencies, so that we don't
end up with a 0-count inline body which can confuse downstream
optimizations such as function splitting. */
- if (!ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count && count)
+ if (!ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count && count)
{
/* Apply the larger of the call bb count and the total incoming
call edge count to the callee. */
@@ -2416,10 +2421,10 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
freqs_to_counts (id->src_node, count > in_count ? count : in_count);
}
- if (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count)
+ if (ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count)
count_scale
= GCOV_COMPUTE_SCALE (count,
- ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count);
+ ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count);
else
count_scale = REG_BR_PROB_BASE;
@@ -2444,20 +2449,20 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
incoming_count = apply_scale (incoming_count, count_scale);
incoming_frequency
= apply_scale ((gcov_type)incoming_frequency, frequency_scale);
- ENTRY_BLOCK_PTR->count = incoming_count;
- ENTRY_BLOCK_PTR->frequency = incoming_frequency;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = incoming_count;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency = incoming_frequency;
}
/* Must have a CFG here at this point. */
- gcc_assert (ENTRY_BLOCK_PTR_FOR_FUNCTION
+ gcc_assert (ENTRY_BLOCK_PTR_FOR_FN
(DECL_STRUCT_FUNCTION (callee_fndecl)));
cfun_to_copy = id->src_cfun = DECL_STRUCT_FUNCTION (callee_fndecl);
- ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy)->aux = entry_block_map;
- EXIT_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy)->aux = exit_block_map;
- entry_block_map->aux = ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy);
- exit_block_map->aux = EXIT_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun_to_copy)->aux = entry_block_map;
+ EXIT_BLOCK_PTR_FOR_FN (cfun_to_copy)->aux = exit_block_map;
+ entry_block_map->aux = ENTRY_BLOCK_PTR_FOR_FN (cfun_to_copy);
+ exit_block_map->aux = EXIT_BLOCK_PTR_FOR_FN (cfun_to_copy);
/* Duplicate any exception-handling regions. */
if (cfun->eh)
@@ -2629,7 +2634,7 @@ copy_debug_stmt (gimple stmt, copy_body_data *id)
&& TREE_CODE ((**debug_args)[i + 1]) == DEBUG_EXPR_DECL)
{
t = (**debug_args)[i + 1];
- stmt->gsbase.subcode = GIMPLE_DEBUG_BIND;
+ stmt->subcode = GIMPLE_DEBUG_BIND;
gimple_debug_bind_set_value (stmt, t);
break;
}
@@ -2688,7 +2693,7 @@ copy_body (copy_body_data *id, gcov_type count, int frequency_scale,
tree body;
/* If this body has a CFG, walk CFG and copy. */
- gcc_assert (ENTRY_BLOCK_PTR_FOR_FUNCTION (DECL_STRUCT_FUNCTION (fndecl)));
+ gcc_assert (ENTRY_BLOCK_PTR_FOR_FN (DECL_STRUCT_FUNCTION (fndecl)));
body = copy_cfg_body (id, count, frequency_scale, entry_block_map, exit_block_map,
new_entry);
copy_debug_stmts (id);
@@ -4423,7 +4428,7 @@ gimple_expand_calls_inline (basic_block bb, copy_body_data *id)
static void
fold_marked_statements (int first, struct pointer_set_t *statements)
{
- for (; first < n_basic_blocks; first++)
+ for (; first < n_basic_blocks_for_fn (cfun); first++)
if (BASIC_BLOCK (first))
{
gimple_stmt_iterator gsi;
@@ -4511,7 +4516,7 @@ optimize_inline_calls (tree fn)
{
copy_body_data id;
basic_block bb;
- int last = n_basic_blocks;
+ int last = n_basic_blocks_for_fn (cfun);
struct gimplify_ctx gctx;
bool inlined_p = false;
@@ -5092,7 +5097,8 @@ delete_unreachable_blocks_update_callgraph (copy_body_data *id)
/* Delete all unreachable basic blocks. */
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
+ for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
+ != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
{
next_bb = b->next_bb;
@@ -5288,7 +5294,7 @@ tree_function_versioning (tree old_decl, tree new_decl,
id.transform_parameter = false;
id.transform_lang_insert_block = NULL;
- old_entry_block = ENTRY_BLOCK_PTR_FOR_FUNCTION
+ old_entry_block = ENTRY_BLOCK_PTR_FOR_FN
(DECL_STRUCT_FUNCTION (old_decl));
DECL_RESULT (new_decl) = DECL_RESULT (old_decl);
DECL_ARGUMENTS (new_decl) = DECL_ARGUMENTS (old_decl);
@@ -5407,7 +5413,8 @@ tree_function_versioning (tree old_decl, tree new_decl,
/* Copy the Function's body. */
copy_body (&id, old_entry_block->count, REG_BR_PROB_BASE,
- ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, new_entry);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun), EXIT_BLOCK_PTR_FOR_FN (cfun),
+ new_entry);
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (new_decl);
@@ -5415,7 +5422,7 @@ tree_function_versioning (tree old_decl, tree new_decl,
/* We want to create the BB unconditionally, so that the addition of
debug stmts doesn't affect BB count, which may in the end cause
codegen differences. */
- bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
while (init_stmts.length ())
insert_init_stmt (&id, bb, init_stmts.pop ());
update_clone_info (&id);
@@ -5452,7 +5459,7 @@ tree_function_versioning (tree old_decl, tree new_decl,
struct cgraph_edge *e;
rebuild_frequencies ();
- new_version_node->count = ENTRY_BLOCK_PTR->count;
+ new_version_node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
for (e = new_version_node->callees; e; e = e->next_callee)
{
basic_block bb = gimple_bb (e->call_stmt);
diff --git a/gcc/tree-inline.h b/gcc/tree-inline.h
index 50ccaab42fc..d871fc4e4b6 100644
--- a/gcc/tree-inline.h
+++ b/gcc/tree-inline.h
@@ -183,10 +183,9 @@ extern eni_weights eni_size_weights;
extern eni_weights eni_time_weights;
/* Function prototypes. */
-
+void init_inline_once (void);
extern tree copy_tree_body_r (tree *, int *, void *);
extern void insert_decl_map (copy_body_data *, tree, tree);
-
unsigned int optimize_inline_calls (tree);
tree maybe_inline_call_in_expr (tree);
bool tree_inlinable_function_p (tree);
@@ -197,9 +196,13 @@ int estimate_num_insns (gimple, eni_weights *);
int estimate_num_insns_fn (tree, eni_weights *);
int count_insns_seq (gimple_seq, eni_weights *);
bool tree_versionable_function_p (tree);
-
extern tree remap_decl (tree decl, copy_body_data *id);
extern tree remap_type (tree type, copy_body_data *id);
extern gimple_seq copy_gimple_seq_and_replace_locals (gimple_seq seq);
+extern bool debug_find_tree (tree, tree);
+
+/* This is in tree-inline.c since the routine uses
+ data structures from the inliner. */
+extern tree build_duplicate_type (tree);
#endif /* GCC_TREE_INLINE_H */
diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c
index ee86f2cfa9d..6cae27e34b7 100644
--- a/gcc/tree-into-ssa.c
+++ b/gcc/tree-into-ssa.c
@@ -35,8 +35,10 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
@@ -1219,10 +1221,12 @@ rewrite_debug_stmt_uses (gimple stmt)
def = info->current_def;
if (!def)
{
- if (TREE_CODE (var) == PARM_DECL && single_succ_p (ENTRY_BLOCK_PTR))
+ if (TREE_CODE (var) == PARM_DECL
+ && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
gimple_stmt_iterator gsi
- = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ =
+ gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
int lim;
/* Search a few source bind stmts at the start of first bb to
see if a DEBUG_EXPR_DECL can't be reused. */
@@ -1251,7 +1255,8 @@ rewrite_debug_stmt_uses (gimple stmt)
DECL_ARTIFICIAL (def) = 1;
TREE_TYPE (def) = TREE_TYPE (var);
DECL_MODE (def) = DECL_MODE (var);
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi =
+ gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
}
update = true;
@@ -1866,7 +1871,7 @@ maybe_register_def (def_operand_p def_p, gimple stmt,
bind stmts, but there wouldn't be a PC to bind
them to either, so avoid diverging the CFG. */
if (ef && single_pred_p (ef->dest)
- && ef->dest != EXIT_BLOCK_PTR)
+ && ef->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* If there were PHI nodes in the node, we'd
have to make sure the value we're binding
@@ -2329,7 +2334,7 @@ rewrite_into_ssa (void)
insert_phi_nodes (dfs);
/* 4- Rename all the blocks. */
- rewrite_blocks (ENTRY_BLOCK_PTR, REWRITE_ALL);
+ rewrite_blocks (ENTRY_BLOCK_PTR_FOR_FN (cfun), REWRITE_ALL);
/* Free allocated memory. */
FOR_EACH_BB (bb)
@@ -3015,7 +3020,7 @@ insert_updated_phi_nodes_for (tree var, bitmap_head *dfs, bitmap blocks,
common dominator of all the definition blocks. */
entry = nearest_common_dominator_for_set (CDI_DOMINATORS,
db->def_blocks);
- if (entry != ENTRY_BLOCK_PTR)
+ if (entry != ENTRY_BLOCK_PTR_FOR_FN (cfun))
EXECUTE_IF_SET_IN_BITMAP (idf, 0, i, bi)
if (BASIC_BLOCK (i) != entry
&& dominated_by_p (CDI_DOMINATORS, BASIC_BLOCK (i), entry))
@@ -3214,7 +3219,7 @@ update_ssa (unsigned update_flags)
be possible to determine the nearest block that had a
definition for each of the symbols that are marked for
updating. For now this seems more work than it's worth. */
- start_bb = ENTRY_BLOCK_PTR;
+ start_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Traverse the CFG looking for existing definitions and uses of
symbols in SSA operands. Mark interesting blocks and
@@ -3297,7 +3302,7 @@ update_ssa (unsigned update_flags)
/* Insertion of PHI nodes may have added blocks to the region.
We need to re-compute START_BB to include the newly added
blocks. */
- if (start_bb != ENTRY_BLOCK_PTR)
+ if (start_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
start_bb = nearest_common_dominator_for_set (CDI_DOMINATORS,
blocks_to_update);
}
diff --git a/gcc/tree-iterator.h b/gcc/tree-iterator.h
index f7593890713..b5217f77873 100644
--- a/gcc/tree-iterator.h
+++ b/gcc/tree-iterator.h
@@ -117,5 +117,7 @@ extern tree alloc_stmt_list (void);
extern void free_stmt_list (tree);
extern void append_to_statement_list (tree, tree *);
extern void append_to_statement_list_force (tree, tree *);
+extern tree expr_first (tree);
+extern tree expr_last (tree);
#endif /* GCC_TREE_ITERATOR_H */
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index 075487726f4..0afa52aff1e 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -48,10 +48,12 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
+#include "stor-layout.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
@@ -1659,7 +1661,6 @@ static unsigned int
tree_loop_distribution (void)
{
struct loop *loop;
- loop_iterator li;
bool changed = false;
basic_block bb;
control_dependences *cd = NULL;
@@ -1675,7 +1676,7 @@ tree_loop_distribution (void)
/* We can at the moment only distribute non-nested loops, thus restrict
walking to innermost loops. */
- FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
vec<gimple> work_list = vNULL;
basic_block *bbs;
diff --git a/gcc/tree-nested.c b/gcc/tree-nested.c
index 9b4493bade0..868c8fd0e1f 100644
--- a/gcc/tree-nested.c
+++ b/gcc/tree-nested.c
@@ -22,6 +22,8 @@
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "function.h"
#include "tree-dump.h"
diff --git a/gcc/tree-nrv.c b/gcc/tree-nrv.c
index b333abf3956..035428aaec3 100644
--- a/gcc/tree-nrv.c
+++ b/gcc/tree-nrv.c
@@ -29,6 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "langhooks.h"
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index 4d5b83f9a92..ced1f60f1e4 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -23,15 +23,19 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "tree-object-size.h"
#include "diagnostic-core.h"
#include "gimple-pretty-print.h"
#include "bitmap.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
struct object_size_info
{
@@ -1205,16 +1209,9 @@ compute_object_sizes (void)
gimple_stmt_iterator i;
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
{
- tree callee, result;
+ tree result;
gimple call = gsi_stmt (i);
-
- if (gimple_code (call) != GIMPLE_CALL)
- continue;
-
- callee = gimple_call_fndecl (call);
- if (!callee
- || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
- || DECL_FUNCTION_CODE (callee) != BUILT_IN_OBJECT_SIZE)
+ if (!gimple_call_builtin_p (call, BUILT_IN_OBJECT_SIZE))
continue;
init_object_sizes ();
@@ -1243,20 +1240,32 @@ compute_object_sizes (void)
continue;
}
+ gcc_assert (TREE_CODE (result) == INTEGER_CST);
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Simplified\n ");
print_gimple_stmt (dump_file, call, 0, dump_flags);
+ fprintf (dump_file, " to ");
+ print_generic_expr (dump_file, result, 0);
+ fprintf (dump_file, "\n");
}
- if (!update_call_from_tree (&i, result))
- gcc_unreachable ();
+ tree lhs = gimple_call_lhs (call);
+ if (!lhs)
+ continue;
- if (dump_file && (dump_flags & TDF_DETAILS))
+ /* Propagate into all uses and fold those stmts. */
+ gimple use_stmt;
+ imm_use_iterator iter;
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
{
- fprintf (dump_file, "to\n ");
- print_gimple_stmt (dump_file, gsi_stmt (i), 0, dump_flags);
- fprintf (dump_file, "\n");
+ use_operand_p use_p;
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, result);
+ gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
+ fold_stmt (&gsi);
+ update_stmt (gsi_stmt (gsi));
}
}
}
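
With this change compute_object_sizes no longer rewrites the __builtin_object_size call in place: it asserts the computed size is an INTEGER_CST, propagates that constant into every immediate use of the call's result, and refolds those statements. At the source level the observable effect is the usual folding (a sketch, not taken from the patch):

char buf[64];

unsigned long
known_size_p (void)
{
  /* __builtin_object_size (buf, 0) evaluates to 64 here; the pass now
     pushes that constant into the uses of SZ and folds them again, so
     the comparison below collapses to the constant 1.  */
  unsigned long sz = __builtin_object_size (buf, 0);
  return sz >= sizeof (buf);
}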
diff --git a/gcc/tree-object-size.h b/gcc/tree-object-size.h
new file mode 100644
index 00000000000..19029d89562
--- /dev/null
+++ b/gcc/tree-object-size.h
@@ -0,0 +1,26 @@
+/* Declarations for tree-object-size.c.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_OBJECT_SIZE_H
+#define GCC_TREE_OBJECT_SIZE_H
+
+extern void init_object_sizes (void);
+extern unsigned HOST_WIDE_INT compute_builtin_object_size (tree, int);
+
+#endif // GCC_TREE_OBJECT_SIZE_H
diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c
index eb11c883fb4..9a7a73f4079 100644
--- a/gcc/tree-outof-ssa.c
+++ b/gcc/tree-outof-ssa.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "ggc.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -34,6 +35,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "dumpfile.h"
#include "diagnostic-core.h"
@@ -929,7 +931,8 @@ expand_phi_nodes (struct ssaexpand *sa)
elim_graph g = new_elim_graph (sa->map->num_partitions);
g->map = sa->map;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
if (!gimple_seq_empty_p (phi_nodes (bb)))
{
edge e;
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c
index 648331cc767..d627c69f954 100644
--- a/gcc/tree-parloops.c
+++ b/gcc/tree-parloops.c
@@ -28,10 +28,13 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
+#include "stor-layout.h"
+#include "tree-nested.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
@@ -1744,7 +1747,6 @@ static void
gen_parallel_loop (struct loop *loop, reduction_info_table_type reduction_list,
unsigned n_threads, struct tree_niter_desc *niter)
{
- loop_iterator li;
tree many_iterations_cond, type, nit;
tree arg_struct, new_arg_struct;
gimple_seq stmts;
@@ -1899,7 +1901,7 @@ gen_parallel_loop (struct loop *loop, reduction_info_table_type reduction_list,
/* Free loop bound estimations that could contain references to
removed statements. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
free_numbers_of_iterations_estimates_loop (loop);
/* Expand the parallel constructs. We do it directly here instead of running
@@ -2140,7 +2142,6 @@ parallelize_loops (void)
bool changed = false;
struct loop *loop;
struct tree_niter_desc niter_desc;
- loop_iterator li;
reduction_info_table_type reduction_list;
struct obstack parloop_obstack;
HOST_WIDE_INT estimated;
@@ -2156,7 +2157,7 @@ parallelize_loops (void)
reduction_list.create (10);
init_stmt_vec_info_vec ();
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
reduction_list.empty ();
if (dump_file && (dump_flags & TDF_DETAILS))
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 9efee1e7e00..02d71cd3961 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -447,6 +447,8 @@ extern gimple_opt_pass *make_pass_split_functions (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_feedback_split_functions (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_strength_reduction (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_vtable_verify (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_ubsan (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sanopt (gcc::context *ctxt);
/* IPA Passes */
extern simple_ipa_opt_pass *make_pass_ipa_lower_emutls (gcc::context *ctxt);
@@ -458,7 +460,6 @@ extern simple_ipa_opt_pass *make_pass_early_local_passes (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_whole_program_visibility (gcc::context
*ctxt);
-extern ipa_opt_pass_d *make_pass_ipa_lto_gimple_out (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_increase_alignment (gcc::context
*ctxt);
extern ipa_opt_pass_d *make_pass_ipa_inline (gcc::context *ctxt);
@@ -470,7 +471,6 @@ extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt);
-extern ipa_opt_pass_d *make_pass_ipa_lto_finish_out (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_profile (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_cdtor_merge (gcc::context *ctxt);
diff --git a/gcc/tree-phinodes.c b/gcc/tree-phinodes.c
index da7bf5be978..bf024ac976c 100644
--- a/gcc/tree-phinodes.c
+++ b/gcc/tree-phinodes.c
@@ -29,6 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa.h"
#include "diagnostic-core.h"
@@ -93,10 +94,10 @@ phinodes_print_statistics (void)
happens to contain a PHI node with LEN arguments or more, return
that one. */
-static inline gimple
+static inline gimple_statement_phi *
allocate_phi_node (size_t len)
{
- gimple phi;
+ gimple_statement_phi *phi;
size_t bucket = NUM_BUCKETS - 2;
size_t size = sizeof (struct gimple_statement_phi)
+ (len - 1) * sizeof (struct phi_arg_d);
@@ -111,7 +112,7 @@ allocate_phi_node (size_t len)
&& gimple_phi_capacity ((*free_phinodes[bucket])[0]) >= len)
{
free_phinode_count--;
- phi = free_phinodes[bucket]->pop ();
+ phi = as_a <gimple_statement_phi> (free_phinodes[bucket]->pop ());
if (free_phinodes[bucket]->is_empty ())
vec_free (free_phinodes[bucket]);
if (GATHER_STATISTICS)
@@ -119,7 +120,8 @@ allocate_phi_node (size_t len)
}
else
{
- phi = ggc_alloc_gimple_statement_d (size);
+ phi = static_cast <gimple_statement_phi *> (
+ ggc_internal_alloc_stat (size MEM_STAT_INFO));
if (GATHER_STATISTICS)
{
enum gimple_alloc_kind kind = gimple_alloc_kind (GIMPLE_PHI);
@@ -171,7 +173,7 @@ ideal_phi_node_len (int len)
static gimple
make_phi_node (tree var, int len)
{
- gimple phi;
+ gimple_statement_phi *phi;
int capacity, i;
capacity = ideal_phi_node_len (len);
@@ -184,10 +186,10 @@ make_phi_node (tree var, int len)
memset (phi, 0, (sizeof (struct gimple_statement_phi)
- sizeof (struct phi_arg_d)
+ sizeof (struct phi_arg_d) * len));
- phi->gsbase.code = GIMPLE_PHI;
+ phi->code = GIMPLE_PHI;
gimple_init_singleton (phi);
- phi->gimple_phi.nargs = len;
- phi->gimple_phi.capacity = capacity;
+ phi->nargs = len;
+ phi->capacity = capacity;
if (!var)
;
else if (TREE_CODE (var) == SSA_NAME)
@@ -236,11 +238,11 @@ release_phi_node (gimple phi)
/* Resize an existing PHI node. The only way is up. Return the
possibly relocated phi. */
-static gimple
-resize_phi_node (gimple phi, size_t len)
+static gimple_statement_phi *
+resize_phi_node (gimple_statement_phi *phi, size_t len)
{
size_t old_size, i;
- gimple new_phi;
+ gimple_statement_phi *new_phi;
gcc_assert (len > gimple_phi_capacity (phi));
@@ -263,7 +265,7 @@ resize_phi_node (gimple phi, size_t len)
relink_imm_use_stmt (imm, old_imm, new_phi);
}
- new_phi->gimple_phi.capacity = len;
+ new_phi->capacity = len;
for (i = gimple_phi_num_args (new_phi); i < len; i++)
{
@@ -291,11 +293,12 @@ reserve_phi_args_for_new_edge (basic_block bb)
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple_statement_phi *stmt =
+ as_a <gimple_statement_phi> (gsi_stmt (gsi));
if (len > gimple_phi_capacity (stmt))
{
- gimple new_phi = resize_phi_node (stmt, cap);
+ gimple_statement_phi *new_phi = resize_phi_node (stmt, cap);
/* The result of the PHI is defined by this PHI node. */
SSA_NAME_DEF_STMT (gimple_phi_result (new_phi)) = new_phi;
@@ -315,7 +318,7 @@ reserve_phi_args_for_new_edge (basic_block bb)
SET_PHI_ARG_DEF (stmt, len - 1, NULL_TREE);
gimple_phi_arg_set_location (stmt, len - 1, UNKNOWN_LOCATION);
- stmt->gimple_phi.nargs++;
+ stmt->nargs++;
}
}
@@ -391,7 +394,7 @@ add_phi_arg (gimple phi, tree def, edge e, source_location locus)
is consistent with how we remove an edge from the edge vector. */
static void
-remove_phi_arg_num (gimple phi, int i)
+remove_phi_arg_num (gimple_statement_phi *phi, int i)
{
int num_elem = gimple_phi_num_args (phi);
@@ -418,7 +421,7 @@ remove_phi_arg_num (gimple phi, int i)
/* Shrink the vector and return. Note that we do not have to clear
PHI_ARG_DEF because the garbage collector will not look at those
elements beyond the first PHI_NUM_ARGS elements of the array. */
- phi->gimple_phi.nargs--;
+ phi->nargs--;
}
@@ -430,7 +433,8 @@ remove_phi_args (edge e)
gimple_stmt_iterator gsi;
for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
- remove_phi_arg_num (gsi_stmt (gsi), e->dest_idx);
+ remove_phi_arg_num (as_a <gimple_statement_phi> (gsi_stmt (gsi)),
+ e->dest_idx);
}
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 338b0ff2ea8..55dd7c32475 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -198,12 +198,14 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "ggc.h"
@@ -2035,7 +2037,11 @@ combinable_refs_p (dref r1, dref r2,
stmt = find_common_use_stmt (&name1, &name2);
- if (!stmt)
+ if (!stmt
+ /* A simple post-dominance check - make sure the combination
+ is executed under the same condition as the references. */
+ || (gimple_bb (stmt) != gimple_bb (r1->stmt)
+ && gimple_bb (stmt) != gimple_bb (r2->stmt)))
return false;
acode = gimple_assign_rhs_code (stmt);
@@ -2505,11 +2511,10 @@ tree_predictive_commoning (void)
{
bool unrolled = false;
struct loop *loop;
- loop_iterator li;
unsigned ret = 0;
initialize_original_copy_tables ();
- FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
if (optimize_loop_for_speed_p (loop))
{
unrolled |= tree_predictive_commoning_loop (loop);
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index e1fb11bddf0..fa0e98d2a9b 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "expr.h"
#include "tree-pretty-print.h"
#include "hashtab.h"
#include "gimple.h"
@@ -2242,6 +2244,12 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
pp_string (buffer, "OBJ_TYPE_REF(");
dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false);
pp_semicolon (buffer);
+ if (!(flags & TDF_SLIM) && virtual_method_call_p (node))
+ {
+ pp_string (buffer, "(");
+ dump_generic_node (buffer, obj_type_ref_class (node), spc, flags, false);
+ pp_string (buffer, ")");
+ }
dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false);
pp_arrow (buffer);
dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false);
diff --git a/gcc/tree-profile.c b/gcc/tree-profile.c
index 132ce0d4d13..0adc51a51aa 100644
--- a/gcc/tree-profile.c
+++ b/gcc/tree-profile.c
@@ -35,12 +35,15 @@ along with GCC; see the file COPYING3. If not see
#include "coverage.h"
#include "tree.h"
#include "gimple.h"
+#include "varasm.h"
+#include "tree-nested.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
@@ -437,7 +440,8 @@ gimple_gen_ic_func_profiler (void)
stmt1: __gcov_indirect_call_profiler_v2 (profile_id,
&current_function_decl)
*/
- gsi = gsi_after_labels (split_edge (single_succ_edge (ENTRY_BLOCK_PTR)));
+ gsi =
+ gsi_after_labels (split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))));
cur_func = force_gimple_operand_gsi (&gsi,
build_addr (current_function_decl,
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 115683de833..0c1f1dfbb1a 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -257,6 +257,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "expr.h"
#include "hash-table.h"
#include "gimple-pretty-print.h"
#include "gimple.h"
@@ -266,6 +267,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
@@ -3101,16 +3103,14 @@ initialize_scalar_evolutions_analyzer (void)
void
scev_initialize (void)
{
- loop_iterator li;
struct loop *loop;
-
scalar_evolution_info = htab_create_ggc (100, hash_scev_info, eq_scev_info,
del_scev_info);
initialize_scalar_evolutions_analyzer ();
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
loop->nb_iterations = NULL_TREE;
}
@@ -3142,7 +3142,6 @@ scev_reset_htab (void)
void
scev_reset (void)
{
- loop_iterator li;
struct loop *loop;
scev_reset_htab ();
@@ -3150,7 +3149,7 @@ scev_reset (void)
if (!current_loops)
return;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
loop->nb_iterations = NULL_TREE;
}
@@ -3296,7 +3295,6 @@ scev_const_prop (void)
struct loop *loop, *ex_loop;
bitmap ssa_names_to_remove = NULL;
unsigned i;
- loop_iterator li;
gimple_stmt_iterator psi;
if (number_of_loops (cfun) <= 1)
@@ -3358,7 +3356,7 @@ scev_const_prop (void)
}
/* Now the regular final value replacement. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
edge exit;
tree def, rslt, niter;
diff --git a/gcc/tree-scalar-evolution.h b/gcc/tree-scalar-evolution.h
index db7ac4c66f0..8846fbe50cc 100644
--- a/gcc/tree-scalar-evolution.h
+++ b/gcc/tree-scalar-evolution.h
@@ -47,7 +47,7 @@ static inline basic_block
block_before_loop (loop_p loop)
{
edge preheader = loop_preheader_edge (loop);
- return (preheader ? preheader->src : ENTRY_BLOCK_PTR);
+ return (preheader ? preheader->src : ENTRY_BLOCK_PTR_FOR_FN (cfun));
}
/* Analyze all the parameters of the chrec that were left under a
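
Illustrative sketch (not part of the patch): the other mechanical rename in this merge replaces the bare ENTRY_BLOCK_PTR / EXIT_BLOCK_PTR macros, which implicitly referred to cfun, with ENTRY_BLOCK_PTR_FOR_FN (fn) / EXIT_BLOCK_PTR_FOR_FN (fn) so the function is named explicitly. The two helpers below are invented for illustration and assume the accessors behave as in the hunks above.

/* The first "real" block of FN: the single successor of its
   artificial entry block.  */
static basic_block
first_real_block (struct function *fn)
{
  return single_succ (ENTRY_BLOCK_PTR_FOR_FN (fn));
}

/* True if BB falls straight through to the artificial exit block of
   the current function.  */
static bool
falls_to_exit_p (basic_block bb)
{
  return single_succ_p (bb)
	 && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun);
}
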
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 04579526e48..eded16fc1f5 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -79,6 +79,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "tree.h"
#include "gimple.h"
+#include "stor-layout.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
@@ -88,7 +89,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
@@ -3406,7 +3409,7 @@ initialize_parameter_reductions (void)
seq = gsi_seq (gsi);
if (seq)
- gsi_insert_seq_on_edge_immediate (single_succ_edge (ENTRY_BLOCK_PTR), seq);
+ gsi_insert_seq_on_edge_immediate (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
}
/* The "main" function of intraprocedural SRA passes. Runs the analysis and if
@@ -3785,7 +3788,7 @@ propagate_dereference_distances (void)
basic_block bb;
queue.create (last_basic_block_for_function (cfun));
- queue.quick_push (ENTRY_BLOCK_PTR);
+ queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
FOR_EACH_BB (bb)
{
queue.quick_push (bb);
@@ -3815,7 +3818,7 @@ propagate_dereference_distances (void)
{
int succ_idx = e->dest->index * func_param_count + i;
- if (e->src == EXIT_BLOCK_PTR)
+ if (e->src == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (first)
@@ -3856,10 +3859,11 @@ dump_dereferences_table (FILE *f, const char *str, HOST_WIDE_INT *table)
basic_block bb;
fprintf (dump_file, str);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
fprintf (f, "%4i %i ", bb->index, bitmap_bit_p (final_bbs, bb->index));
- if (bb != EXIT_BLOCK_PTR)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
int i;
for (i = 0; i < func_param_count; i++)
@@ -3911,7 +3915,7 @@ analyze_caller_dereference_legality (vec<access_p> representatives)
for (i = 0; i < func_param_count; i++)
{
struct access *repr = representatives[i];
- int idx = ENTRY_BLOCK_PTR->index * func_param_count + i;
+ int idx = ENTRY_BLOCK_PTR_FOR_FN (cfun)->index * func_param_count + i;
if (!repr || no_accesses_p (repr))
continue;
@@ -4725,9 +4729,9 @@ sra_ipa_reset_debug_stmts (ipa_parm_adjustment_vec adjustments)
int i, len;
gimple_stmt_iterator *gsip = NULL, gsi;
- if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR))
+ if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gsip = &gsi;
}
len = adjustments.length ();
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index 5e2a1071073..6707f866a58 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -25,14 +25,17 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "dumpfile.h"
#include "flags.h"
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index aefc38e58fc..860208bfb26 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -35,7 +35,9 @@ along with GCC; see the file COPYING3. If not see
#include "dumpfile.h"
#include "gimple.h"
#include "gimple-ssa.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-inline.h"
#include "params.h"
@@ -576,7 +578,7 @@ ao_ref_alias_set (ao_ref *ref)
void
ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size)
{
- HOST_WIDE_INT t, extra_offset = 0;
+ HOST_WIDE_INT t, size_hwi, extra_offset = 0;
ref->ref = NULL_TREE;
if (TREE_CODE (ptr) == SSA_NAME)
{
@@ -615,9 +617,8 @@ ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size)
ref->offset += extra_offset;
if (size
&& tree_fits_shwi_p (size)
- && tree_to_shwi (size) * BITS_PER_UNIT / BITS_PER_UNIT
- == tree_to_shwi (size))
- ref->max_size = ref->size = tree_to_shwi (size) * BITS_PER_UNIT;
+ && (size_hwi = tree_to_shwi (size)) <= HOST_WIDE_INT_MAX / BITS_PER_UNIT)
+ ref->max_size = ref->size = size_hwi * BITS_PER_UNIT;
else
ref->max_size = ref->size = -1;
ref->ref_alias_set = 0;
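
Illustrative sketch (not part of the patch): the rewritten size check above replaces a multiply-then-divide-back overflow test with a bound check against HOST_WIDE_INT_MAX / BITS_PER_UNIT before multiplying. The same guard in isolation, using plain long in place of HOST_WIDE_INT and a stand-in constant for BITS_PER_UNIT:

#include <limits.h>

/* Convert a byte count to bits, refusing values whose bit count would
   overflow.  Returns 1 on success, 0 if the caller should fall back to
   an "unknown size" value such as -1.  */
static int
bytes_to_bits_checked (long size_bytes, long *size_bits)
{
  const long bits_per_unit = 8;	/* stand-in for BITS_PER_UNIT */

  if (size_bytes < 0 || size_bytes > LONG_MAX / bits_per_unit)
    return 0;

  *size_bits = size_bytes * bits_per_unit;
  return 1;
}
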
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index e20f4123d6d..dd5077de2d4 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -123,6 +123,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
@@ -135,6 +136,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
@@ -1804,7 +1806,7 @@ evaluate_stmt (gimple stmt)
return val;
}
-typedef hash_table <pointer_hash <gimple_statement_d> > gimple_htab;
+typedef hash_table <pointer_hash <gimple_statement_base> > gimple_htab;
/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
@@ -1862,7 +1864,7 @@ gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
while (gsi_end_p (*i))
{
dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
- if (dom == NULL || dom == ENTRY_BLOCK_PTR)
+ if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
*i = gsi_last_bb (dom);
@@ -1924,7 +1926,7 @@ fold_builtin_alloca_with_align (gimple stmt)
|| !tree_fits_uhwi_p (arg))
return NULL_TREE;
- size = TREE_INT_CST_LOW (arg);
+ size = tree_to_uhwi (arg);
/* Heuristic: don't fold large allocas. */
threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
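
Illustrative sketch (not part of the patch): the switch from TREE_INT_CST_LOW to tree_to_uhwi here (and in the tree-ssa-sccvn.c and tree-ssa-structalias.c hunks later in this merge) pairs the extraction with an explicit fits-check — tree_fits_uhwi_p guards the conversion instead of silently reading the low word of a wider constant. A sketch of the idiom, with get_constant_size as an invented helper:

/* Extract a compile-time size from ARG if it is a constant that fits
   an unsigned HOST_WIDE_INT; return false otherwise.  */
static bool
get_constant_size (tree arg, unsigned HOST_WIDE_INT *size)
{
  if (TREE_CODE (arg) != INTEGER_CST || !tree_fits_uhwi_p (arg))
    return false;

  *size = tree_to_uhwi (arg);
  return true;
}
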
@@ -2352,7 +2354,7 @@ optimize_stack_restore (gimple_stmt_iterator i)
case 0:
break;
case 1:
- if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
+ if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL_TREE;
break;
default:
diff --git a/gcc/tree-ssa-coalesce.c b/gcc/tree-ssa-coalesce.c
index 942602e5fe3..d6fbb1cc27f 100644
--- a/gcc/tree-ssa-coalesce.c
+++ b/gcc/tree-ssa-coalesce.c
@@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "hash-table.h"
#include "tree-ssa-live.h"
@@ -1077,7 +1078,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
v2 = SSA_NAME_VERSION (var);
bitmap_set_bit (used_in_copy, v1);
bitmap_set_bit (used_in_copy, v2);
- cost = coalesce_cost_bb (EXIT_BLOCK_PTR);
+ cost = coalesce_cost_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
add_coalesce (cl, v1, v2, cost);
}
}
diff --git a/gcc/tree-ssa-copy.c b/gcc/tree-ssa-copy.c
index 0f70372c80e..def780650e0 100644
--- a/gcc/tree-ssa-copy.c
+++ b/gcc/tree-ssa-copy.c
@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
diff --git a/gcc/tree-ssa-copyrename.c b/gcc/tree-ssa-copyrename.c
index d71802e7a1c..361b4c32fd7 100644
--- a/gcc/tree-ssa-copyrename.c
+++ b/gcc/tree-ssa-copyrename.c
@@ -31,7 +31,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pretty-print.h"
#include "bitmap.h"
#include "gimple-ssa.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-inline.h"
#include "hashtab.h"
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index d138f92f195..0c8110fb71a 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -48,6 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "tree.h"
+#include "calls.h"
#include "gimple-pretty-print.h"
#include "basic-block.h"
#include "gimple.h"
@@ -57,9 +58,11 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "flags.h"
@@ -325,9 +328,9 @@ mark_control_dependent_edges_necessary (basic_block bb, bool ignore_self)
unsigned edge_number;
bool skipped = false;
- gcc_assert (bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
- if (bb == ENTRY_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
EXECUTE_IF_SET_IN_BITMAP (cd->get_edges_dependent_on (bb->index),
@@ -393,7 +396,6 @@ find_obviously_necessary_stmts (bool aggressive)
/* Prevent the empty possibly infinite loops from being removed. */
if (aggressive)
{
- loop_iterator li;
struct loop *loop;
scev_initialize ();
if (mark_irreducible_loops ())
@@ -411,7 +413,7 @@ find_obviously_necessary_stmts (bool aggressive)
}
}
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
if (!finite_loop_p (loop))
{
if (dump_file)
@@ -634,7 +636,7 @@ propagate_necessity (bool aggressive)
containing STMT is control dependent, but only if we haven't
already done so. */
basic_block bb = gimple_bb (stmt);
- if (bb != ENTRY_BLOCK_PTR
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !bitmap_bit_p (visited_control_parents, bb->index))
mark_control_dependent_edges_necessary (bb, false);
}
@@ -740,7 +742,7 @@ propagate_necessity (bool aggressive)
if (!bitmap_bit_p (last_stmt_necessary, arg_bb->index))
mark_last_stmt_necessary (arg_bb);
}
- else if (arg_bb != ENTRY_BLOCK_PTR
+ else if (arg_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !bitmap_bit_p (visited_control_parents,
arg_bb->index))
mark_control_dependent_edges_necessary (arg_bb, true);
@@ -1074,7 +1076,7 @@ remove_dead_stmt (gimple_stmt_iterator *i, basic_block bb)
fake edges in the dominator tree. */
if (e)
;
- else if (! post_dom_bb || post_dom_bb == EXIT_BLOCK_PTR)
+ else if (! post_dom_bb || post_dom_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
e = EDGE_SUCC (bb, 0);
else
e = forward_edge_to_pdom (EDGE_SUCC (bb, 0), post_dom_bb);
@@ -1166,7 +1168,8 @@ eliminate_unnecessary_stmts (void)
as desired. */
gcc_assert (dom_info_available_p (CDI_DOMINATORS));
- h = get_all_dominated_blocks (CDI_DOMINATORS, single_succ (ENTRY_BLOCK_PTR));
+ h = get_all_dominated_blocks (CDI_DOMINATORS,
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
while (h.length ())
{
@@ -1263,7 +1266,8 @@ eliminate_unnecessary_stmts (void)
find_unreachable_blocks ();
/* Delete all unreachable basic blocks in reverse dominator order. */
- for (bb = EXIT_BLOCK_PTR->prev_bb; bb != ENTRY_BLOCK_PTR; bb = prev_bb)
+ for (bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
+ bb != ENTRY_BLOCK_PTR_FOR_FN (cfun); bb = prev_bb)
{
prev_bb = bb->prev_bb;
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 0ce24df2abe..a286c105615 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
@@ -36,6 +37,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
@@ -900,7 +902,7 @@ tree_ssa_dominator_optimize (void)
while (single_succ_p (bb)
&& (single_succ_edge (bb)->flags & EDGE_EH) == 0)
bb = single_succ (bb);
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if ((unsigned) bb->index != i)
bitmap_set_bit (need_eh_cleanup, bb->index);
@@ -3052,7 +3054,8 @@ eliminate_degenerate_phis (void)
phase in dominator order. Presumably this is because walking
in dominator order leaves fewer PHIs for later examination
by the worklist phase. */
- eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
+ eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ interesting_names);
/* Second phase. Eliminate second order degenerate PHIs as well
as trivial copies or constant initializations identified by
diff --git a/gcc/tree-ssa-dse.c b/gcc/tree-ssa-dse.c
index 42e2380a0c8..905ef21761c 100644
--- a/gcc/tree-ssa-dse.c
+++ b/gcc/tree-ssa-dse.c
@@ -33,7 +33,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "domwalk.h"
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 5dd8049582b..e8a92ed6c16 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -33,7 +34,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "langhooks.h"
diff --git a/gcc/tree-ssa-ifcombine.c b/gcc/tree-ssa-ifcombine.c
index d3bb5b246cd..9d243753666 100644
--- a/gcc/tree-ssa-ifcombine.c
+++ b/gcc/tree-ssa-ifcombine.c
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "rtl.h"
#include "tm_p.h"
#include "tree.h"
+#include "stor-layout.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple.h"
@@ -679,7 +680,7 @@ tree_ssa_ifcombine (void)
inner ones, and also that we do not try to visit a removed
block. This is opposite of PHI-OPT, because we cascade the
combining rather than cascading PHIs. */
- for (i = n_basic_blocks - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
+ for (i = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
{
basic_block bb = bbs[i];
gimple stmt = last_stmt (bb);
diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c
index 1657f6f6ca5..51b41017c2f 100644
--- a/gcc/tree-ssa-live.c
+++ b/gcc/tree-ssa-live.c
@@ -32,7 +32,9 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "timevar.h"
#include "dumpfile.h"
@@ -1007,7 +1009,7 @@ loe_visit_block (tree_live_info_p live, basic_block bb, sbitmap visited,
FOR_EACH_EDGE (e, ei, bb->preds)
{
pred_bb = e->src;
- if (pred_bb == ENTRY_BLOCK_PTR)
+ if (pred_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
/* TMP is variables live-on-entry from BB that aren't defined in the
predecessor block. This should be the live on entry vars to pred.
@@ -1085,7 +1087,7 @@ set_var_live_on_entry (tree ssa_name, tree_live_info_p live)
bitmap_set_bit (&live->liveout[def_bb->index], p);
}
else
- def_bb = ENTRY_BLOCK_PTR;
+ def_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Visit each use of SSA_NAME and if it isn't in the same block as the def,
add it to the list of live on entry blocks. */
@@ -1101,7 +1103,7 @@ set_var_live_on_entry (tree ssa_name, tree_live_info_p live)
defined in that block, or whether it's live on entry. */
int index = PHI_ARG_INDEX_FROM_USE (use);
edge e = gimple_phi_arg_edge (use_stmt, index);
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
if (e->src != def_bb)
add_block = e->src;
@@ -1167,14 +1169,14 @@ calculate_live_on_exit (tree_live_info_p liveinfo)
if (p == NO_PARTITION)
continue;
e = gimple_phi_arg_edge (phi, i);
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (&liveinfo->liveout[e->src->index], p);
}
}
/* Add each successor's live on entry to this block's live on exit. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_ior_into (&liveinfo->liveout[bb->index],
live_on_entry (liveinfo, e->dest));
}
@@ -1367,12 +1369,12 @@ verify_live_on_entry (tree_live_info_p live)
/* Check for live on entry partitions and report those with a DEF in
the program. This will typically mean an optimization has done
something wrong. */
- bb = ENTRY_BLOCK_PTR;
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
num = 0;
FOR_EACH_EDGE (e, ei, bb->succs)
{
int entry_block = e->dest->index;
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
for (i = 0; i < (unsigned)num_var_partitions (map); i++)
{
diff --git a/gcc/tree-ssa-live.h b/gcc/tree-ssa-live.h
index 0aa9f0c4331..e8074bd425e 100644
--- a/gcc/tree-ssa-live.h
+++ b/gcc/tree-ssa-live.h
@@ -273,8 +273,8 @@ static inline bitmap
live_on_entry (tree_live_info_p live, basic_block bb)
{
gcc_checking_assert (live->livein
- && bb != ENTRY_BLOCK_PTR
- && bb != EXIT_BLOCK_PTR);
+ && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
return &live->livein[bb->index];
}
@@ -287,8 +287,8 @@ static inline bitmap
live_on_exit (tree_live_info_p live, basic_block bb)
{
gcc_checking_assert (live->liveout
- && bb != ENTRY_BLOCK_PTR
- && bb != EXIT_BLOCK_PTR);
+ && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
return &live->liveout[bb->index];
}
diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c
index 57c1555d28c..df45c286790 100644
--- a/gcc/tree-ssa-loop-ch.c
+++ b/gcc/tree-ssa-loop-ch.c
@@ -130,7 +130,6 @@ do_while_loop_p (struct loop *loop)
static unsigned int
copy_loop_headers (void)
{
- loop_iterator li;
struct loop *loop;
basic_block header;
edge exit, entry;
@@ -146,11 +145,11 @@ copy_loop_headers (void)
return 0;
}
- bbs = XNEWVEC (basic_block, n_basic_blocks);
- copied_bbs = XNEWVEC (basic_block, n_basic_blocks);
- bbs_size = n_basic_blocks;
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ copied_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ bbs_size = n_basic_blocks_for_fn (cfun);
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
/* Copy at most 20 insns. */
int limit = 20;
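
Illustrative sketch (not part of the patch): n_basic_blocks likewise becomes n_basic_blocks_for_fn (cfun) across the merge, so scratch arrays sized by the block count name the function they describe. The helper below is invented for illustration of the allocation pattern used above.

/* Allocate one basic_block slot per block of FN, including the
   artificial entry and exit blocks counted by n_basic_blocks_for_fn.  */
static basic_block *
alloc_block_array (struct function *fn)
{
  return XNEWVEC (basic_block, n_basic_blocks_for_fn (fn));
}
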
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 5a463b0a648..c975a97b896 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
@@ -1583,19 +1584,18 @@ analyze_memory_references (void)
gimple_stmt_iterator bsi;
basic_block bb, *bbs;
struct loop *loop, *outer;
- loop_iterator li;
unsigned i, n;
/* Initialize bb_loop_postorder with a mapping from loop->num to
its postorder index. */
i = 0;
bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
bb_loop_postorder[loop->num] = i++;
/* Collect all basic-blocks in loops and sort them after their
loops postorder. */
i = 0;
- bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
FOR_EACH_BB (bb)
if (bb->loop_father != current_loops->tree_root)
bbs[i++] = bb;
@@ -1616,7 +1616,7 @@ analyze_memory_references (void)
/* Propagate the information about accessed memory references up
the loop hierarchy. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
/* Finalize the overall touched references (including subloops). */
bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index 18e76435923..b2edef5fc29 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -47,6 +47,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
@@ -982,7 +983,6 @@ canonicalize_loop_induction_variables (struct loop *loop,
unsigned int
canonicalize_induction_variables (void)
{
- loop_iterator li;
struct loop *loop;
bool changed = false;
bool irred_invalidated = false;
@@ -991,7 +991,7 @@ canonicalize_induction_variables (void)
free_numbers_of_iterations_estimates ();
estimate_numbers_of_iterations ();
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
changed |= canonicalize_loop_induction_variables (loop,
true, UL_SINGLE_ITER,
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 6b3aca381f5..f0484c794d3 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -66,6 +66,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -78,11 +79,13 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
@@ -2004,7 +2007,7 @@ find_interesting_uses (struct ivopts_data *data)
bb = body[i];
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !flow_bb_inside_loop_p (data->current_loop, e->dest))
find_interesting_uses_outside (data, e);
@@ -3207,10 +3210,20 @@ multiplier_allowed_in_address_p (HOST_WIDE_INT ratio, enum machine_mode mode,
TODO -- there must be some better way. This all is quite crude. */
+enum ainc_type
+{
+ AINC_PRE_INC, /* Pre increment. */
+ AINC_PRE_DEC, /* Pre decrement. */
+ AINC_POST_INC, /* Post increment. */
+ AINC_POST_DEC, /* Post decrement. */
+ AINC_NONE /* Also the number of auto increment types. */
+};
+
typedef struct address_cost_data_s
{
HOST_WIDE_INT min_offset, max_offset;
unsigned costs[2][2][2][2];
+ unsigned ainc_costs[AINC_NONE];
} *address_cost_data;
@@ -3228,6 +3241,7 @@ get_address_cost (bool symbol_present, bool var_present,
static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
static bool has_predec[MAX_MACHINE_MODE], has_postdec[MAX_MACHINE_MODE];
unsigned cost, acost, complexity;
+ enum ainc_type autoinc_type;
bool offset_p, ratio_p, autoinc;
HOST_WIDE_INT s_offset, autoinc_offset, msize;
unsigned HOST_WIDE_INT mask;
@@ -3299,33 +3313,49 @@ get_address_cost (bool symbol_present, bool var_present,
reg0 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);
- if (USE_LOAD_PRE_DECREMENT (mem_mode)
+ if (USE_LOAD_PRE_DECREMENT (mem_mode)
|| USE_STORE_PRE_DECREMENT (mem_mode))
{
addr = gen_rtx_PRE_DEC (address_mode, reg0);
has_predec[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_predec[mem_mode])
+ data->ainc_costs[AINC_PRE_DEC]
+ = address_cost (addr, mem_mode, as, speed);
}
- if (USE_LOAD_POST_DECREMENT (mem_mode)
+ if (USE_LOAD_POST_DECREMENT (mem_mode)
|| USE_STORE_POST_DECREMENT (mem_mode))
{
addr = gen_rtx_POST_DEC (address_mode, reg0);
has_postdec[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_postdec[mem_mode])
+ data->ainc_costs[AINC_POST_DEC]
+ = address_cost (addr, mem_mode, as, speed);
}
- if (USE_LOAD_PRE_INCREMENT (mem_mode)
+ if (USE_LOAD_PRE_INCREMENT (mem_mode)
|| USE_STORE_PRE_DECREMENT (mem_mode))
{
addr = gen_rtx_PRE_INC (address_mode, reg0);
has_preinc[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_preinc[mem_mode])
+ data->ainc_costs[AINC_PRE_INC]
+ = address_cost (addr, mem_mode, as, speed);
}
- if (USE_LOAD_POST_INCREMENT (mem_mode)
+ if (USE_LOAD_POST_INCREMENT (mem_mode)
|| USE_STORE_POST_INCREMENT (mem_mode))
{
addr = gen_rtx_POST_INC (address_mode, reg0);
has_postinc[mem_mode]
= memory_address_addr_space_p (mem_mode, addr, as);
+
+ if (has_postinc[mem_mode])
+ data->ainc_costs[AINC_POST_INC]
+ = address_cost (addr, mem_mode, as, speed);
}
for (i = 0; i < 16; i++)
{
@@ -3451,21 +3481,31 @@ get_address_cost (bool symbol_present, bool var_present,
s_offset = offset;
autoinc = false;
+ autoinc_type = AINC_NONE;
msize = GET_MODE_SIZE (mem_mode);
autoinc_offset = offset;
if (stmt_after_inc)
autoinc_offset += ratio * cstep;
if (symbol_present || var_present || ratio != 1)
autoinc = false;
- else if ((has_postinc[mem_mode] && autoinc_offset == 0
- && msize == cstep)
- || (has_postdec[mem_mode] && autoinc_offset == 0
+ else
+ {
+ if (has_postinc[mem_mode] && autoinc_offset == 0
+ && msize == cstep)
+ autoinc_type = AINC_POST_INC;
+ else if (has_postdec[mem_mode] && autoinc_offset == 0
&& msize == -cstep)
- || (has_preinc[mem_mode] && autoinc_offset == msize
+ autoinc_type = AINC_POST_DEC;
+ else if (has_preinc[mem_mode] && autoinc_offset == msize
&& msize == cstep)
- || (has_predec[mem_mode] && autoinc_offset == -msize
- && msize == -cstep))
- autoinc = true;
+ autoinc_type = AINC_PRE_INC;
+ else if (has_predec[mem_mode] && autoinc_offset == -msize
+ && msize == -cstep)
+ autoinc_type = AINC_PRE_DEC;
+
+ if (autoinc_type != AINC_NONE)
+ autoinc = true;
+ }
cost = 0;
offset_p = (s_offset != 0
@@ -3482,7 +3522,10 @@ get_address_cost (bool symbol_present, bool var_present,
if (may_autoinc)
*may_autoinc = autoinc;
- acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
+ if (autoinc)
+ acost = data->ainc_costs[autoinc_type];
+ else
+ acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
complexity = (symbol_present != 0) + (var_present != 0) + offset_p + ratio_p;
return new_cost (cost + acost, complexity);
}
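
Illustrative sketch (not part of the patch): the ivopts change above does two things — it records a separate address cost per auto-increment flavour (ainc_costs[], probed with address_cost) and it classifies a candidate into one of those flavours instead of only answering yes/no. Below is the classification step alone, leaving out the has_postinc/has_predec availability checks, which the caller would still have to apply; quantities are in bytes as in the hunk.

/* Classify an increment candidate.  OFFSET is the address offset at
   the increment point, MSIZE the access size and CSTEP the candidate
   step.  */
static enum ainc_type
classify_autoinc (HOST_WIDE_INT offset, HOST_WIDE_INT msize,
		  HOST_WIDE_INT cstep)
{
  if (offset == 0 && msize == cstep)
    return AINC_POST_INC;
  if (offset == 0 && msize == -cstep)
    return AINC_POST_DEC;
  if (offset == msize && msize == cstep)
    return AINC_PRE_INC;
  if (offset == -msize && msize == -cstep)
    return AINC_PRE_DEC;
  return AINC_NONE;
}
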
@@ -6832,12 +6875,11 @@ tree_ssa_iv_optimize (void)
{
struct loop *loop;
struct ivopts_data data;
- loop_iterator li;
tree_ssa_iv_optimize_init (&data);
/* Optimize the loops starting with the innermost ones. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
if (dump_file && (dump_flags & TDF_DETAILS))
flow_loop_dump (loop, dump_file, NULL, 1);
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index ae51ee66f07..67291670418 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
@@ -194,7 +195,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
/* Normally the work list size is bounded by the number of basic
blocks in the largest loop. We don't know this number, but we
can be fairly sure that it will be relatively small. */
- worklist.create (MAX (8, n_basic_blocks / 128));
+ worklist.create (MAX (8, n_basic_blocks_for_fn (cfun) / 128));
EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
{
@@ -230,7 +231,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
bool pred_visited;
/* We should have met DEF_BB along the way. */
- gcc_assert (pred != ENTRY_BLOCK_PTR);
+ gcc_assert (pred != ENTRY_BLOCK_PTR_FOR_FN (cfun));
if (pred_loop_depth >= def_loop_depth)
{
@@ -349,12 +350,11 @@ add_exit_phis (bitmap names_to_rename, bitmap *use_blocks, bitmap *loop_exits)
static void
get_loops_exits (bitmap *loop_exits)
{
- loop_iterator li;
struct loop *loop;
unsigned j;
edge e;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
vec<edge> exit_edges = get_loop_exit_edges (loop);
loop_exits[loop->num] = BITMAP_ALLOC (&loop_renamer_obstack);
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index c4f4aef0bc4..5c187f5d154 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -22,6 +22,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "calls.h"
+#include "expr.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -47,6 +49,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "tree-inline.h"
#include "tree-pass.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "wide-int-print.h"
@@ -490,7 +493,7 @@ bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
/* Now walk the dominators of the loop header and use the entry
guards to refine the estimates. */
for (bb = loop->header;
- bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
+ bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
bb = get_immediate_dominator (CDI_DOMINATORS, bb))
{
if (!single_pred_p (bb))
@@ -1774,7 +1777,7 @@ simplify_using_initial_conditions (struct loop *loop, tree expr)
the number of BBs times the number of loops in degenerate
cases. */
for (bb = loop->header;
- bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
+ bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
bb = get_immediate_dominator (CDI_DOMINATORS, bb))
{
if (!single_pred_p (bb))
@@ -3570,14 +3573,13 @@ estimated_stmt_executions (struct loop *loop, widest_int *nit)
void
estimate_numbers_of_iterations (void)
{
- loop_iterator li;
struct loop *loop;
/* We don't want to issue signed overflow warnings while getting
loop iteration estimates. */
fold_defer_overflow_warnings ();
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
estimate_numbers_of_iterations_loop (loop);
}
@@ -3847,10 +3849,9 @@ free_numbers_of_iterations_estimates_loop (struct loop *loop)
void
free_numbers_of_iterations_estimates (void)
{
- loop_iterator li;
struct loop *loop;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
free_numbers_of_iterations_estimates_loop (loop);
}
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index 8046309f950..32b9f8c216f 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
@@ -1281,7 +1282,7 @@ may_use_storent_in_loop_p (struct loop *loop)
FOR_EACH_VEC_ELT (exits, i, exit)
if ((exit->flags & EDGE_ABNORMAL)
- && exit->dest == EXIT_BLOCK_PTR)
+ && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
ret = false;
exits.release ();
@@ -1929,7 +1930,6 @@ fail:
unsigned int
tree_ssa_prefetch_arrays (void)
{
- loop_iterator li;
struct loop *loop;
bool unrolled = false;
int todo_flags = 0;
@@ -1977,7 +1977,7 @@ tree_ssa_prefetch_arrays (void)
here. */
gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Processing loop %d:\n", loop->num);
diff --git a/gcc/tree-ssa-loop-unswitch.c b/gcc/tree-ssa-loop-unswitch.c
index 236b89b8a12..27f52b28025 100644
--- a/gcc/tree-ssa-loop-unswitch.c
+++ b/gcc/tree-ssa-loop-unswitch.c
@@ -83,13 +83,12 @@ static tree tree_may_unswitch_on (basic_block, struct loop *);
unsigned int
tree_ssa_unswitch_loops (void)
{
- loop_iterator li;
struct loop *loop;
bool changed = false;
HOST_WIDE_INT iterations;
/* Go through inner loops (only original ones). */
- FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, ";; Considering loop %d\n", loop->num);
@@ -195,7 +194,7 @@ simplify_using_entry_checks (struct loop *loop, tree cond)
return cond;
e = single_pred_edge (e->src);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return cond;
}
}
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 0a91a6d90a3..87d2c023ed3 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -93,11 +93,14 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
+#include "stor-layout.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
@@ -285,7 +288,7 @@ register_division_in (basic_block bb)
if (!occ)
{
occ = occ_new (bb, NULL);
- insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
+ insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
}
occ->bb_has_division = true;
@@ -512,7 +515,7 @@ execute_cse_reciprocals (void)
occ_pool = create_alloc_pool ("dominators for recip",
sizeof (struct occurrence),
- n_basic_blocks / 3 + 1);
+ n_basic_blocks_for_fn (cfun) / 3 + 1);
memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
calculate_dominance_info (CDI_DOMINATORS);
@@ -1504,7 +1507,7 @@ execute_cse_sincos (void)
{
if (!tree_fits_shwi_p (arg1))
break;
-
+
n = tree_to_shwi (arg1);
result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
}
diff --git a/gcc/tree-ssa-operands.c b/gcc/tree-ssa-operands.c
index 4e05d2df046..3508b34b77a 100644
--- a/gcc/tree-ssa-operands.c
+++ b/gcc/tree-ssa-operands.c
@@ -22,6 +22,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stmt.h"
+#include "print-tree.h"
#include "flags.h"
#include "function.h"
#include "gimple-pretty-print.h"
@@ -30,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-inline.h"
#include "timevar.h"
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 0384c3dbb2a..61e4dbde35e 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
@@ -35,7 +36,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "langhooks.h"
@@ -338,7 +341,7 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
outer ones, and also that we do not try to visit a removed
block. */
bb_order = single_pred_before_succ_order ();
- n = n_basic_blocks - NUM_FIXED_BLOCKS;
+ n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
for (i = 0; i < n; i++)
{
diff --git a/gcc/tree-ssa-phiprop.c b/gcc/tree-ssa-phiprop.c
index 070b8ed3f0b..389423b04c4 100644
--- a/gcc/tree-ssa-phiprop.c
+++ b/gcc/tree-ssa-phiprop.c
@@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "langhooks.h"
@@ -380,7 +381,7 @@ tree_ssa_phiprop (void)
/* Walk the dominator tree in preorder. */
bbs = get_all_dominated_blocks (CDI_DOMINATORS,
- single_succ (ENTRY_BLOCK_PTR));
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
FOR_EACH_VEC_ELT (bbs, i, bb)
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
did_something |= propagate_with_phi (bb, gsi_stmt (gsi), phivn, n);
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 64e6866e7f6..3c5cefd9b49 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -35,9 +35,11 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "hash-table.h"
@@ -2465,7 +2467,7 @@ compute_antic (void)
}
/* At the exit block we anticipate nothing. */
- BB_VISITED (EXIT_BLOCK_PTR) = 1;
+ BB_VISITED (EXIT_BLOCK_PTR_FOR_FN (cfun)) = 1;
changed_blocks = sbitmap_alloc (last_basic_block + 1);
bitmap_ones (changed_blocks);
@@ -3666,7 +3668,7 @@ insert (void)
num_iterations++;
if (dump_file && dump_flags & TDF_DETAILS)
fprintf (dump_file, "Starting insert iteration %d\n", num_iterations);
- new_stuff = insert_aux (ENTRY_BLOCK_PTR);
+ new_stuff = insert_aux (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Clear the NEW sets before the next iteration. We have already
fully propagated its contents. */
@@ -3711,24 +3713,25 @@ compute_avail (void)
e = get_or_alloc_expr_for_name (name);
add_to_value (get_expr_value_id (e), e);
- bitmap_insert_into_set (TMP_GEN (ENTRY_BLOCK_PTR), e);
- bitmap_value_insert_into_set (AVAIL_OUT (ENTRY_BLOCK_PTR), e);
+ bitmap_insert_into_set (TMP_GEN (ENTRY_BLOCK_PTR_FOR_FN (cfun)), e);
+ bitmap_value_insert_into_set (AVAIL_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ e);
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
- print_bitmap_set (dump_file, TMP_GEN (ENTRY_BLOCK_PTR),
+ print_bitmap_set (dump_file, TMP_GEN (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
"tmp_gen", ENTRY_BLOCK);
- print_bitmap_set (dump_file, AVAIL_OUT (ENTRY_BLOCK_PTR),
+ print_bitmap_set (dump_file, AVAIL_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
"avail_out", ENTRY_BLOCK);
}
/* Allocate the worklist. */
- worklist = XNEWVEC (basic_block, n_basic_blocks);
+ worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* Seed the algorithm by putting the dominator children of the entry
block on the worklist. */
- for (son = first_dom_son (CDI_DOMINATORS, ENTRY_BLOCK_PTR);
+ for (son = first_dom_son (CDI_DOMINATORS, ENTRY_BLOCK_PTR_FOR_FN (cfun));
son;
son = next_dom_son (CDI_DOMINATORS, son))
worklist[sp++] = son;
@@ -4655,7 +4658,7 @@ init_pre (void)
connect_infinite_loops_to_exit ();
memset (&pre_stats, 0, sizeof (pre_stats));
- postorder = XNEWVEC (int, n_basic_blocks);
+ postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
postorder_num = inverted_post_order_compute (postorder);
alloc_aux_for_blocks (sizeof (struct bb_bitmap_sets));
@@ -4731,7 +4734,7 @@ do_pre (void)
fixed, don't run it when we have an incredibly large number of
bb's. If we aren't going to run insert, there is no point in
computing ANTIC, either, even though it's plenty fast. */
- if (n_basic_blocks < 4000)
+ if (n_basic_blocks_for_fn (cfun) < 4000)
{
compute_antic ();
insert ();
diff --git a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c
index 078b04afdbc..b9db34c5057 100644
--- a/gcc/tree-ssa-propagate.c
+++ b/gcc/tree-ssa-propagate.c
@@ -37,6 +37,7 @@
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa.h"
#include "tree-ssa-propagate.h"
@@ -183,7 +184,8 @@ cfg_blocks_add (basic_block bb)
{
bool head = false;
- gcc_assert (bb != ENTRY_BLOCK_PTR && bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (!bitmap_bit_p (bb_in_list, bb->index));
if (cfg_blocks_empty_p ())
@@ -278,7 +280,7 @@ static void
add_control_edge (edge e)
{
basic_block bb = e->dest;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
/* If the edge had already been executed, skip it. */
@@ -407,7 +409,7 @@ simulate_block (basic_block block)
gimple_stmt_iterator gsi;
/* There is nothing to do for the exit block. */
- if (block == EXIT_BLOCK_PTR)
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -518,7 +520,7 @@ ssa_prop_init (void)
/* Seed the algorithm by adding the successors of the entry block to the
edge worklist. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
add_control_edge (e);
}
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 4c3dca822f5..3ec272339bd 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "rtl.h"
#include "tm_p.h"
#include "tree.h"
+#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-inline.h"
@@ -36,9 +37,11 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-iterator.h"
@@ -1267,11 +1270,11 @@ build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode)
if ((!op1def || gimple_nop_p (op1def))
&& (!op2def || gimple_nop_p (op2def)))
{
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
if (gsi_end_p (gsi))
{
gimple_stmt_iterator gsi2
- = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR));
+ = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gimple_set_uid (sum,
gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
}
@@ -4525,8 +4528,8 @@ debug_ops_vector (vec<operand_entry_t> ops)
static void
do_reassoc (void)
{
- break_up_subtract_bb (ENTRY_BLOCK_PTR);
- reassociate_bb (EXIT_BLOCK_PTR);
+ break_up_subtract_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ reassociate_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Initialize the reassociation pass. */
@@ -4536,7 +4539,7 @@ init_reassoc (void)
{
int i;
long rank = 2;
- int *bbs = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+ int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
/* Find the loops, so that we can prevent moving calculations in
them. */
@@ -4566,7 +4569,7 @@ init_reassoc (void)
}
/* Set up rank for each BB */
- for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
+ for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
bb_rank[bbs[i]] = ++rank << 16;
free (bbs);
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index ab983ecf3b6..601057f01ce 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-inline.h"
@@ -31,7 +32,9 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "dumpfile.h"
@@ -760,7 +763,7 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
}
/* For non-calls, store the information that makes up the address. */
-
+ tree orig = ref;
while (ref)
{
vn_reference_op_s temp;
@@ -810,7 +813,15 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
+ wi::lrshift (wi::to_offset (bit_offset),
BITS_PER_UNIT == 8
? 3 : exact_log2 (BITS_PER_UNIT)));
- if (wi::fits_shwi_p (off))
+ if (wi::fits_shwi_p (off)
+ /* Prohibit value-numbering zero offset components
+ of addresses the same before the pass folding
+ __builtin_object_size had a chance to run
+ (checking cfun->after_inlining does the
+ trick here). */
+ && (TREE_CODE (orig) != ADDR_EXPR
+ || off != 0
+ || cfun->after_inlining))
temp.off = off.to_shwi ();
}
}
@@ -1900,7 +1911,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_)
&& TREE_CODE (rhs) != ADDR_EXPR)
return (void *)-1;
- copy_size = TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 2));
+ copy_size = tree_to_uhwi (gimple_call_arg (def_stmt, 2));
/* The bases of the destination and the references have to agree. */
if ((TREE_CODE (base) != MEM_REF
@@ -1916,7 +1927,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_)
/* And the access has to be contained within the memcpy destination. */
at = offset / BITS_PER_UNIT;
if (TREE_CODE (base) == MEM_REF)
- at += TREE_INT_CST_LOW (TREE_OPERAND (base, 1));
+ at += tree_to_uhwi (TREE_OPERAND (base, 1));
if (lhs_offset > at
|| lhs_offset + copy_size < at + maxsize / BITS_PER_UNIT)
return (void *)-1;
@@ -3971,13 +3982,14 @@ init_scc_vn (void)
shared_lookup_phiargs.create (0);
shared_lookup_references.create (0);
rpo_numbers = XNEWVEC (int, last_basic_block);
- rpo_numbers_temp = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+ rpo_numbers_temp =
+ XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
pre_and_rev_post_order_compute (NULL, rpo_numbers_temp, false);
/* RPO numbers is an array of rpo ordering, rpo[i] = bb means that
the i'th block in RPO order is bb. We want to map bb's to RPO
numbers, so we need to rearrange this array. */
- for (j = 0; j < n_basic_blocks - NUM_FIXED_BLOCKS; j++)
+ for (j = 0; j < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; j++)
rpo_numbers[rpo_numbers_temp[j]] = j;
XDELETE (rpo_numbers_temp);
diff --git a/gcc/tree-ssa-sink.c b/gcc/tree-ssa-sink.c
index caf10bb522f..305882dd06f 100644
--- a/gcc/tree-ssa-sink.c
+++ b/gcc/tree-ssa-sink.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-inline.h"
@@ -169,7 +170,7 @@ nearest_common_dominator_of_uses (gimple stmt, bool *debug_stmts)
}
/* Short circuit. Nothing dominates the entry block. */
- if (useblock == ENTRY_BLOCK_PTR)
+ if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
BITMAP_FREE (blocks);
return NULL;
@@ -567,7 +568,7 @@ execute_sink_code (void)
memset (&sink_stats, 0, sizeof (sink_stats));
calculate_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_POST_DOMINATORS);
- sink_code_in_bb (EXIT_BLOCK_PTR);
+ sink_code_in_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
statistics_counter_event (cfun, "Sunk statements", sink_stats.sunk);
free_dominance_info (CDI_POST_DOMINATORS);
remove_fake_exit_edges ();
diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index 04e9ef4a0e2..514b1b829e2 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stor-layout.h"
#include "hash-table.h"
#include "bitmap.h"
#include "gimple.h"
@@ -31,7 +32,9 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-pass.h"
#include "domwalk.h"
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index bcb617a1b95..0258c0e5263 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -29,12 +29,16 @@
#include "flags.h"
#include "basic-block.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stmt.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "cgraph.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
@@ -5359,7 +5363,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack,
&& !pair->has_unknown_size
&& pair->offset + (HOST_WIDE_INT)pair->size == offset + foff)
{
- pair->size += TREE_INT_CST_LOW (DECL_SIZE (field));
+ pair->size += tree_to_uhwi (DECL_SIZE (field));
}
else
{
@@ -5367,7 +5371,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack,
e.offset = offset + foff;
e.has_unknown_size = has_unknown_size;
if (!has_unknown_size)
- e.size = TREE_INT_CST_LOW (DECL_SIZE (field));
+ e.size = tree_to_uhwi (DECL_SIZE (field));
else
e.size = -1;
e.must_have_pointers = must_have_pointers_p;
@@ -5684,7 +5688,7 @@ create_variable_info_for_1 (tree decl, const char *name)
vi = new_var_info (decl, name);
vi->offset = 0;
vi->may_have_pointers = true;
- vi->fullsize = TREE_INT_CST_LOW (declsize);
+ vi->fullsize = tree_to_uhwi (declsize);
vi->size = vi->fullsize;
vi->is_full_var = true;
fieldstack.release ();
@@ -5692,7 +5696,7 @@ create_variable_info_for_1 (tree decl, const char *name)
}
vi = new_var_info (decl, name);
- vi->fullsize = TREE_INT_CST_LOW (declsize);
+ vi->fullsize = tree_to_uhwi (declsize);
for (i = 0, newvi = vi;
fieldstack.iterate (i, &fo);
++i, newvi = vi_next (newvi))
diff --git a/gcc/tree-ssa-tail-merge.c b/gcc/tree-ssa-tail-merge.c
index 8d81f57daf5..35b8bbe0497 100644
--- a/gcc/tree-ssa-tail-merge.c
+++ b/gcc/tree-ssa-tail-merge.c
@@ -190,6 +190,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "trans-mem.h"
#include "tm_p.h"
#include "basic-block.h"
#include "flags.h"
@@ -763,11 +765,11 @@ static void
init_worklist (void)
{
alloc_aux_for_blocks (sizeof (struct aux_bb_info));
- same_succ_htab.create (n_basic_blocks);
+ same_succ_htab.create (n_basic_blocks_for_fn (cfun));
same_succ_edge_flags = XCNEWVEC (int, last_basic_block);
deleted_bbs = BITMAP_ALLOC (NULL);
deleted_bb_preds = BITMAP_ALLOC (NULL);
- worklist.create (n_basic_blocks);
+ worklist.create (n_basic_blocks_for_fn (cfun));
find_same_succ ();
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -995,7 +997,7 @@ static vec<bb_cluster> all_clusters;
static void
alloc_cluster_vectors (void)
{
- all_clusters.create (n_basic_blocks);
+ all_clusters.create (n_basic_blocks_for_fn (cfun));
}
/* Reset all cluster vectors. */
diff --git a/gcc/tree-ssa-ter.c b/gcc/tree-ssa-ter.c
index df0c458e019..9b9e655b726 100644
--- a/gcc/tree-ssa-ter.c
+++ b/gcc/tree-ssa-ter.c
@@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "dumpfile.h"
#include "tree-ssa-live.h"
@@ -601,8 +602,7 @@ find_replaceable_in_bb (temp_expr_table_p tab, basic_block bb)
/* If the stmt does a memory store and the replacement
is a load aliasing it avoid creating overlapping
assignments which we cannot expand correctly. */
- if (gimple_vdef (stmt)
- && gimple_assign_single_p (stmt))
+ if (gimple_vdef (stmt))
{
gimple def_stmt = SSA_NAME_DEF_STMT (use);
while (is_gimple_assign (def_stmt)
@@ -611,8 +611,8 @@ find_replaceable_in_bb (temp_expr_table_p tab, basic_block bb)
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (def_stmt));
if (gimple_vuse (def_stmt)
&& gimple_assign_single_p (def_stmt)
- && refs_may_alias_p (gimple_assign_lhs (stmt),
- gimple_assign_rhs1 (def_stmt)))
+ && stmt_may_clobber_ref_p (stmt,
+ gimple_assign_rhs1 (def_stmt)))
same_root_var = true;
}
diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
index cabfc824c62..7bb8829e5cc 100644
--- a/gcc/tree-ssa-threadedge.c
+++ b/gcc/tree-ssa-threadedge.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
@@ -1097,6 +1098,14 @@ thread_across_edge (gimple dummy_cond,
path,
&backedge_seen);
+ if (!found
+ && (!backedge_seen
+ || ! cond_arg_set_in_bb (path->last ()->e, e->dest)))
+ found = thread_through_normal_block (path->last ()->e, dummy_cond,
+ handle_dominating_asserts,
+ stack, simplify, path, visited,
+ &backedge_seen);
+
/* If we were able to thread through a successor of E->dest, then
record the jump threading opportunity. */
if (found)
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index e819d65e030..777fe41033b 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-phinodes.h"
#include "tree-ssa.h"
#include "tree-ssa-threadupdate.h"
+#include "ssa-iterators.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "hash-table.h"
@@ -73,19 +74,16 @@ along with GCC; see the file COPYING3. If not see
set of unique destination blocks that the incoming edges should
be threaded to.
- Block duplication can be further minimized by using B instead of
- creating B' for one destination if all edges into B are going to be
- threaded to a successor of B. We had code to do this at one time, but
- I'm not convinced it is correct with the changes to avoid mucking up
- the loop structure (which may cancel threading requests, thus a block
- which we thought was going to become unreachable may still be reachable).
- This code was also going to get ugly with the introduction of the ability
- for a single jump thread request to bypass multiple blocks.
+ We reduce the number of edges and statements we create by not copying all
+ the outgoing edges and the control statement in step #1. We instead create
+ a template block without the outgoing edges and duplicate the template.
- We further reduce the number of edges and statements we create by
- not copying all the outgoing edges and the control statement in
- step #1. We instead create a template block without the outgoing
- edges and duplicate the template. */
+ Another case this code handles is threading through a "joiner" block. In
+ this case, we do not know the destination of the joiner block, but one
+ of the outgoing edges from the joiner block leads to a threadable path. This
+ case largely works as outlined above, except the duplicate of the joiner
+ block still contains a full set of outgoing edges and its control statement.
+ We just redirect one of its outgoing edges to our jump threading path. */
/* Steps #5 and #6 of the above algorithm are best implemented by walking
@@ -115,9 +113,20 @@ struct el
struct redirection_data : typed_free_remove<redirection_data>
{
- /* A duplicate of B with the trailing control statement removed and which
- targets a single successor of B. */
- basic_block dup_block;
+ /* We support wiring up two block duplicates in a jump threading path.
+
+ One is a normal block copy where we remove the control statement
+ and wire up its single remaining outgoing edge to the thread path.
+
+ The other is a joiner block where we leave the control statement
+ in place, but wire one of the outgoing edges to a thread path.
+
+ In theory we could have multiple block duplicates in a jump
+ threading path, but I haven't tried that.
+
+ The duplicate blocks appear in this array in the same order in
+ which they appear in the jump thread path. */
+ basic_block dup_blocks[2];
/* The jump threading path. */
vec<jump_thread_edge *> *path;
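
Illustrative sketch (not part of the patch): with joiner-block support a single jump-threading path can request two block copies — an EDGE_COPY_SRC_JOINER_BLOCK copy that keeps its control statement and an EDGE_COPY_SRC_BLOCK copy that loses it — which is what the two dup_blocks[] slots hold. The helper below is invented for illustration and mirrors the any_remaining_duplicated_blocks function added later in this file.

/* Count how many blocks along PATH will need a duplicate; with the
   current scheme the result is expected to be at most 2.  */
static unsigned int
count_blocks_to_duplicate (vec<jump_thread_edge *> *path)
{
  unsigned int n = 0;

  for (unsigned int i = 0; i < path->length (); i++)
    if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	|| (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
      n++;

  return n;
}
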
@@ -171,8 +180,11 @@ struct ssa_local_info_t
/* The current block we are working on. */
basic_block bb;
- /* A template copy of BB with no outgoing edges or control statement that
- we use for creating copies. */
+ /* We only create a template block for the first duplicated block in a
+ jump threading path as we may need many duplicates of that block.
+
+ The second duplicate block in a path is specific to that path. Creating
+ and sharing a template for that block is considerably more difficult. */
basic_block template_block;
/* TRUE if we thread one or more jumps, FALSE otherwise. */
@@ -234,24 +246,27 @@ remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
}
}
-/* Create a duplicate of BB. Record the duplicate block in RD. */
+/* Create a duplicate of BB. Record the duplicate block in an array
+ indexed by COUNT stored in RD. */
static void
-create_block_for_threading (basic_block bb, struct redirection_data *rd)
+create_block_for_threading (basic_block bb,
+ struct redirection_data *rd,
+ unsigned int count)
{
edge_iterator ei;
edge e;
/* We can use the generic block duplication code and simply remove
the stuff we do not need. */
- rd->dup_block = duplicate_block (bb, NULL, NULL);
+ rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
- FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
+ FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
e->aux = NULL;
/* Zero out the profile, since the block is unreachable for now. */
- rd->dup_block->frequency = 0;
- rd->dup_block->count = 0;
+ rd->dup_blocks[count]->frequency = 0;
+ rd->dup_blocks[count]->count = 0;
}
/* Main data structure to hold information for duplicates of BB. */
@@ -275,7 +290,8 @@ lookup_redirection_data (edge e, enum insert_option insert)
in the table. */
elt = XNEW (struct redirection_data);
elt->path = path;
- elt->dup_block = NULL;
+ elt->dup_blocks[0] = NULL;
+ elt->dup_blocks[1] = NULL;
elt->incoming_edges = NULL;
slot = redirection_data.find_slot (elt, insert);
@@ -312,7 +328,7 @@ lookup_redirection_data (edge e, enum insert_option insert)
to the list of incoming edges associated with E. */
if (insert)
{
- struct el *el = XNEW (struct el);
+ struct el *el = XNEW (struct el);
el->next = elt->incoming_edges;
el->e = e;
elt->incoming_edges = el;
@@ -322,6 +338,31 @@ lookup_redirection_data (edge e, enum insert_option insert)
}
}
+/* Similar to copy_phi_args, except that the PHI arg already exists; it
+ just does not have a value associated with it. */
+
+static void
+copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
+{
+ int src_idx = src_e->dest_idx;
+ int tgt_idx = tgt_e->dest_idx;
+
+ /* Iterate over each PHI in e->dest. */
+ for (gimple_stmt_iterator gsi = gsi_start_phis (src_e->dest),
+ gsi2 = gsi_start_phis (tgt_e->dest);
+ !gsi_end_p (gsi);
+ gsi_next (&gsi), gsi_next (&gsi2))
+ {
+ gimple src_phi = gsi_stmt (gsi);
+ gimple dest_phi = gsi_stmt (gsi2);
+ tree val = gimple_phi_arg_def (src_phi, src_idx);
+ source_location locus = gimple_phi_arg_location (src_phi, src_idx);
+
+ SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
+ gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
+ }
+}
+
/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
static void
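For readers unfamiliar with PHI argument slots, here is a rough standalone model of what copy_phi_arg_into_existing_phi above does: for each corresponding pair of PHIs, take the argument the source edge would supply and write it into the slot reserved for the target edge. The types and names below are invented for illustration; the real code walks GIMPLE PHI nodes and also copies the argument's source location.

/* Illustration only.  */
#include <stdio.h>

#define MAX_PREDS 4

struct phi
{
  int args[MAX_PREDS];   /* one argument per incoming edge */
};

/* SRC_IDX: destination index of the source edge.
   TGT_IDX: destination index of the target edge.  */
static void
copy_phi_arg_into_existing_phi_model (const struct phi *src_phis,
                                      struct phi *tgt_phis,
                                      int nphis, int src_idx, int tgt_idx)
{
  for (int i = 0; i < nphis; i++)
    tgt_phis[i].args[tgt_idx] = src_phis[i].args[src_idx];
}

int
main (void)
{
  struct phi src[1] = { { { 10, 20, 30, 0 } } };
  struct phi tgt[1] = { { { -1, -1, -1, -1 } } };
  copy_phi_arg_into_existing_phi_model (src, tgt, 1, /*src_idx=*/1, /*tgt_idx=*/2);
  printf ("tgt arg[2] = %d\n", tgt[0].args[2]);   /* prints 20 */
  return 0;
}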
@@ -389,7 +430,7 @@ create_edge_and_update_destination_phis (struct redirection_data *rd,
= new jump_thread_edge ((*path)[i]->e, (*path)[i]->type);
copy->safe_push (x);
}
- e->aux = (void *)copy;
+ e->aux = (void *)copy;
}
else
{
@@ -403,7 +444,23 @@ create_edge_and_update_destination_phis (struct redirection_data *rd,
copy_phi_args (e->dest, rd->path->last ()->e, e);
}
-/* Wire up the outgoing edges from the duplicate block and
+/* Look through PATH beginning at START and return TRUE if there are
+ any additional blocks that need to be duplicated. Otherwise,
+ return FALSE. */
+static bool
+any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
+ unsigned int start)
+{
+ for (unsigned int i = start + 1; i < path->length (); i++)
+ {
+ if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
+ || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
+ return true;
+ }
+ return false;
+}
+
+/* Wire up the outgoing edges from the duplicate blocks and
update any PHIs as needed. */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
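A self-contained sketch of the scan any_remaining_duplicated_blocks performs, using a plain array of edge types in place of the jump_thread_edge vector. Names are invented for illustration only; the semantics (scan from START + 1 onward for a copy-source element) mirror the function added above.

#include <stdbool.h>
#include <stdio.h>

enum copy_type { NO_COPY, COPY_SRC_BLOCK, COPY_SRC_JOINER_BLOCK };

/* Return true if any element after START still requires a block copy.  */
static bool
any_remaining_duplicated_blocks_model (const enum copy_type *path,
                                       unsigned len, unsigned start)
{
  for (unsigned i = start + 1; i < len; i++)
    if (path[i] == COPY_SRC_BLOCK || path[i] == COPY_SRC_JOINER_BLOCK)
      return true;
  return false;
}

int
main (void)
{
  enum copy_type path[] = { NO_COPY, COPY_SRC_JOINER_BLOCK, COPY_SRC_BLOCK, NO_COPY };
  printf ("%d\n", any_remaining_duplicated_blocks_model (path, 4, 1)); /* 1 */
  printf ("%d\n", any_remaining_duplicated_blocks_model (path, 4, 2)); /* 0 */
  return 0;
}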
@@ -412,37 +469,77 @@ ssa_fix_duplicate_block_edges (struct redirection_data *rd,
edge e = rd->incoming_edges->e;
vec<jump_thread_edge *> *path = THREAD_PATH (e);
- /* If we were threading through an joiner block, then we want
- to keep its control statement and redirect an outgoing edge.
- Else we want to remove the control statement & edges, then create
- a new outgoing edge. In both cases we may need to update PHIs. */
- if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
- {
- edge victim;
- edge e2;
-
- /* This updates the PHIs at the destination of the duplicate
- block. */
- update_destination_phis (local_info->bb, rd->dup_block);
-
- /* Find the edge from the duplicate block to the block we're
- threading through. That's the edge we want to redirect. */
- victim = find_edge (rd->dup_block, (*path)[1]->e->dest);
- e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
- e2->count = path->last ()->e->count;
-
- /* If we redirected the edge, then we need to copy PHI arguments
- at the target. If the edge already existed (e2 != victim case),
- then the PHIs in the target already have the correct arguments. */
- if (e2 == victim)
- copy_phi_args (e2->dest, path->last ()->e, e2);
- }
- else
+ for (unsigned int count = 0, i = 1; i < path->length (); i++)
{
- remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
- create_edge_and_update_destination_phis (rd, rd->dup_block);
+ /* If we were threading through a joiner block, then we want
+ to keep its control statement and redirect an outgoing edge.
+ Else we want to remove the control statement & edges, then create
+ a new outgoing edge. In both cases we may need to update PHIs. */
+ if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
+ {
+ edge victim;
+ edge e2;
+
+ /* This updates the PHIs at the destination of the duplicate
+ block. */
+ update_destination_phis (local_info->bb, rd->dup_blocks[count]);
+
+ /* Find the edge from the duplicate block to the block we're
+ threading through. That's the edge we want to redirect. */
+ victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
+
+ /* If there are no remaining blocks on the path to duplicate,
+ then redirect VICTIM to the final destination of the jump
+ threading path. */
+ if (!any_remaining_duplicated_blocks (path, i))
+ {
+ e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
+ e2->count = path->last ()->e->count;
+ /* If we redirected the edge, then we need to copy PHI arguments
+ at the target. If the edge already existed (e2 != victim
+ case), then the PHIs in the target already have the correct
+ arguments. */
+ if (e2 == victim)
+ copy_phi_args (e2->dest, path->last ()->e, e2);
+ }
+ else
+ {
+ /* Redirect VICTIM to the next duplicated block in the path. */
+ e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
+
+ /* We need to update the PHIs in the next duplicated block. We
+ want the new PHI args to have the same value as they had
+ in the source of the next duplicate block.
+
+ Thus, we need to know which edge we traversed into the
+ source of the duplicate. Furthermore, we may have
+ traversed many edges to reach the source of the duplicate.
+
+ Walk through the path starting at element I until we
+ hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
+ the edge from the prior element. */
+ for (unsigned int j = i + 1; j < path->length (); j++)
+ {
+ if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
+ {
+ copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
+ break;
+ }
+ }
+ }
+ count++;
+ }
+ else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
+ {
+ remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
+ create_edge_and_update_destination_phis (rd, rd->dup_blocks[count]);
+ if (count == 1)
+ single_succ_edge (rd->dup_blocks[1])->aux = NULL;
+ count++;
+ }
}
}
+
/* Hash table traversal callback routine to create duplicate blocks. */
int
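To make the two-duplicate wiring above easier to follow, here is a toy walk over a path of edge types that mirrors the count/i bookkeeping: a joiner copy keeps its control statement and has one outgoing edge redirected (either to the final destination or to the next duplicate), while a normal copy drops its control statement and gets a single successor. This is pure illustration with invented names; the real code also redirects CFG edges and updates PHIs.

#include <stdio.h>
#include <stdbool.h>

enum copy_type { NO_COPY, COPY_SRC_BLOCK, COPY_SRC_JOINER_BLOCK };

static bool
more_copies_after (const enum copy_type *path, unsigned len, unsigned i)
{
  for (unsigned j = i + 1; j < len; j++)
    if (path[j] == COPY_SRC_BLOCK || path[j] == COPY_SRC_JOINER_BLOCK)
      return true;
  return false;
}

int
main (void)
{
  /* path[0] is the incoming edge; here a joiner copy precedes a normal copy.  */
  enum copy_type path[] = { NO_COPY, COPY_SRC_JOINER_BLOCK, COPY_SRC_BLOCK, NO_COPY };
  unsigned len = 4, count = 0;

  for (unsigned i = 1; i < len; i++)
    {
      if (path[i] == COPY_SRC_JOINER_BLOCK)
        {
          if (!more_copies_after (path, len, i))
            printf ("dup_blocks[%u]: joiner, redirect edge to final dest\n", count);
          else
            printf ("dup_blocks[%u]: joiner, redirect edge to dup_blocks[%u]\n",
                    count, count + 1);
          count++;
        }
      else if (path[i] == COPY_SRC_BLOCK)
        {
          printf ("dup_blocks[%u]: drop control stmt, wire single succ\n", count);
          count++;
        }
    }
  return 0;
}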
@@ -451,12 +548,32 @@ ssa_create_duplicates (struct redirection_data **slot,
{
struct redirection_data *rd = *slot;
+ /* The second duplicated block in a jump threading path is specific
+ to the path. So it gets stored in RD rather than in LOCAL_DATA.
+
+ Each time we're called, we have to look through the path and see
+ if a second block needs to be duplicated.
+
+ Note the search starts with the third edge on the path. The first
+ edge is the incoming edge, the second edge always has its source
+ duplicated. Thus we start our search with the third edge. */
+ vec<jump_thread_edge *> *path = rd->path;
+ for (unsigned int i = 2; i < path->length (); i++)
+ {
+ if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
+ || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
+ {
+ create_block_for_threading ((*path)[i]->e->src, rd, 1);
+ break;
+ }
+ }
+
/* Create a template block if we have not done so already. Otherwise
use the template to create a new block. */
if (local_info->template_block == NULL)
{
- create_block_for_threading (local_info->bb, rd);
- local_info->template_block = rd->dup_block;
+ create_block_for_threading ((*path)[1]->e->src, rd, 0);
+ local_info->template_block = rd->dup_blocks[0];
/* We do not create any outgoing edges for the template. We will
take care of that in a later traversal. That way we do not
@@ -464,7 +581,7 @@ ssa_create_duplicates (struct redirection_data **slot,
}
else
{
- create_block_for_threading (local_info->template_block, rd);
+ create_block_for_threading (local_info->template_block, rd, 0);
/* Go ahead and wire up outgoing edges and update PHIs for the duplicate
block. */
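A small sketch of the second-duplicate search described in the comment above, with indices as stated there: element 0 is the incoming edge, element 1 always has its source duplicated, so the scan begins at index 2. Invented names, illustration only.

#include <stdio.h>

enum copy_type { NO_COPY, COPY_SRC_BLOCK, COPY_SRC_JOINER_BLOCK };

/* Return the path index whose source needs the second duplicate,
   or -1 if only one block is duplicated.  */
static int
find_second_duplicate (const enum copy_type *path, unsigned len)
{
  for (unsigned i = 2; i < len; i++)
    if (path[i] == COPY_SRC_BLOCK || path[i] == COPY_SRC_JOINER_BLOCK)
      return (int) i;
  return -1;
}

int
main (void)
{
  enum copy_type one_dup[] = { NO_COPY, COPY_SRC_BLOCK, NO_COPY };
  enum copy_type two_dups[] = { NO_COPY, COPY_SRC_JOINER_BLOCK, COPY_SRC_BLOCK };
  printf ("%d\n", find_second_duplicate (one_dup, 3));   /* -1 */
  printf ("%d\n", find_second_duplicate (two_dups, 3));  /*  2 */
  return 0;
}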
@@ -492,7 +609,7 @@ ssa_fixup_template_block (struct redirection_data **slot,
to keep its control statement and redirect an outgoing edge.
Else we want to remove the control statement & edges, then create
a new outgoing edge. In both cases we may need to update PHIs. */
- if (rd->dup_block && rd->dup_block == local_info->template_block)
+ if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
{
ssa_fix_duplicate_block_edges (rd, local_info);
return 0;
@@ -526,36 +643,36 @@ ssa_redirect_edges (struct redirection_data **slot,
thread_stats.num_threaded_edges++;
- if (rd->dup_block)
+ if (rd->dup_blocks[0])
{
edge e2;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
- e->src->index, e->dest->index, rd->dup_block->index);
+ e->src->index, e->dest->index, rd->dup_blocks[0]->index);
- rd->dup_block->count += e->count;
+ rd->dup_blocks[0]->count += e->count;
/* Excessive jump threading may make frequencies large enough so
the computation overflows. */
- if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
- rd->dup_block->frequency += EDGE_FREQUENCY (e);
+ if (rd->dup_blocks[0]->frequency < BB_FREQ_MAX * 2)
+ rd->dup_blocks[0]->frequency += EDGE_FREQUENCY (e);
/* In the case of threading through a joiner block, the outgoing
edges from the duplicate block were updated when they were
redirected during ssa_fix_duplicate_block_edges. */
if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
- EDGE_SUCC (rd->dup_block, 0)->count += e->count;
+ EDGE_SUCC (rd->dup_blocks[0], 0)->count += e->count;
/* Redirect the incoming edge (possibly to the joiner block) to the
appropriate duplicate block. */
- e2 = redirect_edge_and_branch (e, rd->dup_block);
+ e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
gcc_assert (e == e2);
flush_pending_stmts (e2);
}
/* Go ahead and clear E->aux. It's not needed anymore and failure
- to clear it will cause all kinds of unpleasant problems later. */
+ to clear it will cause all kinds of unpleasant problems later. */
delete_jump_thread_path (path);
e->aux = NULL;
@@ -580,9 +697,9 @@ redirection_block_p (basic_block bb)
/* Advance to the first executable statement. */
gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi)
- && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
+ && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
|| is_gimple_debug (gsi_stmt (gsi))
- || gimple_nop_p (gsi_stmt (gsi))))
+ || gimple_nop_p (gsi_stmt (gsi))))
gsi_next (&gsi);
/* Check if this is an empty block. */
@@ -591,9 +708,9 @@ redirection_block_p (basic_block bb)
/* Test that we've reached the terminating control statement. */
return gsi_stmt (gsi)
- && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
- || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
- || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
+ && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
+ || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
+ || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
@@ -615,7 +732,7 @@ redirection_block_p (basic_block bb)
the appropriate duplicate of BB.
If NOLOOP_ONLY is true, we only perform the threading as long as it
- does not affect the structure of the loops in a nontrivial way.
+ does not affect the structure of the loops in a nontrivial way.
If JOINERS is true, then thread through joiner blocks as well. */
@@ -678,22 +795,12 @@ thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
if (!e2 || noloop_only)
{
/* If NOLOOP_ONLY is true, we only allow threading through the
- header of a loop to exit edges.
-
- There are two cases to consider. The first when BB is the
- loop header. We will attempt to thread this elsewhere, so
- we can just continue here. */
-
- if (bb == bb->loop_father->header
- && (!loop_exit_edge_p (bb->loop_father, e2)
- || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
- continue;
-
+ header of a loop to exit edges. */
- /* The second occurs when there was loop header buried in a jump
- threading path. We do not try and thread this elsewhere, so
- just cancel the jump threading request by clearing the AUX
- field now. */
+ /* One case occurs when there was a loop header buried in a jump
+ threading path that crosses loop boundaries. We do not try to
+ thread this elsewhere, so just cancel the jump threading
+ request by clearing the AUX field now. */
if ((bb->loop_father != e2->src->loop_father
&& !loop_exit_edge_p (e2->src->loop_father, e2))
|| (e2->src->loop_father != e2->dest->loop_father
@@ -706,11 +813,40 @@ thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
e->aux = NULL;
continue;
}
+
+ /* Another case occurs when trying to thread through our
+ own loop header, possibly from inside the loop.
+
+ If our loop header is buried in the path, then go ahead
+ and cancel the jump threading request here. This likely
+ will need updating for the FSA/FSM coremark case.
+
+ Other cases (BB is the loop header) are handled elsewhere. */
+ unsigned int i;
+ for (i = 1; i < path->length (); i++)
+ {
+ if ((*path)[i]->e->src == bb->loop_father->header
+ && (!loop_exit_edge_p (bb->loop_father, e2)
+ || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
+ {
+ /* If i != 1, then it's a buried header that will not
+ be handled elsewhere. */
+ if (i != 1)
+ {
+ delete_jump_thread_path (path);
+ e->aux = NULL;
+ }
+ break;
+ }
+ }
+
+ if (i != path->length ())
+ continue;
}
if (e->dest == e2->src)
update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
- e->count, (*THREAD_PATH (e))[1]->e);
+ e->count, (*THREAD_PATH (e))[1]->e);
/* Insert the outgoing edge into the hash table if it is not
already in the hash table. */
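A compact model of the buried-loop-header check introduced above: scan the path for the loop header; only when it appears at some position other than 1 (where the dedicated loop-header code would handle it) is the whole request cancelled. Invented names, illustration only; the real check also consults loop_exit_edge_p and the joiner-block condition, which are omitted here.

#include <stdio.h>
#include <stdbool.h>

/* Return true if the threading request should be cancelled because the
   loop header appears at some position other than 1 in the path.  */
static bool
cancel_for_buried_header (const int *path_srcs, unsigned len, int loop_header)
{
  for (unsigned i = 1; i < len; i++)
    if (path_srcs[i] == loop_header)
      return i != 1;   /* i == 1 is handled by the loop-header code instead.  */
  return false;
}

int
main (void)
{
  int path_a[] = { 7, 3, 9, 11 };   /* header (3) right after the incoming edge */
  int path_b[] = { 7, 9, 3, 11 };   /* header (3) buried deeper in the path */
  printf ("%d\n", cancel_for_buried_header (path_a, 4, 3));  /* 0: keep */
  printf ("%d\n", cancel_for_buried_header (path_b, 4, 3));  /* 1: cancel */
  return 0;
}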
@@ -775,7 +911,7 @@ thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
By doing things this way we can be as aggressive as possible and
not worry that copying a joiner block will create a jump threading
opportunity. */
-
+
static bool
thread_block (basic_block bb, bool noloop_only)
{
@@ -830,21 +966,21 @@ thread_single_edge (edge e)
npath->safe_push (x);
rd.path = npath;
- create_block_for_threading (bb, &rd);
- remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
- create_edge_and_update_destination_phis (&rd, rd.dup_block);
+ create_block_for_threading (bb, &rd, 0);
+ remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
+ create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0]);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
- e->src->index, e->dest->index, rd.dup_block->index);
+ e->src->index, e->dest->index, rd.dup_blocks[0]->index);
- rd.dup_block->count = e->count;
- rd.dup_block->frequency = EDGE_FREQUENCY (e);
- single_succ_edge (rd.dup_block)->count = e->count;
- redirect_edge_and_branch (e, rd.dup_block);
+ rd.dup_blocks[0]->count = e->count;
+ rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
+ single_succ_edge (rd.dup_blocks[0])->count = e->count;
+ redirect_edge_and_branch (e, rd.dup_blocks[0]);
flush_pending_stmts (e);
- return rd.dup_block;
+ return rd.dup_blocks[0];
}
/* Callback for dfs_enumerate_from. Returns true if BB is different
@@ -1025,11 +1161,22 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
if (single_succ_p (header))
goto fail;
+ /* If we threaded the latch using a joiner block, we cancel the
+ threading opportunity out of an abundance of caution. However,
+ still allow threading from outside to inside the loop. */
if (latch->aux)
{
vec<jump_thread_edge *> *path = THREAD_PATH (latch);
if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
- goto fail;
+ {
+ delete_jump_thread_path (path);
+ latch->aux = NULL;
+ }
+ }
+
+ if (latch->aux)
+ {
+ vec<jump_thread_edge *> *path = THREAD_PATH (latch);
tgt_edge = (*path)[1]->e;
tgt_bb = tgt_edge->dest;
}
@@ -1114,7 +1261,7 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
unsigned nblocks, i;
/* First handle the case latch edge is redirected. We are copying
- the loop header but not creating a multiple entry loop. Make the
+ the loop header but not creating a multiple entry loop. Make the
cfg manipulation code aware of that fact. */
set_loop_copy (loop, loop);
loop->latch = thread_single_edge (latch);
@@ -1426,7 +1573,6 @@ thread_through_all_blocks (bool may_peel_loop_headers)
bitmap_iterator bi;
bitmap threaded_blocks;
struct loop *loop;
- loop_iterator li;
/* We must know about loops in order to preserve them. */
gcc_assert (current_loops != NULL);
@@ -1454,7 +1600,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
/* Then perform the threading through loop headers. We start with the
innermost loop, so that the changes in cfg we perform won't affect
further threading. */
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
if (!loop->header
|| !bitmap_bit_p (threaded_blocks, loop->header->index))
@@ -1464,7 +1610,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
}
/* Assume we had a jump thread path which went from the latch to the exit
- and a path which goes from outside to inside the same loop.
+ and a path which goes from outside to inside the same loop.
If the latch to exit was handled first, we will thread it and clear
loop->header.
diff --git a/gcc/tree-ssa-threadupdate.h b/gcc/tree-ssa-threadupdate.h
index 4617b9c1d3e..49501705235 100644
--- a/gcc/tree-ssa-threadupdate.h
+++ b/gcc/tree-ssa-threadupdate.h
@@ -1,5 +1,5 @@
/* Communication between registering jump thread requests and
- updating the SSA/CFG for jump threading.
+ updating the SSA/CFG for jump threading.
Copyright (C) 2013 Free Software Foundation, Inc.
This file is part of GCC.
diff --git a/gcc/tree-ssa-uncprop.c b/gcc/tree-ssa-uncprop.c
index 25f9f45b285..62ffe421f29 100644
--- a/gcc/tree-ssa-uncprop.c
+++ b/gcc/tree-ssa-uncprop.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
@@ -193,7 +194,7 @@ associate_equivalences_with_edges (void)
/* Now walk over the blocks to determine which ones were
marked as being reached by a useful case label. */
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < n_basic_blocks_for_fn (cfun); i++)
{
tree node = info[i];
diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c
index ed5bdb4ac50..967b167727f 100644
--- a/gcc/tree-ssa-uninit.c
+++ b/gcc/tree-ssa-uninit.c
@@ -175,7 +175,7 @@ warn_uninitialized_vars (bool warn_possibly_uninitialized)
FOR_EACH_BB (bb)
{
bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
- single_succ (ENTRY_BLOCK_PTR), bb);
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), bb);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
@@ -315,14 +315,14 @@ compute_uninit_opnds_pos (gimple phi)
static inline basic_block
find_pdom (basic_block block)
{
- if (block == EXIT_BLOCK_PTR)
- return EXIT_BLOCK_PTR;
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
else
{
basic_block bb
= get_immediate_dominator (CDI_POST_DOMINATORS, block);
if (! bb)
- return EXIT_BLOCK_PTR;
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
return bb;
}
}
@@ -333,13 +333,13 @@ find_pdom (basic_block block)
static inline basic_block
find_dom (basic_block block)
{
- if (block == ENTRY_BLOCK_PTR)
- return ENTRY_BLOCK_PTR;
+ if (block == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ return ENTRY_BLOCK_PTR_FOR_FN (cfun);
else
{
basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
if (! bb)
- return ENTRY_BLOCK_PTR;
+ return ENTRY_BLOCK_PTR_FOR_FN (cfun);
return bb;
}
}
@@ -454,7 +454,8 @@ compute_control_dep_chain (basic_block bb, basic_block dep_bb,
cd_bb = find_pdom (cd_bb);
post_dom_check++;
- if (cd_bb == EXIT_BLOCK_PTR || post_dom_check > MAX_POSTDOM_CHECK)
+ if (cd_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || post_dom_check >
+ MAX_POSTDOM_CHECK)
break;
}
cur_cd_chain->pop ();
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index a0681963c57..550381ae94d 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "target.h"
@@ -38,6 +39,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-into-ssa.h"
diff --git a/gcc/tree-ssanames.c b/gcc/tree-ssanames.c
index 6e887aaad66..348fa315964 100644
--- a/gcc/tree-ssanames.c
+++ b/gcc/tree-ssanames.c
@@ -22,10 +22,12 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
diff --git a/gcc/tree-stdarg.c b/gcc/tree-stdarg.c
index 5cd845c0d52..982937462db 100644
--- a/gcc/tree-stdarg.c
+++ b/gcc/tree-stdarg.c
@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "sbitmap.h"
#include "tree-pass.h"
@@ -96,7 +97,7 @@ reachable_at_most_once (basic_block va_arg_bb, basic_block va_start_bb)
break;
}
- gcc_assert (src != ENTRY_BLOCK_PTR);
+ gcc_assert (src != ENTRY_BLOCK_PTR_FOR_FN (cfun));
if (! bitmap_bit_p (visited, src->index))
{
diff --git a/gcc/tree-streamer-in.c b/gcc/tree-streamer-in.c
index c0989c8ff72..a70c7670551 100644
--- a/gcc/tree-streamer-in.c
+++ b/gcc/tree-streamer-in.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "diagnostic.h"
#include "tree.h"
+#include "stringpool.h"
#include "gimple.h"
#include "tree-streamer.h"
#include "data-streamer.h"
diff --git a/gcc/tree-streamer-out.c b/gcc/tree-streamer-out.c
index 7161dba1a54..7d3620d18a9 100644
--- a/gcc/tree-streamer-out.c
+++ b/gcc/tree-streamer-out.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "diagnostic.h"
#include "tree.h"
+#include "stor-layout.h"
#include "gimple.h"
#include "tree-streamer.h"
#include "data-streamer.h"
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 23681489acc..4d71efec08d 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -30,6 +30,8 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "params.h"
#include "flags.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimplify.h"
@@ -39,6 +41,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c
index 185bf165149..9a30400c0d9 100644
--- a/gcc/tree-tailcall.c
+++ b/gcc/tree-tailcall.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
#include "function.h"
@@ -31,8 +32,10 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "gimple-pretty-print.h"
#include "except.h"
@@ -818,7 +821,7 @@ eliminate_tail_call (struct tailcall *t)
gcc_assert (is_gimple_call (stmt));
- first = single_succ (ENTRY_BLOCK_PTR);
+ first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Remove the code after call_gsi that will become unreachable. The
possibly unreachable code in other blocks is removed later in
@@ -839,9 +842,10 @@ eliminate_tail_call (struct tailcall *t)
/* Number of executions of function has reduced by the tailcall. */
e = single_succ_edge (gsi_bb (t->call_gsi));
- decrease_profile (EXIT_BLOCK_PTR, e->count, EDGE_FREQUENCY (e));
- decrease_profile (ENTRY_BLOCK_PTR, e->count, EDGE_FREQUENCY (e));
- if (e->dest != EXIT_BLOCK_PTR)
+ decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e));
+ decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count,
+ EDGE_FREQUENCY (e));
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e));
/* Replace the call by a jump to the start of function. */
@@ -945,7 +949,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
bool phis_constructed = false;
struct tailcall *tailcalls = NULL, *act, *next;
bool changed = false;
- basic_block first = single_succ (ENTRY_BLOCK_PTR);
+ basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
tree param;
gimple stmt;
edge_iterator ei;
@@ -955,7 +959,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
if (opt_tailcalls)
opt_tailcalls = suitable_for_tail_call_opt_p ();
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
/* Only traverse the normal exits, i.e. those that end with return
statement. */
@@ -979,7 +983,8 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
or if there are existing degenerate PHI nodes. */
if (!single_pred_p (first)
|| !gimple_seq_empty_p (phi_nodes (first)))
- first = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ first =
+ split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
/* Copy the args if needed. */
for (param = DECL_ARGUMENTS (current_function_decl);
@@ -1026,7 +1031,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
if (a_acc || m_acc)
{
/* Modify the remaining return statements. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
stmt = last_stmt (e->src);
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 7d5309c4a42..47e96cdafbe 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "target.h"
#include "basic-block.h"
@@ -37,6 +38,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
@@ -781,7 +783,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
return false;
}
- SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));
+ SET_DR_MISALIGNMENT (dr, tree_to_uhwi (misalign));
if (dump_enabled_p ())
{
@@ -2569,13 +2571,13 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
/* If init_b == init_a + the size of the type * k, we have an
interleaving, and DRA is accessed before DRB. */
- HOST_WIDE_INT type_size_a = TREE_INT_CST_LOW (sza);
+ HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
if ((init_b - init_a) % type_size_a != 0)
break;
/* The step (if not zero) is greater than the difference between
data-refs' inits. This splits groups into suitable sizes. */
- HOST_WIDE_INT step = TREE_INT_CST_LOW (DR_STEP (dra));
+ HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
if (step != 0 && step <= (init_b - init_a))
break;
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 5b867b09d99..777687cbf2e 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm.h"
#include "langhooks.h"
#include "gimple.h"
@@ -28,6 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-iterator.h"
#include "tree-pass.h"
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index c31b6dbddb7..8ac1d92f995 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-into-ssa.h"
@@ -1672,7 +1673,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
gimple_stmt_iterator gsi, gsi1;
basic_block update_bb = update_e->dest;
- /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */
+ gcc_checking_assert (vect_can_advance_ivs_p (loop_vinfo));
/* Make sure there exists a single-predecessor exit bb: */
gcc_assert (single_pred_p (exit_bb));
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 4a68157fee6..8883852a08a 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "gimple.h"
@@ -35,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
@@ -1586,24 +1588,19 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
return false;
}
- if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo)
+ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
|| ((int) tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
< exact_log2 (vectorization_factor)))
{
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required.\n");
- if (!vect_can_advance_ivs_p (loop_vinfo))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: can't create epilog loop 1.\n");
- return false;
- }
- if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
+ dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
+ if (!vect_can_advance_ivs_p (loop_vinfo)
+ || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: can't create epilog loop 2.\n");
+ "not vectorized: can't create required "
+ "epilog loop\n");
return false;
}
}
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 068c90aa547..da4cc94f7b7 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "stor-layout.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -33,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "cfgloop.h"
#include "expr.h"
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 247bdfd6669..bd9e4e23e0d 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "stor-layout.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -34,6 +35,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 54d821af9c0..2116cec3cf9 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "stor-layout.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
@@ -37,6 +38,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index 9c2cf5d1ce2..fea7086b623 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -61,6 +61,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tree-pretty-print.h"
#include "gimple.h"
#include "gimple-iterator.h"
@@ -323,7 +324,6 @@ vectorize_loops (void)
unsigned int i;
unsigned int num_vectorized_loops = 0;
unsigned int vect_loops_num;
- loop_iterator li;
struct loop *loop;
hash_table <simduid_to_vf> simduid_to_vf_htab;
hash_table <simd_array_to_simduid> simd_array_to_simduid_htab;
@@ -348,7 +348,7 @@ vectorize_loops (void)
/* If some loop was duplicated, it gets bigger number
than all previously defined loops. This fact allows us to run
only over initial loops skipping newly generated ones. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
if ((flag_tree_loop_vectorize && optimize_loop_nest_for_speed_p (loop))
|| loop->force_vect)
{
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 634cdec4989..2da3b2b3f75 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -25,6 +25,8 @@ along with GCC; see the file COPYING3. If not see
#include "ggc.h"
#include "flags.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
@@ -33,6 +35,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
@@ -5816,8 +5819,7 @@ find_assert_locations (void)
the order we compute liveness and insert asserts we otherwise
fail to insert asserts into the loop latch. */
loop_p loop;
- loop_iterator li;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
i = loop->latch->index;
unsigned int j = single_succ_edge (loop->latch)->dest_idx;
diff --git a/gcc/tree.c b/gcc/tree.c
index 256c6cf75fe..1a310e6f1a4 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -33,6 +33,10 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "flags.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "attribs.h"
+#include "varasm.h"
#include "tm_p.h"
#include "function.h"
#include "obstack.h"
@@ -54,7 +58,9 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-phinodes.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
+#include "expr.h"
#include "tree-dfa.h"
#include "params.h"
#include "pointer-set.h"
@@ -8580,8 +8586,8 @@ retry:
/* Third, unsigned integers with top bit set never fit signed types. */
if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED)
{
- int uprec = GET_MODE_PRECISION (TYPE_MODE TREE_TYPE (c));
- if (uprec < TYPE_PRECISION (TREE_TYPE (c)))
+ int prec = GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (c))) - 1;
+ if (prec < TYPE_PRECISION (TREE_TYPE (c)))
{
/* When a tree_cst is converted to a wide-int, the precision
is taken from the type. However, if the precision of the
@@ -8589,7 +8595,7 @@ retry:
possible that the value will not fit. The test below
fails if any bit is set between the sign bit of the
underlying mode and the top bit of the type. */
- if (wi::ne_p (wi::zext (c, uprec - 1), c))
+ if (wi::ne_p (wi::zext (c, prec - 1), c))
return false;
}
else if (wi::neg_p (c))
diff --git a/gcc/tree.h b/gcc/tree.h
index cdc9ed43584..ea6d2fe8488 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -23,6 +23,12 @@ along with GCC; see the file COPYING3. If not see
#include "tree-core.h"
#include "wide-int.h"
+/* These includes are required here because they provide declarations
+ used by inline functions in this file.
+
+ FIXME - Move these users elsewhere? */
+#include "fold-const.h"
+
/* Macros for initializing `tree_contains_struct'. */
#define MARK_TS_BASE(C) \
do { \
@@ -578,7 +584,6 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
#define COMPLETE_OR_UNBOUND_ARRAY_TYPE_P(NODE) \
(COMPLETE_TYPE_P (TREE_CODE (NODE) == ARRAY_TYPE ? TREE_TYPE (NODE) : (NODE)))
-
/* Define many boolean fields that all tree nodes have. */
/* In VAR_DECL, PARM_DECL and RESULT_DECL nodes, nonzero means address
@@ -895,7 +900,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
(TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_5)
#define TREE_LANG_FLAG_6(NODE) \
(TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_6)
-
+
/* Define additional fields and accessors for nodes representing constants. */
#define TREE_INT_CST_NUNITS(NODE) \
@@ -1523,8 +1528,6 @@ extern void protected_set_expr_location (tree, location_t);
#define TYPE_MAIN_VARIANT(NODE) (TYPE_CHECK (NODE)->type_common.main_variant)
#define TYPE_CONTEXT(NODE) (TYPE_CHECK (NODE)->type_common.context)
-/* Vector types need to check target flags to determine type. */
-extern enum machine_mode vector_type_mode (const_tree);
#define TYPE_MODE(NODE) \
(VECTOR_TYPE_P (TYPE_CHECK (NODE)) \
? vector_type_mode (NODE) : (NODE)->type_common.mode)
@@ -2696,8 +2699,6 @@ extern vec<tree, va_gc> **decl_debug_args_insert (tree);
/* Return a tree node that encapsulates the optimization options in OPTS. */
extern tree build_optimization_node (struct gcc_options *opts);
-extern void init_tree_optimization_optabs (tree);
-
#define TREE_TARGET_OPTION(NODE) \
(&TARGET_OPTION_NODE_CHECK (NODE)->target_option.opts)
@@ -3498,30 +3499,6 @@ extern tree make_tree_vec_stat (int MEM_STAT_DECL);
extern tree grow_tree_vec_stat (tree v, int MEM_STAT_DECL);
#define grow_tree_vec(v, t) grow_tree_vec_stat (v, t MEM_STAT_INFO)
-/* Return the (unique) IDENTIFIER_NODE node for a given name.
- The name is supplied as a char *. */
-
-extern tree get_identifier (const char *);
-
-#if GCC_VERSION >= 3000
-#define get_identifier(str) \
- (__builtin_constant_p (str) \
- ? get_identifier_with_length ((str), strlen (str)) \
- : get_identifier (str))
-#endif
-
-
-/* Identical to get_identifier, except that the length is assumed
- known. */
-
-extern tree get_identifier_with_length (const char *, size_t);
-
-/* If an identifier with the name TEXT (a null-terminated string) has
- previously been referred to, return that node; otherwise return
- NULL_TREE. */
-
-extern tree maybe_get_identifier (const char *);
-
/* Construct various types of nodes. */
extern tree build_nt (enum tree_code, ...);
@@ -3611,7 +3588,6 @@ extern tree build_var_debug_value_stat (tree, tree MEM_STAT_DECL);
extern tree double_int_to_tree (tree, double_int);
-extern offset_int mem_ref_offset (const_tree);
extern tree wide_int_to_tree (tree type, const wide_int_ref &cst);
extern tree force_fit_type (tree, const wide_int_ref &, int, bool);
@@ -3663,14 +3639,10 @@ extern tree build_call_vec (tree, tree, vec<tree, va_gc> *);
/* Construct various nodes representing data types. */
-extern tree make_signed_type (int);
-extern tree make_unsigned_type (int);
extern tree signed_or_unsigned_type_for (int, tree);
extern tree signed_type_for (tree);
extern tree unsigned_type_for (tree);
extern tree truth_type_for (tree);
-extern void initialize_sizetypes (void);
-extern void fixup_unsigned_type (tree);
extern tree build_pointer_type_for_mode (tree, enum machine_mode, bool);
extern tree build_pointer_type (tree);
extern tree build_reference_type_for_mode (tree, enum machine_mode, bool);
@@ -3738,51 +3710,17 @@ tree_to_uhwi (const_tree t)
extern int tree_int_cst_sgn (const_tree);
extern int tree_int_cst_sign_bit (const_tree);
extern unsigned int tree_int_cst_min_precision (tree, signop);
-extern bool tree_expr_nonnegative_p (tree);
-extern bool tree_expr_nonnegative_warnv_p (tree, bool *);
-extern bool may_negate_without_overflow_p (const_tree);
extern tree strip_array_types (tree);
extern tree excess_precision_type (tree);
extern bool valid_constant_size_p (const_tree);
-extern unsigned int element_precision (const_tree);
-
-/* Construct various nodes representing fract or accum data types. */
-
-extern tree make_fract_type (int, int, int);
-extern tree make_accum_type (int, int, int);
-
-#define make_signed_fract_type(P) make_fract_type (P, 0, 0)
-#define make_unsigned_fract_type(P) make_fract_type (P, 1, 0)
-#define make_sat_signed_fract_type(P) make_fract_type (P, 0, 1)
-#define make_sat_unsigned_fract_type(P) make_fract_type (P, 1, 1)
-#define make_signed_accum_type(P) make_accum_type (P, 0, 0)
-#define make_unsigned_accum_type(P) make_accum_type (P, 1, 0)
-#define make_sat_signed_accum_type(P) make_accum_type (P, 0, 1)
-#define make_sat_unsigned_accum_type(P) make_accum_type (P, 1, 1)
-
-#define make_or_reuse_signed_fract_type(P) \
- make_or_reuse_fract_type (P, 0, 0)
-#define make_or_reuse_unsigned_fract_type(P) \
- make_or_reuse_fract_type (P, 1, 0)
-#define make_or_reuse_sat_signed_fract_type(P) \
- make_or_reuse_fract_type (P, 0, 1)
-#define make_or_reuse_sat_unsigned_fract_type(P) \
- make_or_reuse_fract_type (P, 1, 1)
-#define make_or_reuse_signed_accum_type(P) \
- make_or_reuse_accum_type (P, 0, 0)
-#define make_or_reuse_unsigned_accum_type(P) \
- make_or_reuse_accum_type (P, 1, 0)
-#define make_or_reuse_sat_signed_accum_type(P) \
- make_or_reuse_accum_type (P, 0, 1)
-#define make_or_reuse_sat_unsigned_accum_type(P) \
- make_or_reuse_accum_type (P, 1, 1)
+
/* From expmed.c. Since rtl.h is included after tree.h, we can't
put the prototype here. Rtl.h does declare the prototype if
tree.h had been included. */
extern tree make_tree (tree, rtx);
-
+
/* Return a type like TTYPE except that its TYPE_ATTRIBUTES
is ATTRIBUTE.
@@ -3901,30 +3839,6 @@ extern tree build_aligned_type (tree, unsigned int);
extern tree build_distinct_type_copy (tree);
extern tree build_variant_type_copy (tree);
-/* Finish up a builtin RECORD_TYPE. Give it a name and provide its
- fields. Optionally specify an alignment, and then lay it out. */
-
-extern void finish_builtin_struct (tree, const char *,
- tree, tree);
-
-/* Given a ..._TYPE node, calculate the TYPE_SIZE, TYPE_SIZE_UNIT,
- TYPE_ALIGN and TYPE_MODE fields. If called more than once on one
- node, does nothing except for the first time. */
-
-extern void layout_type (tree);
-
-extern record_layout_info start_record_layout (tree);
-extern tree bit_from_pos (tree, tree);
-extern tree byte_from_pos (tree, tree);
-extern void pos_from_bit (tree *, tree *, unsigned int, tree);
-extern void normalize_offset (tree *, tree *, unsigned int);
-extern tree rli_size_unit_so_far (record_layout_info);
-extern tree rli_size_so_far (record_layout_info);
-extern void normalize_rli (record_layout_info);
-extern void place_field (record_layout_info, tree);
-extern void compute_record_mode (tree);
-extern void finish_record_layout (record_layout_info, int);
-
/* Given a hashcode and a ..._TYPE node (for which the hashcode was made),
return a canonicalized ..._TYPE node, so that duplicates are not made.
How the hash code is computed is up to the caller, as long as any two
@@ -3932,37 +3846,8 @@ extern void finish_record_layout (record_layout_info, int);
extern tree type_hash_canon (unsigned int, tree);
-/* Given a VAR_DECL, PARM_DECL, RESULT_DECL or FIELD_DECL node,
- calculates the DECL_SIZE, DECL_SIZE_UNIT, DECL_ALIGN and DECL_MODE
- fields. Call this only once for any given decl node.
-
- Second argument is the boundary that this field can be assumed to
- be starting at (in bits). Zero means it can be assumed aligned
- on any boundary that may be needed. */
-
-extern void layout_decl (tree, unsigned);
-
-/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
- a previous call to layout_decl and calls it again. */
-
-extern void relayout_decl (tree);
-
-/* Return the mode for data of a given size SIZE and mode class CLASS.
- If LIMIT is nonzero, then don't use modes bigger than MAX_FIXED_MODE_SIZE.
- The value is BLKmode if no other mode is found. This is like
- mode_for_size, but is passed a tree. */
-
-extern enum machine_mode mode_for_size_tree (const_tree, enum mode_class, int);
-
-/* Return an expr equal to X but certainly not valid as an lvalue. */
-
-#define non_lvalue(T) non_lvalue_loc (UNKNOWN_LOCATION, T)
-extern tree non_lvalue_loc (location_t, tree);
-
extern tree convert (tree, tree);
extern unsigned int expr_align (const_tree);
-extern tree expr_first (tree);
-extern tree expr_last (tree);
extern tree size_in_bytes (const_tree);
extern HOST_WIDE_INT int_size_in_bytes (const_tree);
extern HOST_WIDE_INT max_int_size_in_bytes (const_tree);
@@ -3975,26 +3860,11 @@ extern HOST_WIDE_INT int_byte_position (const_tree);
#define bitsizetype sizetype_tab[(int) stk_bitsizetype]
#define ssizetype sizetype_tab[(int) stk_ssizetype]
#define sbitsizetype sizetype_tab[(int) stk_sbitsizetype]
-
-extern tree size_int_kind (HOST_WIDE_INT, enum size_type_kind);
-#define size_binop(CODE,T1,T2)\
- size_binop_loc (UNKNOWN_LOCATION, CODE, T1, T2)
-extern tree size_binop_loc (location_t, enum tree_code, tree, tree);
-#define size_diffop(T1,T2)\
- size_diffop_loc (UNKNOWN_LOCATION, T1, T2)
-extern tree size_diffop_loc (location_t, tree, tree);
-
#define size_int(L) size_int_kind (L, stk_sizetype)
#define ssize_int(L) size_int_kind (L, stk_ssizetype)
#define bitsize_int(L) size_int_kind (L, stk_bitsizetype)
#define sbitsize_int(L) size_int_kind (L, stk_sbitsizetype)
-#define round_up(T,N) round_up_loc (UNKNOWN_LOCATION, T, N)
-extern tree round_up_loc (location_t, tree, int);
-#define round_down(T,N) round_down_loc (UNKNOWN_LOCATION, T, N)
-extern tree round_down_loc (location_t, tree, int);
-extern void finalize_size_functions (void);
-
/* Type for sizes of data-type. */
#define BITS_PER_UNIT_LOG \
@@ -4044,11 +3914,6 @@ extern tree uniform_vector_p (const_tree);
extern vec<tree, va_gc> *ctor_to_vec (tree);
-extern bool categorize_ctor_elements (const_tree, HOST_WIDE_INT *,
- HOST_WIDE_INT *, bool *);
-
-extern bool complete_ctor_at_level_p (const_tree, HOST_WIDE_INT, const_tree);
-
/* integer_zerop (tree x) is nonzero if X is an integer constant of value 0. */
extern int integer_zerop (const_tree);
@@ -4171,13 +4036,6 @@ extern tree substitute_placeholder_in_expr (tree, tree);
((EXP) == 0 || TREE_CONSTANT (EXP) ? (EXP) \
: substitute_placeholder_in_expr (EXP, OBJ))
-/* variable_size (EXP) is like save_expr (EXP) except that it
- is for the special case of something that is part of a
- variable size for a data type. It makes special arrangements
- to compute the value at the right time when the data type
- belongs to a function parameter. */
-
-extern tree variable_size (tree);
/* stabilize_reference (EXP) returns a reference equivalent to EXP
but it can be used multiple times
@@ -4220,36 +4078,6 @@ handled_component_p (const_tree t)
}
}
-/* Given an expression EXP that is a handled_component_p,
- look for the ultimate containing object, which is returned and specify
- the access position and size. */
-
-extern tree get_inner_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
- tree *, enum machine_mode *, int *, int *,
- bool);
-
-/* Return a tree of sizetype representing the size, in bytes, of the element
- of EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
-
-extern tree array_ref_element_size (tree);
-
-bool array_at_struct_end_p (tree);
-
-/* Return a tree representing the lower bound of the array mentioned in
- EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
-
-extern tree array_ref_low_bound (tree);
-
-/* Return a tree representing the upper bound of the array mentioned in
- EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
-
-extern tree array_ref_up_bound (tree);
-
-/* Return a tree representing the offset, in bytes, of the field referenced
- by EXP. This does not include any offset in DECL_FIELD_BIT_OFFSET. */
-
-extern tree component_ref_field_offset (tree);
-
/* Given a DECL or TYPE, return the scope in which it was declared, or
NUL_TREE if there is no containing scope. */
@@ -4324,8 +4152,6 @@ inlined_function_outer_scope_p (const_tree block)
(TREE = function_args_iter_cond (&(ITER))) != NULL_TREE; \
function_args_iter_next (&(ITER)))
-
-
/* In tree.c */
extern unsigned crc32_string (unsigned, const char *);
extern unsigned crc32_byte (unsigned, char);
@@ -4356,14 +4182,6 @@ extern void assign_assembler_name_if_neeeded (tree);
extern void warn_deprecated_use (tree, tree);
extern void cache_integer_cst (tree);
-
-/* In cgraph.c */
-extern void change_decl_assembler_name (tree, tree);
-
-/* In stmt.c */
-
-extern void expand_label (tree);
-
/* Compare and hash for any structure which begins with a canonical
pointer. Assumes all pointers are interchangeable, which is sort
of already assumed by gcc elsewhere IIRC. */
@@ -4383,131 +4201,6 @@ struct_ptr_hash (const void *a)
return (intptr_t)*x >> 4;
}
-/* In fold-const.c */
-
-/* Non-zero if we are folding constants inside an initializer; zero
- otherwise. */
-extern int folding_initializer;
-
-/* Convert between trees and native memory representation. */
-extern int native_encode_expr (const_tree, unsigned char *, int);
-extern tree native_interpret_expr (tree, const unsigned char *, int);
-
-/* Fold constants as much as possible in an expression.
- Returns the simplified expression.
- Acts only on the top level of the expression;
- if the argument itself cannot be simplified, its
- subexpressions are not changed. */
-
-extern tree fold (tree);
-#define fold_unary(CODE,T1,T2)\
- fold_unary_loc (UNKNOWN_LOCATION, CODE, T1, T2)
-extern tree fold_unary_loc (location_t, enum tree_code, tree, tree);
-#define fold_unary_ignore_overflow(CODE,T1,T2)\
- fold_unary_ignore_overflow_loc (UNKNOWN_LOCATION, CODE, T1, T2)
-extern tree fold_unary_ignore_overflow_loc (location_t, enum tree_code, tree, tree);
-#define fold_binary(CODE,T1,T2,T3)\
- fold_binary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3)
-extern tree fold_binary_loc (location_t, enum tree_code, tree, tree, tree);
-#define fold_ternary(CODE,T1,T2,T3,T4)\
- fold_ternary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3, T4)
-extern tree fold_ternary_loc (location_t, enum tree_code, tree, tree, tree, tree);
-#define fold_build1(c,t1,t2)\
- fold_build1_stat_loc (UNKNOWN_LOCATION, c, t1, t2 MEM_STAT_INFO)
-#define fold_build1_loc(l,c,t1,t2)\
- fold_build1_stat_loc (l, c, t1, t2 MEM_STAT_INFO)
-extern tree fold_build1_stat_loc (location_t, enum tree_code, tree,
- tree MEM_STAT_DECL);
-#define fold_build2(c,t1,t2,t3)\
- fold_build2_stat_loc (UNKNOWN_LOCATION, c, t1, t2, t3 MEM_STAT_INFO)
-#define fold_build2_loc(l,c,t1,t2,t3)\
- fold_build2_stat_loc (l, c, t1, t2, t3 MEM_STAT_INFO)
-extern tree fold_build2_stat_loc (location_t, enum tree_code, tree, tree,
- tree MEM_STAT_DECL);
-#define fold_build3(c,t1,t2,t3,t4)\
- fold_build3_stat_loc (UNKNOWN_LOCATION, c, t1, t2, t3, t4 MEM_STAT_INFO)
-#define fold_build3_loc(l,c,t1,t2,t3,t4)\
- fold_build3_stat_loc (l, c, t1, t2, t3, t4 MEM_STAT_INFO)
-extern tree fold_build3_stat_loc (location_t, enum tree_code, tree, tree, tree,
- tree MEM_STAT_DECL);
-extern tree fold_build1_initializer_loc (location_t, enum tree_code, tree, tree);
-extern tree fold_build2_initializer_loc (location_t, enum tree_code, tree, tree, tree);
-#define fold_build_call_array(T1,T2,N,T4)\
- fold_build_call_array_loc (UNKNOWN_LOCATION, T1, T2, N, T4)
-extern tree fold_build_call_array_loc (location_t, tree, tree, int, tree *);
-#define fold_build_call_array_initializer(T1,T2,N,T4)\
- fold_build_call_array_initializer_loc (UNKNOWN_LOCATION, T1, T2, N, T4)
-extern tree fold_build_call_array_initializer_loc (location_t, tree, tree, int, tree *);
-extern bool fold_convertible_p (const_tree, const_tree);
-#define fold_convert(T1,T2)\
- fold_convert_loc (UNKNOWN_LOCATION, T1, T2)
-extern tree fold_convert_loc (location_t, tree, tree);
-extern tree fold_single_bit_test (location_t, enum tree_code, tree, tree, tree);
-extern tree fold_ignored_result (tree);
-extern tree fold_abs_const (tree, tree);
-extern tree fold_indirect_ref_1 (location_t, tree, tree);
-extern void fold_defer_overflow_warnings (void);
-extern void fold_undefer_overflow_warnings (bool, const_gimple, int);
-extern void fold_undefer_and_ignore_overflow_warnings (void);
-extern bool fold_deferring_overflow_warnings_p (void);
-extern tree fold_fma (location_t, tree, tree, tree, tree);
-extern int operand_equal_p (const_tree, const_tree, unsigned int);
-extern int multiple_of_p (tree, const_tree, const_tree);
-#define omit_one_operand(T1,T2,T3)\
- omit_one_operand_loc (UNKNOWN_LOCATION, T1, T2, T3)
-extern tree omit_one_operand_loc (location_t, tree, tree, tree);
-#define omit_two_operands(T1,T2,T3,T4)\
- omit_two_operands_loc (UNKNOWN_LOCATION, T1, T2, T3, T4)
-extern tree omit_two_operands_loc (location_t, tree, tree, tree, tree);
-#define invert_truthvalue(T)\
- invert_truthvalue_loc (UNKNOWN_LOCATION, T)
-extern tree invert_truthvalue_loc (location_t, tree);
-extern tree fold_unary_to_constant (enum tree_code, tree, tree);
-extern tree fold_binary_to_constant (enum tree_code, tree, tree, tree);
-extern tree fold_read_from_constant_string (tree);
-extern tree int_const_binop (enum tree_code, const_tree, const_tree);
-#define build_fold_addr_expr(T)\
- build_fold_addr_expr_loc (UNKNOWN_LOCATION, (T))
-extern tree build_fold_addr_expr_loc (location_t, tree);
-#define build_fold_addr_expr_with_type(T,TYPE)\
- build_fold_addr_expr_with_type_loc (UNKNOWN_LOCATION, (T), TYPE)
-extern tree build_fold_addr_expr_with_type_loc (location_t, tree, tree);
-extern tree fold_build_cleanup_point_expr (tree type, tree expr);
-extern tree fold_strip_sign_ops (tree);
-#define build_fold_indirect_ref(T)\
- build_fold_indirect_ref_loc (UNKNOWN_LOCATION, T)
-extern tree build_fold_indirect_ref_loc (location_t, tree);
-#define fold_indirect_ref(T)\
- fold_indirect_ref_loc (UNKNOWN_LOCATION, T)
-extern tree fold_indirect_ref_loc (location_t, tree);
-extern tree build_simple_mem_ref_loc (location_t, tree);
-#define build_simple_mem_ref(T)\
- build_simple_mem_ref_loc (UNKNOWN_LOCATION, T)
-extern tree build_invariant_address (tree, tree, HOST_WIDE_INT);
-extern tree constant_boolean_node (bool, tree);
-extern tree div_if_zero_remainder (const_tree, const_tree);
-
-extern bool tree_swap_operands_p (const_tree, const_tree, bool);
-extern enum tree_code swap_tree_comparison (enum tree_code);
-
-extern bool ptr_difference_const (tree, tree, HOST_WIDE_INT *);
-extern enum tree_code invert_tree_comparison (enum tree_code, bool);
-
-extern bool tree_unary_nonzero_warnv_p (enum tree_code, tree, tree, bool *);
-extern bool tree_binary_nonzero_warnv_p (enum tree_code, tree, tree, tree op1,
- bool *);
-extern bool tree_single_nonzero_warnv_p (tree, bool *);
-extern bool tree_unary_nonnegative_warnv_p (enum tree_code, tree, tree, bool *);
-extern bool tree_binary_nonnegative_warnv_p (enum tree_code, tree, tree, tree,
- bool *);
-extern bool tree_single_nonnegative_warnv_p (tree t, bool *strict_overflow_p);
-extern bool tree_call_nonnegative_warnv_p (tree, tree, tree, tree, bool *);
-
-extern bool fold_real_zero_addition_p (const_tree, const_tree, int);
-extern tree combine_comparisons (location_t, enum tree_code, enum tree_code,
- enum tree_code, tree, tree, tree);
-extern void debug_fold_checksum (const_tree);
-
/* Return nonzero if CODE is a tree code that represents a truth value. */
static inline bool
truth_value_p (enum tree_code code)
@@ -4557,56 +4250,7 @@ fold_build_pointer_plus_hwi_loc (location_t loc, tree ptr, HOST_WIDE_INT off)
#define fold_build_pointer_plus_hwi(p,o) \
fold_build_pointer_plus_hwi_loc (UNKNOWN_LOCATION, p, o)
-/* In builtins.c */
-
-/* Non-zero if __builtin_constant_p should be folded right away. */
-extern bool force_folding_builtin_constant_p;
-
-extern bool avoid_folding_inline_builtin (tree);
-extern tree fold_call_expr (location_t, tree, bool);
-extern tree fold_builtin_fputs (location_t, tree, tree, bool, bool, tree);
-extern tree fold_builtin_strcpy (location_t, tree, tree, tree, tree);
-extern tree fold_builtin_strncpy (location_t, tree, tree, tree, tree, tree);
-extern tree fold_builtin_memory_chk (location_t, tree, tree, tree, tree, tree, tree, bool,
- enum built_in_function);
-extern tree fold_builtin_stxcpy_chk (location_t, tree, tree, tree, tree, tree, bool,
- enum built_in_function);
-extern tree fold_builtin_stxncpy_chk (location_t, tree, tree, tree, tree, tree, bool,
- enum built_in_function);
-extern bool fold_builtin_next_arg (tree, bool);
-extern enum built_in_function builtin_mathfn_code (const_tree);
-extern tree fold_builtin_call_array (location_t, tree, tree, int, tree *);
-extern tree build_call_expr_loc_array (location_t, tree, int, tree *);
-extern tree build_call_expr_loc_vec (location_t, tree, vec<tree, va_gc> *);
-extern tree build_call_expr_loc (location_t, tree, int, ...);
-extern tree build_call_expr (tree, int, ...);
-extern tree mathfn_built_in (tree, enum built_in_function fn);
-extern tree c_strlen (tree, int);
-extern tree build_string_literal (int, const char *);
-extern rtx builtin_memset_read_str (void *, HOST_WIDE_INT, enum machine_mode);
-extern bool is_builtin_fn (tree);
-extern bool get_object_alignment_1 (tree, unsigned int *,
- unsigned HOST_WIDE_INT *);
-extern unsigned int get_object_alignment (tree);
-extern bool get_pointer_alignment_1 (tree, unsigned int *,
- unsigned HOST_WIDE_INT *);
-extern unsigned int get_pointer_alignment (tree);
-extern tree fold_call_stmt (gimple, bool);
-extern tree gimple_fold_builtin_snprintf_chk (gimple, tree, enum built_in_function);
-extern tree make_range (tree, int *, tree *, tree *, bool *);
-extern tree make_range_step (location_t, enum tree_code, tree, tree, tree,
- tree *, tree *, int *, bool *);
-extern tree build_range_check (location_t, tree, tree, int, tree, tree);
-extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int,
- tree, tree);
-extern void set_builtin_user_assembler_name (tree decl, const char *asmspec);
-extern bool is_simple_builtin (tree);
-extern bool is_inexpensive_builtin (tree);
-
-/* In convert.c */
extern tree strip_float_extensions (tree);
-
-/* In tree.c */
extern int really_constant_p (const_tree);
extern bool decl_address_invariant_p (const_tree);
extern bool decl_address_ip_invariant_p (const_tree);
@@ -4630,7 +4274,6 @@ extern void dump_tree_statistics (void);
extern void recompute_tree_invariant_for_addr_expr (tree);
extern bool needs_to_live_in_memory (const_tree);
extern tree reconstruct_complex_type (tree, tree);
-
extern int real_onep (const_tree);
extern int real_minus_onep (const_tree);
extern void init_ttree (void);
@@ -4642,14 +4285,11 @@ extern tree build_nonshared_range_type (tree, tree, tree);
extern bool subrange_type_for_debug_p (const_tree, tree *, tree *);
extern HOST_WIDE_INT int_cst_value (const_tree);
extern HOST_WIDEST_INT widest_int_cst_value (const_tree);
-
extern tree tree_block (tree);
extern void tree_set_block (tree, tree);
extern location_t *block_nonartificial_location (tree);
extern location_t tree_nonartificial_location (tree);
-
extern tree block_ultimate_origin (const_tree);
-
extern tree get_binfo_at_offset (tree, HOST_WIDE_INT, tree);
extern bool virtual_method_call_p (tree);
extern tree obj_type_ref_class (tree ref);
@@ -4659,163 +4299,8 @@ extern bool type_in_anonymous_namespace_p (tree);
extern bool block_may_fallthru (const_tree);
extern void using_eh_for_cleanups (void);
extern bool using_eh_for_cleanups_p (void);
-
extern const char *get_tree_code_name (enum tree_code);
-
-/* In function.c */
-extern void expand_function_end (void);
-extern void expand_function_start (tree);
-extern void stack_protect_epilogue (void);
-extern void init_dummy_function_start (void);
-extern void expand_dummy_function_end (void);
-extern void allocate_struct_function (tree, bool);
-extern void push_struct_function (tree fndecl);
-extern void init_function_start (tree);
-extern bool use_register_for_decl (const_tree);
-extern void generate_setjmp_warnings (void);
-extern void init_temp_slots (void);
-extern void free_temp_slots (void);
-extern void pop_temp_slots (void);
-extern void push_temp_slots (void);
-extern void preserve_temp_slots (rtx);
-extern int aggregate_value_p (const_tree, const_tree);
-extern void push_function_context (void);
-extern void pop_function_context (void);
-extern gimple_seq gimplify_parameters (void);
-
-/* In print-rtl.c */
-#ifdef BUFSIZ
-extern void print_rtl (FILE *, const_rtx);
-#endif
-
-/* In print-tree.c */
-extern void debug_tree (tree);
-extern void debug_raw (const tree_node &ref);
-extern void debug_raw (const tree_node *ptr);
-extern void debug (const tree_node &ref);
-extern void debug (const tree_node *ptr);
-extern void debug_verbose (const tree_node &ref);
-extern void debug_verbose (const tree_node *ptr);
-extern void debug_head (const tree_node &ref);
-extern void debug_head (const tree_node *ptr);
-extern void debug_body (const tree_node &ref);
-extern void debug_body (const tree_node *ptr);
-extern void debug_vec_tree (vec<tree, va_gc> *);
-extern void debug (vec<tree, va_gc> &ref);
-extern void debug (vec<tree, va_gc> *ptr);
-extern void debug_raw (vec<tree, va_gc> &ref);
-extern void debug_raw (vec<tree, va_gc> *ptr);
-#ifdef BUFSIZ
-extern void dump_addr (FILE*, const char *, const void *);
-extern void print_node (FILE *, const char *, tree, int);
-extern void print_node_brief (FILE *, const char *, const_tree, int);
-extern void indent_to (FILE *, int);
-#endif
-
-/* In tree-inline.c: */
-extern bool debug_find_tree (tree, tree);
-/* This is in tree-inline.c since the routine uses
- data structures from the inliner. */
-extern tree build_duplicate_type (tree);
-
-/* In calls.c */
-extern int flags_from_decl_or_type (const_tree);
-extern int call_expr_flags (const_tree);
extern void set_call_expr_flags (tree, int);
-
-extern int setjmp_call_p (const_tree);
-extern bool gimple_alloca_call_p (const_gimple);
-extern bool alloca_call_p (const_tree);
-extern bool must_pass_in_stack_var_size (enum machine_mode, const_tree);
-extern bool must_pass_in_stack_var_size_or_pad (enum machine_mode, const_tree);
-
-/* In attribs.c. */
-
-extern const struct attribute_spec *lookup_attribute_spec (const_tree);
-extern void init_attributes (void);
-
-/* Process the attributes listed in ATTRIBUTES and install them in *NODE,
- which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL,
- it should be modified in place; if a TYPE, a copy should be created
- unless ATTR_FLAG_TYPE_IN_PLACE is set in FLAGS. FLAGS gives further
- information, in the form of a bitwise OR of flags in enum attribute_flags
- from tree.h. Depending on these flags, some attributes may be
- returned to be applied at a later stage (for example, to apply
- a decl attribute to the declaration rather than to its type). */
-extern tree decl_attributes (tree *, tree, int);
-
-extern bool cxx11_attribute_p (const_tree);
-
-extern tree get_attribute_name (const_tree);
-
-extern void apply_tm_attr (tree, tree);
-
-/* In stor-layout.c */
-extern void set_min_and_max_values_for_integral_type (tree, int, signop);
-extern void fixup_signed_type (tree);
-extern void internal_reference_types (void);
-extern unsigned int update_alignment_for_field (record_layout_info, tree,
- unsigned int);
-/* varasm.c */
-extern tree tree_output_constant_def (tree);
-extern void make_decl_rtl (tree);
-extern rtx make_decl_rtl_for_debug (tree);
-extern void make_decl_one_only (tree, tree);
-extern int supports_one_only (void);
-extern void resolve_unique_section (tree, int, int);
-extern void mark_referenced (tree);
-extern void mark_decl_referenced (tree);
-extern void notice_global_symbol (tree);
-extern void set_user_assembler_name (tree, const char *);
-extern void process_pending_assemble_externals (void);
-extern bool decl_replaceable_p (tree);
-extern bool decl_binds_to_current_def_p (tree);
-extern enum tls_model decl_default_tls_model (const_tree);
-
-/* Declare DECL to be a weak symbol. */
-extern void declare_weak (tree);
-/* Merge weak status. */
-extern void merge_weak (tree, tree);
-/* Make one symbol an alias for another. */
-extern void assemble_alias (tree, tree);
-
-/* Return nonzero if VALUE is a valid constant-valued expression
- for use in initializing a static variable; one that can be an
- element of a "constant" initializer.
-
- Return null_pointer_node if the value is absolute;
- if it is relocatable, return the variable that determines the relocation.
- We assume that VALUE has been folded as much as possible;
- therefore, we do not need to check for such things as
- arithmetic-combinations of integers. */
-extern tree initializer_constant_valid_p (tree, tree);
-
-/* Return true if VALUE is a valid constant-valued expression
- for use in initializing a static bit-field; one that can be
- an element of a "constant" initializer. */
-extern bool initializer_constant_valid_for_bitfield_p (tree);
-
-/* Whether a constructor CTOR is a valid static constant initializer if all
- its elements are. This used to be internal to initializer_constant_valid_p
- and has been exposed to let other functions like categorize_ctor_elements
- evaluate the property while walking a constructor for other purposes. */
-
-extern bool constructor_static_from_elts_p (const_tree);
-
-/* In stmt.c */
-extern bool parse_output_constraint (const char **, int, int, int,
- bool *, bool *, bool *);
-extern bool parse_input_constraint (const char **, int, int, int, int,
- const char * const *, bool *, bool *);
-extern tree resolve_asm_operand_names (tree, tree, tree, tree);
-#ifdef HARD_CONST
-/* Silly ifdef to avoid having all includers depend on hard-reg-set.h. */
-extern tree tree_overlaps_hard_reg_set (tree, HARD_REG_SET *);
-#endif
-
-
-/* In tree-inline.c */
-
extern tree walk_tree_1 (tree*, walk_tree_fn, void*, struct pointer_set_t*,
walk_tree_lh);
extern tree walk_tree_without_duplicates_1 (tree*, walk_tree_fn, void*,
@@ -4825,16 +4310,7 @@ extern tree walk_tree_without_duplicates_1 (tree*, walk_tree_fn, void*,
#define walk_tree_without_duplicates(a,b,c) \
walk_tree_without_duplicates_1 (a, b, c, NULL)
-/* In emit-rtl.c */
-/* Assign the RTX to declaration. */
-
-extern void set_decl_rtl (tree, rtx);
-extern void set_decl_incoming_rtl (tree, rtx, bool);
-
-/* In gimple.c. */
extern tree get_base_address (tree t);
-
-/* In tree.c. */
extern tree drop_tree_overflow (tree);
extern int tree_map_base_eq (const void *, const void *);
extern unsigned int tree_map_base_hash (const void *);
@@ -4860,33 +4336,6 @@ extern unsigned int tree_decl_map_hash (const void *);
#define tree_vec_map_hash tree_decl_map_hash
#define tree_vec_map_marked_p tree_map_base_marked_p
-/* In tree-object-size.c. */
-extern void init_object_sizes (void);
-extern unsigned HOST_WIDE_INT compute_builtin_object_size (tree, int);
-
-/* In expr.c. */
-
-/* Determine whether the LEN bytes can be moved by using several move
- instructions. Return nonzero if a call to move_by_pieces should
- succeed. */
-extern int can_move_by_pieces (unsigned HOST_WIDE_INT, unsigned int);
-
-extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree);
-extern tree build_personality_function (const char *);
-
-/* In trans-mem.c. */
-extern tree build_tm_abort_call (location_t, bool);
-extern bool is_tm_safe (const_tree);
-extern bool is_tm_pure (const_tree);
-extern bool is_tm_may_cancel_outer (tree);
-extern bool is_tm_ending_fndecl (tree);
-extern void record_tm_replacement (tree, tree);
-extern void tm_malloc_replacement (tree);
-
-/* In tree-inline.c. */
-
-void init_inline_once (void);
-
/* Initialize the abstract argument list iterator object ITER with the
arguments from CALL_EXPR node EXP. */
static inline void
@@ -5236,4 +4685,70 @@ wi::max_value (const_tree type)
return max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
}
+/* FIXME - These declarations belong in builtins.h, expr.h and emit-rtl.h,
+ but none of these files are allowed to be included from front ends.
+ They should each be split in two: one part suitable for the FEs, the
+ other for the BE. */
+
+/* Assign the RTX to the declaration. */
+extern void set_decl_rtl (tree, rtx);
+extern bool complete_ctor_at_level_p (const_tree, HOST_WIDE_INT, const_tree);
+
+/* Return a tree representing the upper bound of the array mentioned in
+ EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
+extern tree array_ref_up_bound (tree);
+
+extern tree build_personality_function (const char *);
+
+/* Given an expression EXP that is a handled_component_p,
+ look for the ultimate containing object, which is returned, and specify
+ the access position and size. */
+extern tree get_inner_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
+ tree *, enum machine_mode *, int *, int *,
+ bool);
+
+/* Return a tree representing the lower bound of the array mentioned in
+ EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
+extern tree array_ref_low_bound (tree);
+
+/* In builtins.c. */
+
+/* Non-zero if __builtin_constant_p should be folded right away. */
+extern bool force_folding_builtin_constant_p;
+
+extern bool avoid_folding_inline_builtin (tree);
+extern tree fold_call_expr (location_t, tree, bool);
+extern tree fold_builtin_fputs (location_t, tree, tree, bool, bool, tree);
+extern tree fold_builtin_strcpy (location_t, tree, tree, tree, tree);
+extern tree fold_builtin_strncpy (location_t, tree, tree, tree, tree, tree);
+extern tree fold_builtin_memory_chk (location_t, tree, tree, tree, tree, tree, tree, bool,
+ enum built_in_function);
+extern tree fold_builtin_stxcpy_chk (location_t, tree, tree, tree, tree, tree, bool,
+ enum built_in_function);
+extern tree fold_builtin_stxncpy_chk (location_t, tree, tree, tree, tree, tree, bool,
+ enum built_in_function);
+extern bool fold_builtin_next_arg (tree, bool);
+extern enum built_in_function builtin_mathfn_code (const_tree);
+extern tree fold_builtin_call_array (location_t, tree, tree, int, tree *);
+extern tree build_call_expr_loc_array (location_t, tree, int, tree *);
+extern tree build_call_expr_loc_vec (location_t, tree, vec<tree, va_gc> *);
+extern tree build_call_expr_loc (location_t, tree, int, ...);
+extern tree build_call_expr (tree, int, ...);
+extern tree mathfn_built_in (tree, enum built_in_function fn);
+extern tree c_strlen (tree, int);
+extern tree build_string_literal (int, const char *);
+extern rtx builtin_memset_read_str (void *, HOST_WIDE_INT, enum machine_mode);
+extern bool is_builtin_fn (tree);
+extern bool get_object_alignment_1 (tree, unsigned int *,
+ unsigned HOST_WIDE_INT *);
+extern unsigned int get_object_alignment (tree);
+extern bool get_pointer_alignment_1 (tree, unsigned int *,
+ unsigned HOST_WIDE_INT *);
+extern unsigned int get_pointer_alignment (tree);
+extern tree fold_call_stmt (gimple, bool);
+extern tree gimple_fold_builtin_snprintf_chk (gimple, tree, enum built_in_function);
+extern void set_builtin_user_assembler_name (tree decl, const char *asmspec);
+extern bool is_simple_builtin (tree);
+extern bool is_inexpensive_builtin (tree);
+
#endif /* GCC_TREE_H */
diff --git a/gcc/tsan.c b/gcc/tsan.c
index b8f65af6541..9330074ce30 100644
--- a/gcc/tsan.c
+++ b/gcc/tsan.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "expr.h"
#include "intl.h"
#include "tm.h"
#include "basic-block.h"
@@ -33,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "tree-iterator.h"
@@ -650,7 +652,7 @@ instrument_func_entry (void)
tree ret_addr, builtin_decl;
gimple g;
- succ_bb = single_succ (ENTRY_BLOCK_PTR);
+ succ_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gsi = gsi_after_labels (succ_bb);
builtin_decl = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
@@ -680,7 +682,7 @@ instrument_func_exit (void)
edge_iterator ei;
/* Find all function exits. */
- exit_bb = EXIT_BLOCK_PTR;
+ exit_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
FOR_EACH_EDGE (e, ei, exit_bb->preds)
{
gsi = gsi_last_bb (e->src);
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index 9e9b94da12a..de5dc40d659 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -22,16 +22,26 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
#include "cgraph.h"
+#include "tree-pass.h"
#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimple-ssa.h"
+#include "gimple-walk.h"
#include "hashtab.h"
#include "pointer-set.h"
#include "output.h"
#include "tm_p.h"
#include "toplev.h"
+#include "cfgloop.h"
#include "ubsan.h"
#include "c-family/c-common.h"
+/* From trans-mem.c. */
+#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
+
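For scale: REG_BR_PROB_BASE is 10000 in GCC, so PROB_VERY_UNLIKELY works out to 10000 / 2000 - 1 = 4, i.e. the error branch created by the expansion below is annotated as taken roughly 0.04% of the time.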
/* Map from a tree to a VAR_DECL tree. */
struct GTY(()) tree_type_map {
@@ -40,9 +50,16 @@ struct GTY(()) tree_type_map {
};
#define tree_type_map_eq tree_map_base_eq
-#define tree_type_map_hash tree_map_base_hash
#define tree_type_map_marked_p tree_map_base_marked_p
+/* Hash from a tree in a tree_type_map. */
+
+unsigned int
+tree_type_map_hash (const void *item)
+{
+ return TYPE_UID (((const struct tree_type_map *)item)->type.from);
+}
+
static GTY ((if_marked ("tree_type_map_marked_p"), param_is (struct tree_type_map)))
htab_t decl_tree_for_type;
@@ -240,12 +257,14 @@ get_ubsan_type_info_for_type (tree type)
}
/* Helper routine that returns ADDR_EXPR of a VAR_DECL of a type
- descriptor. It first looks into the pointer map; if not found,
- create the VAR_DECL, put it into the pointer map and return the
- ADDR_EXPR of it. TYPE describes a particular type. */
+ descriptor. It first looks into the hash table; if the descriptor is
+ not found there, it creates the VAR_DECL, puts it into the hash table
+ and returns its ADDR_EXPR. TYPE describes a particular type.
+ WANT_POINTER_TYPE_P says whether we are interested in the pointer type
+ and not the pointer itself. */
tree
-ubsan_type_descriptor (tree type)
+ubsan_type_descriptor (tree type, bool want_pointer_type_p)
{
/* See through any typedefs. */
type = TYPE_MAIN_VARIANT (type);
@@ -255,33 +274,73 @@ ubsan_type_descriptor (tree type)
return decl;
tree dtype = ubsan_type_descriptor_type ();
- const char *tname;
+ tree type2 = type;
+ const char *tname = NULL;
+ char *pretty_name;
+ unsigned char deref_depth = 0;
unsigned short tkind, tinfo;
- /* At least for INTEGER_TYPE/REAL_TYPE/COMPLEX_TYPE, this should work.
- For e.g. type_unsigned_for (type) or bit-fields, the TYPE_NAME
- would be NULL. */
- if (TYPE_NAME (type) != NULL)
+ /* Get the name of the type, or the name of the pointer type. */
+ if (want_pointer_type_p)
+ {
+ gcc_assert (POINTER_TYPE_P (type));
+ type2 = TREE_TYPE (type);
+
+ /* Remove any '*' operators from TYPE. */
+ while (POINTER_TYPE_P (type2))
+ deref_depth++, type2 = TREE_TYPE (type2);
+
+ if (TREE_CODE (type2) == METHOD_TYPE)
+ type2 = TYPE_METHOD_BASETYPE (type2);
+ }
+
+ if (TYPE_NAME (type2) != NULL)
{
- if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
- tname = IDENTIFIER_POINTER (TYPE_NAME (type));
+ if (TREE_CODE (TYPE_NAME (type2)) == IDENTIFIER_NODE)
+ tname = IDENTIFIER_POINTER (TYPE_NAME (type2));
else
- tname = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+ tname = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type2)));
}
- else
+
+ if (tname == NULL)
+ /* We weren't able to determine the type name. */
tname = "<unknown>";
- if (TREE_CODE (type) == INTEGER_TYPE)
+ /* Decorate the type name with '', '*', "struct", or "union". */
+ pretty_name = (char *) alloca (strlen (tname) + 64 + deref_depth);
+ if (want_pointer_type_p)
{
- /* For INTEGER_TYPE, this is 0x0000. */
- tkind = 0x000;
- tinfo = get_ubsan_type_info_for_type (type);
+ int pos = sprintf (pretty_name, "'%s%s%s%s%s%s%s",
+ TYPE_VOLATILE (type2) ? "volatile " : "",
+ TYPE_READONLY (type2) ? "const " : "",
+ TYPE_RESTRICT (type2) ? "restrict " : "",
+ TYPE_ATOMIC (type2) ? "_Atomic " : "",
+ TREE_CODE (type2) == RECORD_TYPE
+ ? "struct "
+ : TREE_CODE (type2) == UNION_TYPE
+ ? "union " : "", tname,
+ deref_depth == 0 ? "" : " ");
+ while (deref_depth-- > 0)
+ pretty_name[pos++] = '*';
+ pretty_name[pos++] = '\'';
+ pretty_name[pos] = '\0';
}
- else if (TREE_CODE (type) == REAL_TYPE)
- /* We don't have float support yet. */
- gcc_unreachable ();
else
- gcc_unreachable ();
+ sprintf (pretty_name, "'%s'", tname);
+
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ tkind = 0x0000;
+ break;
+ case REAL_TYPE:
+ tkind = 0x0001;
+ break;
+ default:
+ tkind = 0xffff;
+ break;
+ }
+ tinfo = get_ubsan_type_info_for_type (type);
/* Create a new VAR_DECL of type descriptor. */
char tmp_name[32];
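As a hand-worked illustration of the pretty-name construction above (derived from the sprintf format string, not from an actual ubsan type dump), a few representative inputs and the descriptor strings/kinds they would produce:

  source type                               descriptor name       tkind
  int                                       'int'                 0x0000
  double                                    'double'              0x0001
  const struct S ** (want_pointer_type_p)   'const struct S **'   0xffff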
@@ -295,8 +354,8 @@ ubsan_type_descriptor (tree type)
DECL_IGNORED_P (decl) = 1;
DECL_EXTERNAL (decl) = 0;
- size_t len = strlen (tname);
- tree str = build_string (len + 1, tname);
+ size_t len = strlen (pretty_name);
+ tree str = build_string (len + 1, pretty_name);
TREE_TYPE (str) = build_array_type (char_type_node,
build_index_type (size_int (len)));
TREE_READONLY (str) = 1;
@@ -311,7 +370,7 @@ ubsan_type_descriptor (tree type)
DECL_INITIAL (decl) = ctor;
rest_of_decl_compilation (decl, 1, 0);
- /* Save the address of the VAR_DECL into the pointer map. */
+ /* Save the address of the VAR_DECL into the hash table. */
decl = build_fold_addr_expr (decl);
decl_for_type_insert (type, decl);
@@ -320,10 +379,12 @@ ubsan_type_descriptor (tree type)
/* Create a structure for the ubsan library. NAME is a name of the new
structure. The arguments in ... are of __ubsan_type_descriptor type
- and there are at most two of them. */
+ and there are at most two of them. MISMATCH is extra data used by ubsan
+ pointer checking. */
tree
-ubsan_create_data (const char *name, location_t loc, ...)
+ubsan_create_data (const char *name, location_t loc,
+ const struct ubsan_mismatch_data *mismatch, ...)
{
va_list args;
tree ret, t;
@@ -346,12 +407,12 @@ ubsan_create_data (const char *name, location_t loc, ...)
i++;
}
- va_start (args, loc);
+ va_start (args, mismatch);
for (t = va_arg (args, tree); t != NULL_TREE;
i++, t = va_arg (args, tree))
{
gcc_checking_assert (i < 3);
- /* Save the tree argument for later use. */
+ /* Save the tree arguments for later use. */
vec_safe_push (saved_args, t);
fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
td_type);
@@ -359,10 +420,27 @@ ubsan_create_data (const char *name, location_t loc, ...)
if (i)
DECL_CHAIN (fields[i - 1]) = fields[i];
}
+ va_end (args);
+
+ if (mismatch != NULL)
+ {
+ /* We have to add two more decls. */
+ fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
+ pointer_sized_int_node);
+ DECL_CONTEXT (fields[i]) = ret;
+ DECL_CHAIN (fields[i - 1]) = fields[i];
+ i++;
+
+ fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
+ unsigned_char_type_node);
+ DECL_CONTEXT (fields[i]) = ret;
+ DECL_CHAIN (fields[i - 1]) = fields[i];
+ i++;
+ }
+
TYPE_FIELDS (ret) = fields[0];
TYPE_NAME (ret) = get_identifier (name);
layout_type (ret);
- va_end (args);
/* Now, fill in the type. */
char tmp_name[32];
@@ -391,6 +469,13 @@ ubsan_create_data (const char *name, location_t loc, ...)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, t);
}
+ if (mismatch != NULL)
+ {
+ /* Append the pointer data. */
+ CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, mismatch->align);
+ CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, mismatch->ckind);
+ }
+
TREE_CONSTANT (ctor) = 1;
TREE_STATIC (ctor) = 1;
DECL_INITIAL (var) = ctor;
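With a non-NULL MISMATCH argument, the record built above gains two trailing fields. A sketch of the resulting layout in C terms (the struct and field names here are illustrative; the real record is built from anonymous FIELD_DECLs and is matched by libubsan's type-mismatch handler):

  struct ubsan_null_data_sketch
  {
    struct { const char *filename; unsigned int line; unsigned int column; } loc;
    void *type_descriptor;          /* one per "..." argument, at most two */
    __UINTPTR_TYPE__ alignment;     /* mismatch->align (pointer_sized_int_node) */
    unsigned char type_check_kind;  /* mismatch->ckind (unsigned_char_type_node) */
  };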
@@ -405,7 +490,8 @@ ubsan_create_data (const char *name, location_t loc, ...)
tree
ubsan_instrument_unreachable (location_t loc)
{
- tree data = ubsan_create_data ("__ubsan_unreachable_data", loc, NULL_TREE);
+ tree data = ubsan_create_data ("__ubsan_unreachable_data", loc, NULL,
+ NULL_TREE);
tree t = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_BUILTIN_UNREACHABLE);
return build_call_expr_loc (loc, t, 1, build_fold_addr_expr_loc (loc, data));
}
@@ -420,4 +506,199 @@ is_ubsan_builtin_p (tree t)
"__builtin___ubsan_", 18) == 0;
}
+/* Expand UBSAN_NULL internal call. */
+
+void
+ubsan_expand_null_ifn (gimple_stmt_iterator gsi)
+{
+ gimple stmt = gsi_stmt (gsi);
+ location_t loc = gimple_location (stmt);
+ gcc_assert (gimple_call_num_args (stmt) == 2);
+ tree ptr = gimple_call_arg (stmt, 0);
+ tree ckind = gimple_call_arg (stmt, 1);
+
+ basic_block cur_bb = gsi_bb (gsi);
+
+ /* Split the original block holding the pointer dereference. */
+ edge e = split_block (cur_bb, stmt);
+
+ /* Get a hold on the 'condition block', the 'then block' and the
+ 'else block'. */
+ basic_block cond_bb = e->src;
+ basic_block fallthru_bb = e->dest;
+ basic_block then_bb = create_empty_bb (cond_bb);
+ if (current_loops)
+ {
+ add_bb_to_loop (then_bb, cond_bb->loop_father);
+ loops_state_set (LOOPS_NEED_FIXUP);
+ }
+
+ /* Make an edge coming from the 'cond block' into the 'then block';
+ this edge is unlikely to be taken, so set up the probability accordingly. */
+ e = make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
+ e->probability = PROB_VERY_UNLIKELY;
+
+ /* Connect 'then block' with the 'else block'. This is needed
+ as the ubsan routines we call in the 'then block' are not noreturn.
+ The 'then block' only has one outgoing edge. */
+ make_single_succ_edge (then_bb, fallthru_bb, EDGE_FALLTHRU);
+
+ /* Set up the fallthrough basic block. */
+ e = find_edge (cond_bb, fallthru_bb);
+ e->flags = EDGE_FALSE_VALUE;
+ e->count = cond_bb->count;
+ e->probability = REG_BR_PROB_BASE - PROB_VERY_UNLIKELY;
+
+ /* Update dominance info for the newly created then_bb; note that
+ fallthru_bb's dominance info has already been updated by
+ split_block. */
+ if (dom_info_available_p (CDI_DOMINATORS))
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
+
+ /* Put the ubsan builtin call into the newly created BB. */
+ tree fn = builtin_decl_implicit (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH);
+ const struct ubsan_mismatch_data m
+ = { build_zero_cst (pointer_sized_int_node), ckind };
+ tree data = ubsan_create_data ("__ubsan_null_data",
+ loc, &m,
+ ubsan_type_descriptor (TREE_TYPE (ptr), true),
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ gimple g = gimple_build_call (fn, 2, data,
+ build_zero_cst (pointer_sized_int_node));
+ gimple_set_location (g, loc);
+ gimple_stmt_iterator gsi2 = gsi_start_bb (then_bb);
+ gsi_insert_after (&gsi2, g, GSI_NEW_STMT);
+
+ /* Unlink the UBSAN_NULL's vops before replacing it. */
+ unlink_stmt_vdef (stmt);
+
+ g = gimple_build_cond (EQ_EXPR, ptr, build_int_cst (TREE_TYPE (ptr), 0),
+ NULL_TREE, NULL_TREE);
+ gimple_set_location (g, loc);
+
+ /* Replace the UBSAN_NULL with a GIMPLE_COND stmt. */
+ gsi_replace (&gsi, g, false);
+}
+
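In GIMPLE terms the expansion above replaces the internal call with an explicit null test; a rough before/after sketch (SSA names and block layout are illustrative):

  /* Before expansion, as emitted by the ubsan pass below:  */
  UBSAN_NULL (ptr_1, ckind);
  *ptr_1 = ...;

  /* After ubsan_expand_null_ifn:  */
  if (ptr_1 == 0)                                                  /* cond_bb, ~0.04% taken */
    __builtin___ubsan_handle_type_mismatch (&__ubsan_null_data, 0); /* then_bb */
  *ptr_1 = ...;                                                    /* fallthru_bb */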
+/* Instrument a member call. We check whether 'this' is NULL. */
+
+static void
+instrument_member_call (gimple_stmt_iterator *iter)
+{
+ tree this_parm = gimple_call_arg (gsi_stmt (*iter), 0);
+ tree kind = build_int_cst (unsigned_char_type_node, UBSAN_MEMBER_CALL);
+ gimple g = gimple_build_call_internal (IFN_UBSAN_NULL, 2, this_parm, kind);
+ gimple_set_location (g, gimple_location (gsi_stmt (*iter)));
+ gsi_insert_before (iter, g, GSI_SAME_STMT);
+}
+
+/* Instrument a memory reference. T is the pointer; IS_LHS says
+ whether the pointer is on the left-hand side of the assignment. */
+
+static void
+instrument_mem_ref (tree t, gimple_stmt_iterator *iter, bool is_lhs)
+{
+ enum ubsan_null_ckind ikind = is_lhs ? UBSAN_STORE_OF : UBSAN_LOAD_OF;
+ if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (TREE_TYPE (t))))
+ ikind = UBSAN_MEMBER_ACCESS;
+ tree kind = build_int_cst (unsigned_char_type_node, ikind);
+ gimple g = gimple_build_call_internal (IFN_UBSAN_NULL, 2, t, kind);
+ gimple_set_location (g, gimple_location (gsi_stmt (*iter)));
+ gsi_insert_before (iter, g, GSI_SAME_STMT);
+}
+
+/* Callback function for the pointer instrumentation. */
+
+static tree
+instrument_null (tree *tp, int * /*walk_subtree*/, void *data)
+{
+ tree t = *tp;
+ const enum tree_code code = TREE_CODE (t);
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+
+ if (code == MEM_REF
+ && TREE_CODE (TREE_OPERAND (t, 0)) == SSA_NAME)
+ instrument_mem_ref (TREE_OPERAND (t, 0), &wi->gsi, wi->is_lhs);
+ else if (code == ADDR_EXPR
+ && POINTER_TYPE_P (TREE_TYPE (t))
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == METHOD_TYPE)
+ instrument_member_call (&wi->gsi);
+
+ return NULL_TREE;
+}
+
+/* Gate and execute functions for ubsan pass. */
+
+static unsigned int
+ubsan_pass (void)
+{
+ basic_block bb;
+ gimple_stmt_iterator gsi;
+
+ FOR_EACH_BB (bb)
+ {
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
+ {
+ struct walk_stmt_info wi;
+ gimple stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (stmt))
+ {
+ gsi_next (&gsi);
+ continue;
+ }
+
+ memset (&wi, 0, sizeof (wi));
+ wi.gsi = gsi;
+ walk_gimple_op (stmt, instrument_null, &wi);
+ gsi_next (&gsi);
+ }
+ }
+ return 0;
+}
+
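Tying the walker and the expander together, a plain pointer load such as the one below would be instrumented roughly as follows (a sketch; the SSA numbering is illustrative):

  /* C source */
  int f (int *p) { return *p; }

  /* After the ubsan pass: an internal call guards the dereference.  */
  UBSAN_NULL (p_1, UBSAN_LOAD_OF);
  _2 = *p_1;

  /* ubsan_expand_null_ifn later turns the internal call into the explicit
     "if (p_1 == 0) __ubsan_handle_type_mismatch (...)" form shown above.  */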
+static bool
+gate_ubsan (void)
+{
+ return flag_sanitize & SANITIZE_NULL;
+}
+
+namespace {
+
+const pass_data pass_data_ubsan =
+{
+ GIMPLE_PASS, /* type */
+ "ubsan", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_gate */
+ true, /* has_execute */
+ TV_TREE_UBSAN, /* tv_id */
+ ( PROP_cfg | PROP_ssa ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa, /* todo_flags_finish */
+};
+
+class pass_ubsan : public gimple_opt_pass
+{
+public:
+ pass_ubsan (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_ubsan, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ bool gate () { return gate_ubsan (); }
+ unsigned int execute () { return ubsan_pass (); }
+
+}; // class pass_ubsan
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_ubsan (gcc::context *ctxt)
+{
+ return new pass_ubsan (ctxt);
+}
+
#include "gt-ubsan.h"
diff --git a/gcc/ubsan.h b/gcc/ubsan.h
index 3553a6cfbc4..666e5fe15ab 100644
--- a/gcc/ubsan.h
+++ b/gcc/ubsan.h
@@ -21,9 +21,26 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_UBSAN_H
#define GCC_UBSAN_H
+/* The various kinds of NULL pointer checks. */
+enum ubsan_null_ckind {
+ UBSAN_LOAD_OF,
+ UBSAN_STORE_OF,
+ UBSAN_REF_BINDING,
+ UBSAN_MEMBER_ACCESS,
+ UBSAN_MEMBER_CALL
+};
+
+/* Extra data used by ubsan pointer checking. */
+struct ubsan_mismatch_data {
+ tree align;
+ tree ckind;
+};
+
+extern void ubsan_expand_null_ifn (gimple_stmt_iterator);
extern tree ubsan_instrument_unreachable (location_t);
-extern tree ubsan_create_data (const char *, location_t, ...);
-extern tree ubsan_type_descriptor (tree);
+extern tree ubsan_create_data (const char *, location_t,
+ const struct ubsan_mismatch_data *, ...);
+extern tree ubsan_type_descriptor (tree, bool);
extern tree ubsan_encode_value (tree);
extern bool is_ubsan_builtin_p (tree);
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index e9d2fefb328..80854daa2ab 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -22,6 +22,8 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "tree-nested.h"
+#include "calls.h"
#include "rtl.h"
#include "expr.h"
#include "hard-reg-set.h"
@@ -40,6 +42,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index eb3f7ce3a7d..58bcd01e20a 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -91,6 +91,9 @@
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "gimple.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
@@ -833,16 +836,18 @@ vt_stack_adjustments (void)
int sp;
/* Initialize entry block. */
- VTI (ENTRY_BLOCK_PTR)->visited = true;
- VTI (ENTRY_BLOCK_PTR)->in.stack_adjust = INCOMING_FRAME_SP_OFFSET;
- VTI (ENTRY_BLOCK_PTR)->out.stack_adjust = INCOMING_FRAME_SP_OFFSET;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->visited = true;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->in.stack_adjust =
+ INCOMING_FRAME_SP_OFFSET;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->out.stack_adjust =
+ INCOMING_FRAME_SP_OFFSET;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
while (sp)
{
@@ -863,7 +868,7 @@ vt_stack_adjustments (void)
VTI (dest)->visited = true;
VTI (dest)->in.stack_adjust = offset = VTI (src)->out.stack_adjust;
- if (dest != EXIT_BLOCK_PTR)
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
for (insn = BB_HEAD (dest);
insn != NEXT_INSN (BB_END (dest));
insn = NEXT_INSN (insn))
@@ -6921,10 +6926,10 @@ vt_find_locations (void)
timevar_push (TV_VAR_TRACKING_DATAFLOW);
/* Compute reverse completion order of depth first search of the CFG
so that the data-flow runs faster. */
- rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+ rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
bb_order = XNEWVEC (int, last_basic_block);
pre_and_rev_post_order_compute (NULL, rc_order, false);
- for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
+ for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
bb_order[rc_order[i]] = i;
free (rc_order);
@@ -7049,7 +7054,7 @@ vt_find_locations (void)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (bitmap_bit_p (visited, e->dest->index))
@@ -9598,7 +9603,7 @@ vt_add_function_parameter (tree parm)
if (!track_loc_p (incoming, parm, offset, false, &mode, &offset))
return;
- out = &VTI (ENTRY_BLOCK_PTR)->out;
+ out = &VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->out;
dv = dv_from_decl (parm);
@@ -9945,7 +9950,7 @@ vt_initialize (void)
for (;;)
{
edge e;
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| ! single_pred_p (bb->next_bb))
break;
e = find_edge (bb, bb->next_bb);
@@ -10048,7 +10053,7 @@ vt_initialize (void)
}
hard_frame_pointer_adjustment = -1;
- VTI (ENTRY_BLOCK_PTR)->flooded = true;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->flooded = true;
cfa_base_rtx = NULL_RTX;
return true;
}
@@ -10174,7 +10179,8 @@ variable_tracking_main_1 (void)
return 0;
}
- if (n_basic_blocks > 500 && n_edges / n_basic_blocks >= 20)
+ if (n_basic_blocks_for_fn (cfun) > 500 &&
+ n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun) >= 20)
{
vt_debug_insns_local (true);
return 0;
diff --git a/gcc/varasm.c b/gcc/varasm.c
index a8f9e53513c..70df7467347 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -31,6 +31,10 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "rtl.h"
#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "gcc-symtab.h"
+#include "varasm.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
@@ -960,9 +964,9 @@ align_variable (tree decl, bool dont_output_data)
In particular, a.out format supports a maximum alignment of 4. */
if (align > MAX_OFILE_ALIGNMENT)
{
- warning (0, "alignment of %q+D is greater than maximum object "
- "file alignment. Using %d", decl,
- MAX_OFILE_ALIGNMENT/BITS_PER_UNIT);
+ error ("alignment of %q+D is greater than maximum object "
+ "file alignment %d", decl,
+ MAX_OFILE_ALIGNMENT/BITS_PER_UNIT);
align = MAX_OFILE_ALIGNMENT;
}
@@ -1635,7 +1639,7 @@ assemble_start_function (tree decl, const char *fnname)
align the hot section and write out the hot section label.
But if the current function is a thunk, we do not have a CFG. */
if (!cfun->is_thunk
- && BB_PARTITION (ENTRY_BLOCK_PTR->next_bb) == BB_COLD_PARTITION)
+ && BB_PARTITION (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) == BB_COLD_PARTITION)
{
switch_to_section (text_section);
assemble_align (DECL_ALIGN (decl));
@@ -1908,8 +1912,8 @@ assemble_noswitch_variable (tree decl, const char *name, section *sect,
if (!sect->noswitch.callback (decl, name, size, rounded)
&& (unsigned HOST_WIDE_INT) (align / BITS_PER_UNIT) > rounded)
- warning (0, "requested alignment for %q+D is greater than "
- "implemented alignment of %wu", decl, rounded);
+ error ("requested alignment for %q+D is greater than "
+ "implemented alignment of %wu", decl, rounded);
}
/* A subroutine of assemble_variable. Output the label and contents of
diff --git a/gcc/varasm.h b/gcc/varasm.h
new file mode 100644
index 00000000000..d2a01a700ab
--- /dev/null
+++ b/gcc/varasm.h
@@ -0,0 +1,69 @@
+/* Declarations for varasm.c.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VARASM_H
+#define GCC_VARASM_H
+
+extern tree tree_output_constant_def (tree);
+extern void make_decl_rtl (tree);
+extern rtx make_decl_rtl_for_debug (tree);
+extern void make_decl_one_only (tree, tree);
+extern int supports_one_only (void);
+extern void resolve_unique_section (tree, int, int);
+extern void mark_referenced (tree);
+extern void mark_decl_referenced (tree);
+extern void notice_global_symbol (tree);
+extern void set_user_assembler_name (tree, const char *);
+extern void process_pending_assemble_externals (void);
+extern bool decl_replaceable_p (tree);
+extern bool decl_binds_to_current_def_p (tree);
+extern enum tls_model decl_default_tls_model (const_tree);
+
+/* Declare DECL to be a weak symbol. */
+extern void declare_weak (tree);
+
+/* Merge weak status. */
+extern void merge_weak (tree, tree);
+
+/* Make one symbol an alias for another. */
+extern void assemble_alias (tree, tree);
+
+/* Return nonzero if VALUE is a valid constant-valued expression
+ for use in initializing a static variable; one that can be an
+ element of a "constant" initializer.
+
+ Return null_pointer_node if the value is absolute;
+ if it is relocatable, return the variable that determines the relocation.
+ We assume that VALUE has been folded as much as possible;
+ therefore, we do not need to check for such things as
+ arithmetic-combinations of integers. */
+extern tree initializer_constant_valid_p (tree, tree);
+
+/* Return true if VALUE is a valid constant-valued expression
+ for use in initializing a static bit-field; one that can be
+ an element of a "constant" initializer. */
+extern bool initializer_constant_valid_for_bitfield_p (tree);
+
+/* Whether a constructor CTOR is a valid static constant initializer if all
+ its elements are. This used to be internal to initializer_constant_valid_p
+ and has been exposed to let other functions like categorize_ctor_elements
+ evaluate the property while walking a constructor for other purposes. */
+extern bool constructor_static_from_elts_p (const_tree);
+
+#endif // GCC_VARASM_H
diff --git a/gcc/varpool.c b/gcc/varpool.c
index 4521caf2b10..06b93a30d5f 100644
--- a/gcc/varpool.c
+++ b/gcc/varpool.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
#include "cgraph.h"
#include "langhooks.h"
#include "diagnostic-core.h"
diff --git a/gcc/vmsdbgout.c b/gcc/vmsdbgout.c
index 9f308a6fdf2..7972dd7a93e 100644
--- a/gcc/vmsdbgout.c
+++ b/gcc/vmsdbgout.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#ifdef VMS_DEBUGGING_INFO
#include "tree.h"
+#include "varasm.h"
#include "version.h"
#include "flags.h"
#include "rtl.h"
diff --git a/gcc/vtable-verify.c b/gcc/vtable-verify.c
index ecf1dc25e84..8d692e62e11 100644
--- a/gcc/vtable-verify.c
+++ b/gcc/vtable-verify.c
@@ -143,6 +143,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
+#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
diff --git a/gcc/xcoffout.c b/gcc/xcoffout.c
index 8a9093c10c2..c14f01424f3 100644
--- a/gcc/xcoffout.c
+++ b/gcc/xcoffout.c
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "varasm.h"
#include "rtl.h"
#include "flags.h"
#include "diagnostic-core.h"
diff --git a/libbacktrace/ChangeLog b/libbacktrace/ChangeLog
index 8d305a9e409..79ac1178f80 100644
--- a/libbacktrace/ChangeLog
+++ b/libbacktrace/ChangeLog
@@ -1,3 +1,41 @@
+2013-11-19 Jakub Jelinek <jakub@redhat.com>
+
+ * backtrace.h (backtrace_syminfo_callback): Add symsize argument.
+ * elf.c (elf_syminfo): Pass 0 or sym->size to the callback as
+ last argument.
+ * btest.c (struct symdata): Add size field.
+ (callback_three): Add symsize argument. Copy it to the data->size
+ field.
+ (f23): Set symdata.size to 0.
+ (test5): Likewise. If sizeof (int) > 1, lookup address of
+ ((uintptr_t) &global) + 1. Verify symdata.val and symdata.size
+ values.
+
+ * atomic.c: Include sys/types.h.
+
+2013-11-18 Ian Lance Taylor <iant@google.com>
+
+ * configure.ac: Check for support of __atomic extensions.
+ * internal.h: Declare or #define atomic functions for use in
+ backtrace code.
+ * atomic.c: New file.
+ * dwarf.c (dwarf_lookup_pc): Use atomic functions.
+ (dwarf_fileline, backtrace_dwarf_add): Likewise.
+ * elf.c (elf_add_syminfo_data, elf_syminfo): Likewise.
+ (backtrace_initialize): Likewise.
+ * fileline.c (fileline_initialize): Likewise.
+ * Makefile.am (libbacktrace_la_SOURCES): Add atomic.c.
+ * configure, config.h.in, Makefile.in: Rebuild.
+
+2013-11-18 Jakub Jelinek <jakub@redhat.com>
+
+ * elf.c (SHN_UNDEF): Define.
+ (elf_initialize_syminfo): Add base_address argument. Ignore symbols
+ with st_shndx == SHN_UNDEF. Add base_address to address fields.
+ (elf_add): Adjust caller.
+
+ * elf.c (phdr_callback): Process info->dlpi_addr == 0 normally.
+
2013-11-16 Ian Lance Taylor <iant@google.com>
* backtrace.h (backtrace_create_state): Correct comment about
diff --git a/libbacktrace/Makefile.am b/libbacktrace/Makefile.am
index 035986bbf96..20dbde480c5 100644
--- a/libbacktrace/Makefile.am
+++ b/libbacktrace/Makefile.am
@@ -40,6 +40,7 @@ noinst_LTLIBRARIES = libbacktrace.la
libbacktrace_la_SOURCES = \
backtrace.h \
+ atomic.c \
dwarf.c \
fileline.c \
internal.h \
diff --git a/libbacktrace/Makefile.in b/libbacktrace/Makefile.in
index 971406bf6da..a1e144aca17 100644
--- a/libbacktrace/Makefile.in
+++ b/libbacktrace/Makefile.in
@@ -16,7 +16,7 @@
@SET_MAKE@
# Makefile.am -- Backtrace Makefile.
-# Copyright (C) 2012 Free Software Foundation, Inc.
+# Copyright (C) 2012-2013 Free Software Foundation, Inc.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -93,8 +93,8 @@ CONFIG_CLEAN_FILES = backtrace-supported.h
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
am__DEPENDENCIES_1 =
-am_libbacktrace_la_OBJECTS = dwarf.lo fileline.lo posix.lo print.lo \
- state.lo
+am_libbacktrace_la_OBJECTS = atomic.lo dwarf.lo fileline.lo posix.lo \
+ print.lo state.lo
libbacktrace_la_OBJECTS = $(am_libbacktrace_la_OBJECTS)
@NATIVE_TRUE@am__EXEEXT_1 = btest$(EXEEXT)
@NATIVE_TRUE@am_btest_OBJECTS = btest-btest.$(OBJEXT)
@@ -258,6 +258,7 @@ AM_CFLAGS = $(EXTRA_FLAGS) $(WARN_FLAGS) $(PIC_FLAG)
noinst_LTLIBRARIES = libbacktrace.la
libbacktrace_la_SOURCES = \
backtrace.h \
+ atomic.c \
dwarf.c \
fileline.c \
internal.h \
diff --git a/libbacktrace/atomic.c b/libbacktrace/atomic.c
new file mode 100644
index 00000000000..10418abadaa
--- /dev/null
+++ b/libbacktrace/atomic.c
@@ -0,0 +1,113 @@
+/* atomic.c -- Support for atomic functions if not present.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+ Written by Ian Lance Taylor, Google.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ (1) Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ (2) Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ (3) The name of the author may not be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE. */
+
+#include "config.h"
+
+#include <sys/types.h>
+
+#include "backtrace.h"
+#include "backtrace-supported.h"
+#include "internal.h"
+
+/* This file holds implementations of the atomic functions that are
+ used if the host compiler has the sync functions but not the atomic
+ functions, as is true of versions of GCC before 4.7. */
+
+#if !defined (HAVE_ATOMIC_FUNCTIONS) && defined (HAVE_SYNC_FUNCTIONS)
+
+/* Do an atomic load of a pointer. */
+
+void *
+backtrace_atomic_load_pointer (void *arg)
+{
+ void **pp;
+ void *p;
+
+ pp = (void **) arg;
+ p = *pp;
+ while (!__sync_bool_compare_and_swap (pp, p, p))
+ p = *pp;
+ return p;
+}
+
+/* Do an atomic load of an int. */
+
+int
+backtrace_atomic_load_int (int *p)
+{
+ int i;
+
+ i = *p;
+ while (!__sync_bool_compare_and_swap (p, i, i))
+ i = *p;
+ return i;
+}
+
+/* Do an atomic store of a pointer. */
+
+void
+backtrace_atomic_store_pointer (void *arg, void *p)
+{
+ void **pp;
+ void *old;
+
+ pp = (void **) arg;
+ old = *pp;
+ while (!__sync_bool_compare_and_swap (pp, old, p))
+ old = *pp;
+}
+
+/* Do an atomic store of a size_t value. */
+
+void
+backtrace_atomic_store_size_t (size_t *p, size_t v)
+{
+ size_t old;
+
+ old = *p;
+ while (!__sync_bool_compare_and_swap (p, old, v))
+ old = *p;
+}
+
+/* Do an atomic store of an int value. */
+
+void
+backtrace_atomic_store_int (int *p, int v)
+{
+ int old;
+
+ old = *p;
+ while (!__sync_bool_compare_and_swap (p, old, v))
+ old = *p;
+}
+
+#endif
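These CAS-with-identical-values loops are only the fallback; when the configure test added below detects the __atomic builtins, internal.h can presumably map the helpers straight onto them (the exact macro spelling here is an assumption, not a quote of internal.h):

  #ifdef HAVE_ATOMIC_FUNCTIONS
  /* Assumed fast path using the compiler builtins directly.  */
  #define backtrace_atomic_load_pointer(p) __atomic_load_n ((p), __ATOMIC_ACQUIRE)
  #define backtrace_atomic_store_int(p, v) __atomic_store_n ((p), (v), __ATOMIC_RELEASE)
  #endif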
diff --git a/libbacktrace/backtrace.h b/libbacktrace/backtrace.h
index 3be400737c1..33595cf5139 100644
--- a/libbacktrace/backtrace.h
+++ b/libbacktrace/backtrace.h
@@ -169,12 +169,13 @@ extern int backtrace_pcinfo (struct backtrace_state *state, uintptr_t pc,
/* The type of the callback argument to backtrace_syminfo. DATA and
PC are the arguments passed to backtrace_syminfo. SYMNAME is the
name of the symbol for the corresponding code. SYMVAL is the
- value. SYMNAME will be NULL if no error occurred but the symbol
- could not be found. */
+ value and SYMSIZE is the size of the symbol. SYMNAME will be NULL
+ if no error occurred but the symbol could not be found. */
typedef void (*backtrace_syminfo_callback) (void *data, uintptr_t pc,
const char *symname,
- uintptr_t symval);
+ uintptr_t symval,
+ uintptr_t symsize);
/* Given ADDR, an address or program counter in the current program,
call the callback information with the symbol name and value
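A minimal callback matching the widened signature might look like this (the function name and the <stdint.h>/<stdio.h> usage are illustrative, not part of the patch):

  #include <stdint.h>
  #include <stdio.h>

  static void
  print_syminfo (void *data, uintptr_t pc, const char *symname,
                 uintptr_t symval, uintptr_t symsize)
  {
    (void) data;
    if (symname == NULL)
      printf ("no symbol found for pc %#lx\n", (unsigned long) pc);
    else
      printf ("pc %#lx: %s (value %#lx, size %#lx)\n", (unsigned long) pc,
              symname, (unsigned long) symval, (unsigned long) symsize);
  }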
diff --git a/libbacktrace/btest.c b/libbacktrace/btest.c
index c06493f341e..22b08e05030 100644
--- a/libbacktrace/btest.c
+++ b/libbacktrace/btest.c
@@ -92,7 +92,7 @@ struct sdata
struct symdata
{
const char *name;
- uintptr_t val;
+ uintptr_t val, size;
int failed;
};
@@ -238,7 +238,8 @@ error_callback_two (void *vdata, const char *msg, int errnum)
static void
callback_three (void *vdata, uintptr_t pc ATTRIBUTE_UNUSED,
- const char *symname, uintptr_t symval)
+ const char *symname, uintptr_t symval,
+ uintptr_t symsize)
{
struct symdata *data = (struct symdata *) vdata;
@@ -250,6 +251,7 @@ callback_three (void *vdata, uintptr_t pc ATTRIBUTE_UNUSED,
assert (data->name != NULL);
}
data->val = symval;
+ data->size = symsize;
}
/* The backtrace_syminfo error callback function. */
@@ -458,6 +460,7 @@ f23 (int f1line, int f2line)
symdata.name = NULL;
symdata.val = 0;
+ symdata.size = 0;
symdata.failed = 0;
i = backtrace_syminfo (state, addrs[j], callback_three,
@@ -605,12 +608,17 @@ test5 (void)
{
struct symdata symdata;
int i;
+ uintptr_t addr = (uintptr_t) &global;
+
+ if (sizeof (global) > 1)
+ addr += 1;
symdata.name = NULL;
symdata.val = 0;
+ symdata.size = 0;
symdata.failed = 0;
- i = backtrace_syminfo (state, (uintptr_t) &global, callback_three,
+ i = backtrace_syminfo (state, addr, callback_three,
error_callback_three, &symdata);
if (i == 0)
{
@@ -634,6 +642,22 @@ test5 (void)
symdata.name, "global");
symdata.failed = 1;
}
+ else if (symdata.val != (uintptr_t) &global)
+ {
+ fprintf (stderr,
+ "test5: unexpected syminfo value got %lx expected %lx\n",
+ (unsigned long) symdata.val,
+ (unsigned long) (uintptr_t) &global);
+ symdata.failed = 1;
+ }
+ else if (symdata.size != sizeof (global))
+ {
+ fprintf (stderr,
+ "test5: unexpected syminfo size got %lx expected %lx\n",
+ (unsigned long) symdata.size,
+ (unsigned long) sizeof (global));
+ symdata.failed = 1;
+ }
}
printf ("%s: backtrace_syminfo variable\n",
diff --git a/libbacktrace/config.h.in b/libbacktrace/config.h.in
index 48ff63fdc05..87cb805984d 100644
--- a/libbacktrace/config.h.in
+++ b/libbacktrace/config.h.in
@@ -3,6 +3,9 @@
/* ELF size: 32 or 64 */
#undef BACKTRACE_ELF_SIZE
+/* Define to 1 if you have the __atomic functions */
+#undef HAVE_ATOMIC_FUNCTIONS
+
/* Define to 1 if you have the declaration of `strnlen', and to 0 if you
don't. */
#undef HAVE_DECL_STRNLEN
diff --git a/libbacktrace/configure b/libbacktrace/configure
index e6b13c0b706..d6bda6e67b6 100755
--- a/libbacktrace/configure
+++ b/libbacktrace/configure
@@ -11748,6 +11748,44 @@ $as_echo "#define HAVE_SYNC_FUNCTIONS 1" >>confdefs.h
fi
+# Test for __atomic support.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking __atomic extensions" >&5
+$as_echo_n "checking __atomic extensions... " >&6; }
+if test "${libbacktrace_cv_sys_atomic+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "${with_target_subdir}"; then
+ libbacktrace_cv_sys_atomic=yes
+ else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int i;
+int
+main ()
+{
+__atomic_load_n (&i, __ATOMIC_ACQUIRE);
+ __atomic_store_n (&i, 1, __ATOMIC_RELEASE);
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ libbacktrace_cv_sys_atomic=yes
+else
+ libbacktrace_cv_sys_atomic=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libbacktrace_cv_sys_atomic" >&5
+$as_echo "$libbacktrace_cv_sys_atomic" >&6; }
+if test "$libbacktrace_cv_sys_atomic" = "yes"; then
+
+$as_echo "#define HAVE_ATOMIC_FUNCTIONS 1" >>confdefs.h
+
+fi
+
# The library needs to be able to read the executable itself. Compile
# a file to determine the executable format. The awk script
# filetype.awk prints out the file type.
diff --git a/libbacktrace/configure.ac b/libbacktrace/configure.ac
index 48c86203837..f97afbc8b74 100644
--- a/libbacktrace/configure.ac
+++ b/libbacktrace/configure.ac
@@ -194,6 +194,24 @@ if test "$libbacktrace_cv_sys_sync" = "yes"; then
fi
AC_SUBST(BACKTRACE_SUPPORTS_THREADS)
+# Test for __atomic support.
+AC_CACHE_CHECK([__atomic extensions],
+[libbacktrace_cv_sys_atomic],
+[if test -n "${with_target_subdir}"; then
+ libbacktrace_cv_sys_atomic=yes
+ else
+ AC_LINK_IFELSE(
+ [AC_LANG_PROGRAM([int i;],
+ [__atomic_load_n (&i, __ATOMIC_ACQUIRE);
+ __atomic_store_n (&i, 1, __ATOMIC_RELEASE);])],
+ [libbacktrace_cv_sys_atomic=yes],
+ [libbacktrace_cv_sys_atomic=no])
+ fi])
+if test "$libbacktrace_cv_sys_atomic" = "yes"; then
+ AC_DEFINE([HAVE_ATOMIC_FUNCTIONS], 1,
+ [Define to 1 if you have the __atomic functions])
+fi
+
# The library needs to be able to read the executable itself. Compile
# a file to determine the executable format. The awk script
# filetype.awk prints out the file type.
diff --git a/libbacktrace/dwarf.c b/libbacktrace/dwarf.c
index 501afe553d2..0aba2d3f574 100644
--- a/libbacktrace/dwarf.c
+++ b/libbacktrace/dwarf.c
@@ -2643,12 +2643,7 @@ dwarf_lookup_pc (struct backtrace_state *state, struct dwarf_data *ddata,
&& pc < (entry - 1)->high)
{
if (state->threaded)
- {
- /* Use __sync_bool_compare_and_swap to do a
- load-acquire. */
- while (!__sync_bool_compare_and_swap (&u->lines, lines, lines))
- lines = u->lines;
- }
+ lines = (struct line *) backtrace_atomic_load_pointer (&u->lines);
if (lines != (struct line *) (uintptr_t) -1)
break;
@@ -2659,13 +2654,8 @@ dwarf_lookup_pc (struct backtrace_state *state, struct dwarf_data *ddata,
lines = u->lines;
}
- /* Do a load-acquire of u->lines. */
if (state->threaded)
- {
- /* Use __sync_bool_compare_and_swap to do an atomic load. */
- while (!__sync_bool_compare_and_swap (&u->lines, lines, lines))
- lines = u->lines;
- }
+ lines = backtrace_atomic_load_pointer (&u->lines);
new_data = 0;
if (lines == NULL)
@@ -2713,12 +2703,11 @@ dwarf_lookup_pc (struct backtrace_state *state, struct dwarf_data *ddata,
}
else
{
- __sync_bool_compare_and_swap (&u->lines_count, 0, count);
- __sync_bool_compare_and_swap (&u->function_addrs, NULL,
- function_addrs);
- __sync_bool_compare_and_swap (&u->function_addrs_count, 0,
- function_addrs_count);
- __sync_bool_compare_and_swap (&u->lines, NULL, lines);
+ backtrace_atomic_store_size_t (&u->lines_count, count);
+ backtrace_atomic_store_pointer (&u->function_addrs, function_addrs);
+ backtrace_atomic_store_size_t (&u->function_addrs_count,
+ function_addrs_count);
+ backtrace_atomic_store_pointer (&u->lines, lines);
}
}
@@ -2849,11 +2838,7 @@ dwarf_fileline (struct backtrace_state *state, uintptr_t pc,
pp = (struct dwarf_data **) (void *) &state->fileline_data;
while (1)
{
- ddata = *pp;
- /* Atomic load. */
- while (!__sync_bool_compare_and_swap (pp, ddata, ddata))
- ddata = *pp;
-
+ ddata = backtrace_atomic_load_pointer (pp);
if (ddata == NULL)
break;
@@ -2985,10 +2970,7 @@ backtrace_dwarf_add (struct backtrace_state *state,
{
struct dwarf_data *p;
- /* Atomic load. */
- p = *pp;
- while (!__sync_bool_compare_and_swap (pp, p, p))
- p = *pp;
+ p = backtrace_atomic_load_pointer (pp);
if (p == NULL)
break;
diff --git a/libbacktrace/elf.c b/libbacktrace/elf.c
index c58b74e192f..3747c03079c 100644
--- a/libbacktrace/elf.c
+++ b/libbacktrace/elf.c
@@ -98,6 +98,7 @@ dl_iterate_phdr (int (*callback) (struct dl_phdr_info *,
#undef EV_CURRENT
#undef SHN_LORESERVE
#undef SHN_XINDEX
+#undef SHN_UNDEF
#undef SHT_SYMTAB
#undef SHT_STRTAB
#undef SHT_DYNSYM
@@ -183,6 +184,7 @@ typedef struct {
b_elf_wxword sh_entsize; /* Entry size if section holds table */
} b_elf_shdr; /* Elf_Shdr. */
+#define SHN_UNDEF 0x0000 /* Undefined section */
#define SHN_LORESERVE 0xFF00 /* Begin range of reserved indices */
#define SHN_XINDEX 0xFFFF /* Section index is held elsewhere */
@@ -342,6 +344,7 @@ elf_symbol_search (const void *vkey, const void *ventry)
static int
elf_initialize_syminfo (struct backtrace_state *state,
+ uintptr_t base_address,
const unsigned char *symtab_data, size_t symtab_size,
const unsigned char *strtab, size_t strtab_size,
backtrace_error_callback error_callback,
@@ -365,7 +368,8 @@ elf_initialize_syminfo (struct backtrace_state *state,
int info;
info = sym->st_info & 0xf;
- if (info == STT_FUNC || info == STT_OBJECT)
+ if ((info == STT_FUNC || info == STT_OBJECT)
+ && sym->st_shndx != SHN_UNDEF)
++elf_symbol_count;
}
@@ -385,6 +389,8 @@ elf_initialize_syminfo (struct backtrace_state *state,
info = sym->st_info & 0xf;
if (info != STT_FUNC && info != STT_OBJECT)
continue;
+ if (sym->st_shndx == SHN_UNDEF)
+ continue;
if (sym->st_name >= strtab_size)
{
error_callback (data, "symbol string index out of range", 0);
@@ -393,7 +399,7 @@ elf_initialize_syminfo (struct backtrace_state *state,
return 0;
}
elf_symbols[j].name = (const char *) strtab + sym->st_name;
- elf_symbols[j].address = sym->st_value;
+ elf_symbols[j].address = sym->st_value + base_address;
elf_symbols[j].size = sym->st_size;
++j;
}
@@ -436,10 +442,7 @@ elf_add_syminfo_data (struct backtrace_state *state,
{
struct elf_syminfo_data *p;
- /* Atomic load. */
- p = *pp;
- while (!__sync_bool_compare_and_swap (pp, p, p))
- p = *pp;
+ p = backtrace_atomic_load_pointer (pp);
if (p == NULL)
break;
@@ -484,11 +487,7 @@ elf_syminfo (struct backtrace_state *state, uintptr_t addr,
pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
while (1)
{
- edata = *pp;
- /* Atomic load. */
- while (!__sync_bool_compare_and_swap (pp, edata, edata))
- edata = *pp;
-
+ edata = backtrace_atomic_load_pointer (pp);
if (edata == NULL)
break;
@@ -503,9 +502,9 @@ elf_syminfo (struct backtrace_state *state, uintptr_t addr,
}
if (sym == NULL)
- callback (data, addr, NULL, 0);
+ callback (data, addr, NULL, 0, 0);
else
- callback (data, addr, sym->name, sym->address);
+ callback (data, addr, sym->name, sym->address, sym->size);
}
/* Add the backtrace data for one ELF file. */
@@ -733,7 +732,7 @@ elf_add (struct backtrace_state *state, int descriptor, uintptr_t base_address,
if (sdata == NULL)
goto fail;
- if (!elf_initialize_syminfo (state,
+ if (!elf_initialize_syminfo (state, base_address,
symtab_view.data, symtab_shdr->sh_size,
strtab_view.data, strtab_shdr->sh_size,
error_callback, data, sdata))
@@ -863,12 +862,8 @@ phdr_callback (struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED,
fileline elf_fileline_fn;
int found_dwarf;
- /* There is not much we can do if we don't have the module name. If
- the base address is 0, this is probably the executable, which we
- already loaded. */
- if (info->dlpi_name == NULL
- || info->dlpi_name[0] == '\0'
- || info->dlpi_addr == 0)
+ /* There is not much we can do if we don't have the module name. */
+ if (info->dlpi_name == NULL || info->dlpi_name[0] == '\0')
return 0;
descriptor = backtrace_open (info->dlpi_name, pd->error_callback, pd->data,
@@ -900,7 +895,6 @@ backtrace_initialize (struct backtrace_state *state, int descriptor,
{
int found_sym;
int found_dwarf;
- syminfo elf_syminfo_fn;
fileline elf_fileline_fn;
struct phdr_data pd;
@@ -917,18 +911,19 @@ backtrace_initialize (struct backtrace_state *state, int descriptor,
dl_iterate_phdr (phdr_callback, (void *) &pd);
- elf_syminfo_fn = found_sym ? elf_syminfo : elf_nosyms;
if (!state->threaded)
{
- if (state->syminfo_fn == NULL || found_sym)
- state->syminfo_fn = elf_syminfo_fn;
+ if (found_sym)
+ state->syminfo_fn = elf_syminfo;
+ else if (state->syminfo_fn == NULL)
+ state->syminfo_fn = elf_nosyms;
}
else
{
- __sync_bool_compare_and_swap (&state->syminfo_fn, NULL, elf_syminfo_fn);
if (found_sym)
- __sync_bool_compare_and_swap (&state->syminfo_fn, elf_nosyms,
- elf_syminfo_fn);
+ backtrace_atomic_store_pointer (&state->syminfo_fn, elf_syminfo);
+ else
+ __sync_bool_compare_and_swap (&state->syminfo_fn, NULL, elf_nosyms);
}
if (!state->threaded)
@@ -940,11 +935,7 @@ backtrace_initialize (struct backtrace_state *state, int descriptor,
{
fileline current_fn;
- /* Atomic load. */
- current_fn = state->fileline_fn;
- while (!__sync_bool_compare_and_swap (&state->fileline_fn, current_fn,
- current_fn))
- current_fn = state->fileline_fn;
+ current_fn = backtrace_atomic_load_pointer (&state->fileline_fn);
if (current_fn == NULL || current_fn == elf_nodebug)
*fileline_fn = elf_fileline_fn;
}
diff --git a/libbacktrace/fileline.c b/libbacktrace/fileline.c
index e5c39be8e0e..cc063f52c0d 100644
--- a/libbacktrace/fileline.c
+++ b/libbacktrace/fileline.c
@@ -58,15 +58,10 @@ fileline_initialize (struct backtrace_state *state,
int called_error_callback;
int descriptor;
- failed = state->fileline_initialization_failed;
-
- if (state->threaded)
- {
- /* Use __sync_bool_compare_and_swap to do an atomic load. */
- while (!__sync_bool_compare_and_swap
- (&state->fileline_initialization_failed, failed, failed))
- failed = state->fileline_initialization_failed;
- }
+ if (!state->threaded)
+ failed = state->fileline_initialization_failed;
+ else
+ failed = backtrace_atomic_load_int (&state->fileline_initialization_failed);
if (failed)
{
@@ -74,13 +69,10 @@ fileline_initialize (struct backtrace_state *state,
return 0;
}
- fileline_fn = state->fileline_fn;
- if (state->threaded)
- {
- while (!__sync_bool_compare_and_swap (&state->fileline_fn, fileline_fn,
- fileline_fn))
- fileline_fn = state->fileline_fn;
- }
+ if (!state->threaded)
+ fileline_fn = state->fileline_fn;
+ else
+ fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn);
if (fileline_fn != NULL)
return 1;
@@ -151,8 +143,7 @@ fileline_initialize (struct backtrace_state *state,
if (!state->threaded)
state->fileline_initialization_failed = 1;
else
- __sync_bool_compare_and_swap (&state->fileline_initialization_failed,
- 0, failed);
+ backtrace_atomic_store_int (&state->fileline_initialization_failed, 1);
return 0;
}
@@ -160,15 +151,10 @@ fileline_initialize (struct backtrace_state *state,
state->fileline_fn = fileline_fn;
else
{
- __sync_bool_compare_and_swap (&state->fileline_fn, NULL, fileline_fn);
-
- /* At this point we know that state->fileline_fn is not NULL.
- Either we stored our value, or some other thread stored its
- value. If some other thread stored its value, we leak the
- one we just initialized. Either way, state->fileline_fn is
- initialized. The compare_and_swap is a full memory barrier,
- so we should have full access to that value even if it was
- created by another thread. */
+ backtrace_atomic_store_pointer (&state->fileline_fn, fileline_fn);
+
+ /* Note that if two threads initialize at once, one of the data
+ sets may be leaked. */
}
return 1;
diff --git a/libbacktrace/internal.h b/libbacktrace/internal.h
index 1ea664a0bcd..c93e89f36f4 100644
--- a/libbacktrace/internal.h
+++ b/libbacktrace/internal.h
@@ -65,7 +65,48 @@ POSSIBILITY OF SUCH DAMAGE. */
#define __sync_lock_test_and_set(A, B) (abort(), 0)
#define __sync_lock_release(A) abort()
-#endif /* !defined(HAVE_SYNC_FUNCTIONS) */
+#endif /* !defined (HAVE_SYNC_FUNCTIONS) */
+
+#ifdef HAVE_ATOMIC_FUNCTIONS
+
+/* We have the atomic builtin functions. */
+
+#define backtrace_atomic_load_pointer(p) \
+ __atomic_load_n ((p), __ATOMIC_ACQUIRE)
+#define backtrace_atomic_load_int(p) \
+ __atomic_load_n ((p), __ATOMIC_ACQUIRE)
+#define backtrace_atomic_store_pointer(p, v) \
+ __atomic_store_n ((p), (v), __ATOMIC_RELEASE)
+#define backtrace_atomic_store_size_t(p, v) \
+ __atomic_store_n ((p), (v), __ATOMIC_RELEASE)
+#define backtrace_atomic_store_int(p, v) \
+ __atomic_store_n ((p), (v), __ATOMIC_RELEASE)
+
+#else /* !defined (HAVE_ATOMIC_FUNCTIONS) */
+#ifdef HAVE_SYNC_FUNCTIONS
+
+/* We have the sync functions but not the atomic functions. Define
+ the atomic ones in terms of the sync ones. */
+
+extern void *backtrace_atomic_load_pointer (void *);
+extern int backtrace_atomic_load_int (int *);
+extern void backtrace_atomic_store_pointer (void *, void *);
+extern void backtrace_atomic_store_size_t (size_t *, size_t);
+extern void backtrace_atomic_store_int (int *, int);
+
+#else /* !defined (HAVE_SYNC_FUNCTIONS) */
+
+/* We have neither the sync nor the atomic functions. These will
+ never be called. */
+
+#define backtrace_atomic_load_pointer(p) (abort(), 0)
+#define backtrace_atomic_load_int(p) (abort(), 0)
+#define backtrace_atomic_store_pointer(p, v) abort()
+#define backtrace_atomic_store_size_t(p, v) abort()
+#define backtrace_atomic_store_int(p, v) abort()
+
+#endif /* !defined (HAVE_SYNC_FUNCTIONS) */
+#endif /* !defined (HAVE_ATOMIC_FUNCTIONS) */
/* The type of the function that collects file/line information. This
is like backtrace_pcinfo. */
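
As a rough sketch (not part of the patch), the sync-only fallback declared above could be defined out of line using the same compare-and-swap-as-load idiom that the patch removes from the individual call sites in dwarf.c and elf.c: a CAS with identical old and new values acts as an atomic load with a full barrier.

/* Possible definition for the HAVE_SYNC_FUNCTIONS-only case; this is an
   assumption about how the fallback might look, not code from the patch.  */
void *
backtrace_atomic_load_pointer (void *arg)
{
  void **pp = (void **) arg;
  void *p = *pp;

  /* Retrying the no-op CAS until it succeeds yields the current value of
     *pp together with a full memory barrier.  */
  while (!__sync_bool_compare_and_swap (pp, p, p))
    p = *pp;
  return p;
}
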
diff --git a/libcpp/ChangeLog b/libcpp/ChangeLog
index 5e38c4b5dff..c3391b4959d 100644
--- a/libcpp/ChangeLog
+++ b/libcpp/ChangeLog
@@ -1,3 +1,7 @@
+2013-11-18 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * lex.c (search_line_fast): Correct for little endian.
+
2013-11-15 Joseph Myers <joseph@codesourcery.com>
* ucnid.tab: Add C11 and C11NOSTART data.
diff --git a/libcpp/lex.c b/libcpp/lex.c
index 99c2140c357..80829d69b98 100644
--- a/libcpp/lex.c
+++ b/libcpp/lex.c
@@ -559,8 +559,13 @@ search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
beginning with all ones and shifting in zeros according to the
mis-alignment. The LVSR instruction pulls the exact shift we
want from the address. */
+#ifdef __BIG_ENDIAN__
mask = __builtin_vec_lvsr(0, s);
mask = __builtin_vec_perm(zero, ones, mask);
+#else
+ mask = __builtin_vec_lvsl(0, s);
+ mask = __builtin_vec_perm(ones, zero, mask);
+#endif
data &= mask;
/* While altivec loads mask addresses, we still need to align S so
@@ -624,7 +629,11 @@ search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
/* L now contains 0xff in bytes for which we matched one of the
relevant characters. We can find the byte index by finding
its bit index and dividing by 8. */
+#ifdef __BIG_ENDIAN__
l = __builtin_clzl(l) >> 3;
+#else
+ l = __builtin_ctzl(l) >> 3;
+#endif
return s + l;
#undef N
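
A small illustration (not from the patch; it uses a 64-bit scalar where the real code uses a 16-byte Altivec vector, and assumes a 64-bit unsigned long) of why the matched byte index is recovered with __builtin_clzl on big-endian targets but __builtin_ctzl on little-endian ones: the same match position lands in the high-order bits under one lane order and in the low-order bits under the other.

#include <stdio.h>

int
main (void)
{
  /* Hypothetical masks for a match at byte index 2 of the buffer.  In
     big-endian lane order the 0xff byte sits toward the most significant
     end; in little-endian lane order it sits toward the least significant
     end.  Both computations print 2.  */
  unsigned long be_mask = 0x0000ff0000000000UL;
  unsigned long le_mask = 0x0000000000ff0000UL;

  printf ("big endian:    %d\n", (int) (__builtin_clzl (be_mask) >> 3));
  printf ("little endian: %d\n", (int) (__builtin_ctzl (le_mask) >> 3));
  return 0;
}
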
diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index 311cd23a57a..b5224f1989e 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,23 @@
+2013-11-18 Jan Hubicka <jh@suse.cz>
+
+ * libgcov-driver.c (run_accounted): Make global level static.
+ (gcov_exit_merge_summary): Silence warning; do not clear
+ run_accounted here.
+ (gcov_exit): Clear it here.
+
+ * libgcov-driver.c (gcov_exit_merge_summary): Fix setting
+ run_accounted.
+
+ * libgcov-driver.c (get_gcov_dump_complete): Update comments.
+ (all_prg, crc32): Remove static vars.
+ (gcov_exit_compute_summary): Rewrite to return crc32; do not clear
+ all_prg.
+ (gcov_exit_merge_gcda): Add crc32 parameter.
+ (gcov_exit_merge_summary): Add crc32 and all_prg parameter;
+ do not account run if it was already accounted.
+ (gcov_exit_dump_gcov): Add crc32 and all_prg parameters.
+ (gcov_exit): Initialize all_prg; update.
+
2013-11-15 Andreas Schwab <schwab@linux-m68k.org>
* configure: Regenerate.
diff --git a/libgcc/libgcov-driver.c b/libgcc/libgcov-driver.c
index 93bf2fb7ace..ec6dffd76b3 100644
--- a/libgcc/libgcov-driver.c
+++ b/libgcc/libgcov-driver.c
@@ -96,7 +96,7 @@ static size_t gcov_max_filename = 0;
/* Flag when the profile has already been dumped via __gcov_dump(). */
static int gcov_dump_complete;
-/* A global functino that get the vaule of gcov_dump_complete. */
+/* A global function that gets the value of gcov_dump_complete. */
int
get_gcov_dump_complete (void)
@@ -319,24 +319,21 @@ gcov_compute_histogram (struct gcov_summary *sum)
/* summary for program. */
static struct gcov_summary this_prg;
-#if !GCOV_LOCKED
-/* summary for all instances of program. */
-static struct gcov_summary all_prg;
-#endif
-/* crc32 for this program. */
-static gcov_unsigned_t crc32;
/* gcda filename. */
static char *gi_filename;
/* buffer for the fn_data from another program. */
static struct gcov_fn_buffer *fn_buffer;
/* buffer for summary from other programs to be written out. */
static struct gcov_summary_buffer *sum_buffer;
+/* If the application calls fork or exec multiple times, we end up storing
+ the profile repeatedly. We should not account this as multiple runs,
+ or functions executed once may mistakenly become cold. */
+static int run_accounted = 0;
/* This function computes the program level summary and the histogram.
- It initializes ALL_PRG, computes CRC32, and stores the summary in
- THIS_PRG. All these three variables are file statics. */
+ It computes and returns CRC32 and stores the summary in THIS_PRG. */
-static void
+static gcov_unsigned_t
gcov_exit_compute_summary (void)
{
struct gcov_info *gi_ptr;
@@ -346,10 +343,8 @@ gcov_exit_compute_summary (void)
int f_ix;
unsigned t_ix;
gcov_unsigned_t c_num;
+ gcov_unsigned_t crc32 = 0;
-#if !GCOV_LOCKED
- memset (&all_prg, 0, sizeof (all_prg));
-#endif
/* Find the totals for this execution. */
memset (&this_prg, 0, sizeof (this_prg));
for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
@@ -391,6 +386,7 @@ gcov_exit_compute_summary (void)
}
}
gcov_compute_histogram (&this_prg);
+ return crc32;
}
/* A struct that bundles all the related information about the
@@ -412,7 +408,8 @@ static int
gcov_exit_merge_gcda (struct gcov_info *gi_ptr,
struct gcov_summary *prg_p,
gcov_position_t *summary_pos_p,
- gcov_position_t *eof_pos_p)
+ gcov_position_t *eof_pos_p,
+ gcov_unsigned_t crc32)
{
gcov_unsigned_t tag, length;
unsigned t_ix;
@@ -652,13 +649,16 @@ gcov_exit_write_gcda (const struct gcov_info *gi_ptr,
Return -1 on error. Return 0 on success. */
static int
-gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *prg)
+gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *prg,
+ gcov_unsigned_t crc32,
+ struct gcov_summary *all_prg __attribute__ ((unused)))
{
struct gcov_ctr_summary *cs_prg, *cs_tprg;
-#if !GCOV_LOCKED
- struct gcov_ctr_summary *cs_all;
-#endif
unsigned t_ix;
+#if !GCOV_LOCKED
+ /* summary for all instances of program. */
+ struct gcov_ctr_summary *cs_all;
+#endif
/* Merge the summaries. */
for (t_ix = 0; t_ix < GCOV_COUNTERS_SUMMABLE; t_ix++)
@@ -668,13 +668,17 @@ gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *pr
if (gi_ptr->merge[t_ix])
{
- if (!cs_prg->runs++)
+ int first = !cs_prg->runs;
+
+ if (!run_accounted)
+ cs_prg->runs++;
+ if (first)
cs_prg->num = cs_tprg->num;
cs_prg->sum_all += cs_tprg->sum_all;
if (cs_prg->run_max < cs_tprg->run_max)
cs_prg->run_max = cs_tprg->run_max;
cs_prg->sum_max += cs_tprg->run_max;
- if (cs_prg->runs == 1)
+ if (first)
memcpy (cs_prg->histogram, cs_tprg->histogram,
sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
else
@@ -686,9 +690,8 @@ gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *pr
gi_filename);
return -1;
}
-
#if !GCOV_LOCKED
- cs_all = &all_prg.ctrs[t_ix];
+ cs_all = &all_prg->ctrs[t_ix];
if (!cs_all->runs && cs_prg->runs)
{
cs_all->num = cs_prg->num;
@@ -697,7 +700,7 @@ gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *pr
cs_all->run_max = cs_prg->run_max;
cs_all->sum_max = cs_prg->sum_max;
}
- else if (!all_prg.checksum
+ else if (!all_prg->checksum
/* Don't compare the histograms, which may have slight
variations depending on the order they were updated
due to the truncating integer divides used in the
@@ -711,7 +714,7 @@ gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *pr
gcov_error ("profiling:%s:Data file mismatch - some "
"data files may have been concurrently "
"updated without locking support\n", gi_filename);
- all_prg.checksum = ~0u;
+ all_prg->checksum = ~0u;
}
#endif
}
@@ -729,7 +732,8 @@ gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *pr
summaries separate. */
static void
-gcov_exit_dump_gcov (struct gcov_info *gi_ptr, struct gcov_filename_aux *gf)
+gcov_exit_dump_gcov (struct gcov_info *gi_ptr, struct gcov_filename_aux *gf,
+ gcov_unsigned_t crc32, struct gcov_summary *all_prg)
{
struct gcov_summary prg; /* summary for this object over all program. */
int error;
@@ -753,7 +757,8 @@ gcov_exit_dump_gcov (struct gcov_info *gi_ptr, struct gcov_filename_aux *gf)
gcov_error ("profiling:%s:Not a gcov data file\n", gi_filename);
goto read_fatal;
}
- error = gcov_exit_merge_gcda (gi_ptr, &prg, &summary_pos, &eof_pos);
+ error = gcov_exit_merge_gcda (gi_ptr, &prg, &summary_pos, &eof_pos,
+ crc32);
if (error == -1)
goto read_fatal;
}
@@ -766,7 +771,7 @@ gcov_exit_dump_gcov (struct gcov_info *gi_ptr, struct gcov_filename_aux *gf)
summary_pos = eof_pos;
}
- error = gcov_exit_merge_summary (gi_ptr, &prg);
+ error = gcov_exit_merge_summary (gi_ptr, &prg, crc32, all_prg);
if (error == -1)
goto read_fatal;
@@ -794,19 +799,25 @@ gcov_exit (void)
{
struct gcov_info *gi_ptr;
struct gcov_filename_aux gf;
+ gcov_unsigned_t crc32;
+ struct gcov_summary all_prg;
/* Prevent the counters from being dumped a second time on exit when the
application already wrote out the profile using __gcov_dump(). */
if (gcov_dump_complete)
return;
- gcov_exit_compute_summary ();
+ crc32 = gcov_exit_compute_summary ();
allocate_filename_struct (&gf);
+#if !GCOV_LOCKED
+ memset (&all_prg, 0, sizeof (all_prg));
+#endif
/* Now merge each file. */
for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
- gcov_exit_dump_gcov (gi_ptr, &gf);
+ gcov_exit_dump_gcov (gi_ptr, &gf, crc32, &all_prg);
+ run_accounted = 1;
if (gi_filename)
free (gi_filename);
diff --git a/libgo/configure b/libgo/configure
index d2ad366bbf0..35bf82088fd 100755
--- a/libgo/configure
+++ b/libgo/configure
@@ -13555,7 +13555,7 @@ else
LIBGO_IS_OPENBSD_FALSE=
fi
- if test $is_dragonly = yes; then
+ if test $is_dragonfly = yes; then
LIBGO_IS_DRAGONFLY_TRUE=
LIBGO_IS_DRAGONFLY_FALSE='#'
else
diff --git a/libgo/configure.ac b/libgo/configure.ac
index 1e84dc7c380..0ffcaf9af03 100644
--- a/libgo/configure.ac
+++ b/libgo/configure.ac
@@ -154,7 +154,7 @@ AM_CONDITIONAL(LIBGO_IS_IRIX, test $is_irix = yes)
AM_CONDITIONAL(LIBGO_IS_LINUX, test $is_linux = yes)
AM_CONDITIONAL(LIBGO_IS_NETBSD, test $is_netbsd = yes)
AM_CONDITIONAL(LIBGO_IS_OPENBSD, test $is_openbsd = yes)
-AM_CONDITIONAL(LIBGO_IS_DRAGONFLY, test $is_dragonly = yes)
+AM_CONDITIONAL(LIBGO_IS_DRAGONFLY, test $is_dragonfly = yes)
AM_CONDITIONAL(LIBGO_IS_RTEMS, test $is_rtems = yes)
AM_CONDITIONAL(LIBGO_IS_SOLARIS, test $is_solaris = yes)
AC_SUBST(GOOS)
diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go
index 6ab02f7d854..918adce887f 100644
--- a/libgo/go/reflect/all_test.go
+++ b/libgo/go/reflect/all_test.go
@@ -1434,6 +1434,46 @@ func TestFunc(t *testing.T) {
}
}
+type emptyStruct struct{}
+
+type nonEmptyStruct struct {
+ member int
+}
+
+func returnEmpty() emptyStruct {
+ return emptyStruct{}
+}
+
+func takesEmpty(e emptyStruct) {
+}
+
+func returnNonEmpty(i int) nonEmptyStruct {
+ return nonEmptyStruct{member: i}
+}
+
+func takesNonEmpty(n nonEmptyStruct) int {
+ return n.member
+}
+
+func TestCallWithStruct(t *testing.T) {
+ r := ValueOf(returnEmpty).Call([]Value{})
+ if len(r) != 1 || r[0].Type() != TypeOf(emptyStruct{}) {
+ t.Errorf("returning empty struct returned %s instead", r)
+ }
+ r = ValueOf(takesEmpty).Call([]Value{ValueOf(emptyStruct{})})
+ if len(r) != 0 {
+ t.Errorf("takesEmpty returned values: %s", r)
+ }
+ r = ValueOf(returnNonEmpty).Call([]Value{ValueOf(42)})
+ if len(r) != 1 || r[0].Type() != TypeOf(nonEmptyStruct{}) || r[0].Field(0).Int() != 42 {
+ t.Errorf("returnNonEmpty returned %s", r)
+ }
+ r = ValueOf(takesNonEmpty).Call([]Value{ValueOf(nonEmptyStruct{member: 42})})
+ if len(r) != 1 || r[0].Type() != TypeOf(1) || r[0].Int() != 42 {
+ t.Errorf("takesNonEmpty returned %s", r)
+ }
+}
+
func TestMakeFunc(t *testing.T) {
switch runtime.GOARCH {
case "amd64", "386":
diff --git a/libgo/runtime/go-caller.c b/libgo/runtime/go-caller.c
index c49704df416..e97b85097bc 100644
--- a/libgo/runtime/go-caller.c
+++ b/libgo/runtime/go-caller.c
@@ -135,7 +135,7 @@ __go_file_line (uintptr pc, String *fn, String *file, intgo *line)
static void
syminfo_callback (void *data, uintptr_t pc __attribute__ ((unused)),
const char *symname __attribute__ ((unused)),
- uintptr_t address)
+ uintptr_t address, uintptr_t size __attribute__ ((unused)))
{
uintptr_t *pval = (uintptr_t *) data;
diff --git a/libgo/runtime/go-reflect-call.c b/libgo/runtime/go-reflect-call.c
index 0fed68a50e7..07b99d7433b 100644
--- a/libgo/runtime/go-reflect-call.c
+++ b/libgo/runtime/go-reflect-call.c
@@ -98,9 +98,12 @@ go_struct_to_ffi (const struct __go_struct_type *descriptor)
const struct __go_struct_field *fields;
int i;
+ field_count = descriptor->__fields.__count;
+ if (field_count == 0) {
+ return &ffi_type_void;
+ }
ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
ret->type = FFI_TYPE_STRUCT;
- field_count = descriptor->__fields.__count;
fields = (const struct __go_struct_field *) descriptor->__fields.__values;
ret->elements = (ffi_type **) __go_alloc ((field_count + 1)
* sizeof (ffi_type *));
diff --git a/libgo/testsuite/gotest b/libgo/testsuite/gotest
index 4015ed1e81e..155c7a8619a 100755
--- a/libgo/testsuite/gotest
+++ b/libgo/testsuite/gotest
@@ -369,7 +369,7 @@ localname() {
{
text="T"
case "$GOARCH" in
- ppc64) text="D" ;;
+ ppc64) text="[TD]" ;;
esac
symtogo='sed -e s/_test/XXXtest/ -e s/.*_\([^_]*\.\)/\1/ -e s/XXXtest/_test/'
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 5af235f1c7b..f372d62dcbc 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,55 @@
+2013-11-20 David Edelsohn <dje.gcc@gmail.com>
+
+ * testsuite/17_intro/static.cc: Ignore AIX TOC reload warnings.
+
+2013-11-19 Jonathan Wakely <jwakely.gcc@gmail.com>
+
+ * testsuite/23_containers/forward_list/allocator/noexcept.cc: Change
+ to compile-only test. Adjust swap overload to handle rebound
+ allocators.
+ * testsuite/23_containers/map/allocator/noexcept.cc: Likewise.
+ * testsuite/23_containers/multimap/allocator/noexcept.cc: Likewise.
+ * testsuite/23_containers/multiset/allocator/noexcept.cc: Likewise.
+ * testsuite/23_containers/set/allocator/noexcept.cc: Likewise.
+ * testsuite/23_containers/unordered_map/allocator/noexcept.cc:
+ Likewise.
+ * testsuite/23_containers/unordered_multimap/allocator/noexcept.cc:
+ Likewise.
+ * testsuite/23_containers/unordered_multiset/allocator/noexcept.cc:
+ Likewise.
+ * testsuite/23_containers/unordered_set/allocator/noexcept.cc:
+ Likewise.
+ * testsuite/23_containers/vector/allocator/noexcept.cc: Likewise.
+ * testsuite/23_containers/vector/allocator/swap.cc: Add elements
+ before swapping.
+
+ * config/abi/pre/gnu.ver (_ZNSt11regex_errorC*): Export regex_error
+ constructors.
+ * testsuite/20_util/addressof/1.cc: Remove { dg-do compile }.
+ * testsuite/20_util/allocator_traits/members/destroy.cc: Likewise.
+ * testsuite/20_util/allocator_traits/members/select.cc: Likewise. Fix
+ failure.
+ * testsuite/28_regex/basic_regex/ctors/extended/cstring.cc: Likewise.
+ * testsuite/28_regex/init-list.cc: Likewise.
+ * testsuite/28_regex/regex_error/regex_error.cc: Likewise.
+ * testsuite/28_regex/sub_match/length.cc: Likewise. Add main.
+ * testsuite/28_regex/sub_match/cast_char.cc: Likewise. Fix test.
+ * testsuite/28_regex/sub_match/cast_wchar_t.cc: Likewise.
+ * testsuite/tr1/7_regular_expressions/regex/cons/char/
+ c_string_extended.cc: Delete.
+
+2013-11-19 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * include/experimental/string_view (_S_max_size): Remove.
+ (basic_string_view<>::max_size): Adjust.
+ * testsuite/experimental/string_view/capacity/1.cc: Clean-up.
+ * testsuite/experimental/string_view/inserters/pod/10081-out.cc:
+ Likewise.
+
+2013-11-19 Jonathan Wakely <jwakely.gcc@gmail.com>
+
+ * doc/xml/manual/status_cxx2014.xml: Create new table for TS statuses.
+
2013-11-18 Jonathan Wakely <jwakely.gcc@gmail.com>
* include/bits/shared_ptr_base.h (_Sp_counted_base<_S_single>): Use
diff --git a/libstdc++-v3/config/abi/pre/gnu.ver b/libstdc++-v3/config/abi/pre/gnu.ver
index d3c399f6bf2..323579399e6 100644
--- a/libstdc++-v3/config/abi/pre/gnu.ver
+++ b/libstdc++-v3/config/abi/pre/gnu.ver
@@ -1368,6 +1368,9 @@ GLIBCXX_3.4.20 {
# std::__throw_out_of_range_fmt(char const*, ...)
_ZSt24__throw_out_of_range_fmtPKcz;
+ # std::regex_error::regex_error(std::regex_constants::error_type)
+ _ZNSt11regex_errorC[01]ENSt15regex_constants10error_typeE;
+
} GLIBCXX_3.4.19;
# Symbols in the support library (libsupc++) have their own tag.
diff --git a/libstdc++-v3/doc/xml/manual/status_cxx2014.xml b/libstdc++-v3/doc/xml/manual/status_cxx2014.xml
index 0e0ac37a9e8..bb389e876b3 100644
--- a/libstdc++-v3/doc/xml/manual/status_cxx2014.xml
+++ b/libstdc++-v3/doc/xml/manual/status_cxx2014.xml
@@ -20,8 +20,8 @@ presence of the required flag.
</para>
<para>
-This page describes the C++14 support in mainline GCC SVN, not in any
-particular release.
+This page describes the C++14 and library TS support in mainline GCC SVN,
+not in any particular release.
</para>
<table frame="all">
@@ -223,29 +223,53 @@ particular release.
<entry/>
</row>
-
<row>
<entry>
- <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/JTC1/sc22/WG21/docs/papers/2013/n3793.html">
- N3672
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/JTC1/sc22/WG21/docs/papers/2013/n3655.pdf">
+ N3655
</link>
</entry>
- <entry>A proposal to add a utility class to represent optional objects</entry>
+ <entry>TransformationTraits Redux</entry>
<entry>Y</entry>
- <entry>Moved from C++14 to Library Fundamentals TS</entry>
+ <entry/>
</row>
<row>
+ <?dbhtml bgcolor="#C8B0B0" ?>
<entry>
- <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/JTC1/sc22/WG21/docs/papers/2013/n3655.pdf">
- N3655
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/JTC1/sc22/WG21/docs/papers/2013/n3644.pdf">
+ N3644
</link>
</entry>
- <entry>TransformationTraits Redux</entry>
- <entry>Y</entry>
+ <entry>Null Forward Iterators</entry>
+ <entry>N</entry>
<entry/>
</row>
+ </tbody>
+</tgroup>
+</table>
+
+
+<table frame="all">
+<title>C++ Technical Specifications Implementation Status</title>
+
+<tgroup cols="4" align="left" colsep="0" rowsep="1">
+<colspec colname="c1"/>
+<colspec colname="c2"/>
+<colspec colname="c3"/>
+<colspec colname="c4"/>
+ <thead>
+ <row>
+ <entry>Paper</entry>
+ <entry>Title</entry>
+ <entry>Status</entry>
+ <entry>Comments</entry>
+ </row>
+ </thead>
+
+ <tbody>
+
<row>
<?dbhtml bgcolor="#C8B0B0" ?>
<entry>
@@ -255,21 +279,44 @@ particular release.
</entry>
<entry>C++ Dynamic Arrays</entry>
<entry>N</entry>
- <entry>Moved from C++14 to Library Fundamentals TS</entry>
+ <entry>Array Extensions TS</entry>
</row>
<row>
- <?dbhtml bgcolor="#C8B0B0" ?>
<entry>
- <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/JTC1/sc22/WG21/docs/papers/2013/n3644.pdf">
- N3644
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://www.open-std.org/JTC1/sc22/WG21/docs/papers/2013/n3793.html">
+ N3672
</link>
</entry>
- <entry>Null Forward Iterators</entry>
- <entry>N</entry>
- <entry/>
+ <entry>A proposal to add a utility class to represent optional objects</entry>
+ <entry>Y</entry>
+ <entry>Library Fundamentals TS</entry>
</row>
+ <row>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3762.html">
+ N3762
+ </link>
+ </entry>
+ <entry><code>string_view</code>: a non-owning reference to a string</entry>
+ <entry>Y</entry>
+ <entry>Library Fundamentals TS</entry>
+ </row>
+
+ <row>
+ <?dbhtml bgcolor="#C8C8B0" ?>
+ <entry>
+ <link xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3790.html">
+ N3790
+ </link>
+ </entry>
+ <entry>File System</entry>
+ <entry>WIP</entry>
+ <entry></entry>
+ </row>
+
+
</tbody>
</tgroup>
</table>
diff --git a/libstdc++-v3/include/experimental/string_view b/libstdc++-v3/include/experimental/string_view
index fbe982ace28..6a95e8d0bd0 100644
--- a/libstdc++-v3/include/experimental/string_view
+++ b/libstdc++-v3/include/experimental/string_view
@@ -98,7 +98,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
constexpr basic_string_view(const basic_string_view&) noexcept = default;
template<typename _Allocator>
- basic_string_view(const basic_string<_CharT, _Traits, _Allocator>& __str) noexcept
+ basic_string_view(const basic_string<_CharT, _Traits,
+ _Allocator>& __str) noexcept
: _M_len{__str.length()}, _M_str{__str.data()}
{ }
@@ -159,7 +160,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
constexpr size_type
max_size() const noexcept
- { return _S_max_size; }
+ { return ((npos - sizeof(size_type) - sizeof(void*))
+ / sizeof(value_type) / 4); }
constexpr bool
empty() const noexcept
@@ -406,12 +408,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
private:
-
- // Compute max_size similarly to how string does it.
- static const size_type _S_max_size = (npos
- - sizeof(size_type) - sizeof(void*))
- / sizeof(value_type) / 4;
-
static constexpr const int
_S_compare(size_type __n1, size_type __n2) noexcept
{
diff --git a/libstdc++-v3/testsuite/17_intro/static.cc b/libstdc++-v3/testsuite/17_intro/static.cc
index b7ce9f4dcaa..44d60ea4506 100644
--- a/libstdc++-v3/testsuite/17_intro/static.cc
+++ b/libstdc++-v3/testsuite/17_intro/static.cc
@@ -29,3 +29,8 @@ int main()
std::cout << "i am old-skool\n";
return 0;
}
+// Ignore TOC warnings on AIX
+// { dg-prune-output "ld: 0711-768 WARNING" }
+// { dg-prune-output "recognized no-op" }
+// { dg-prune-output "TOC-reload" }
+
diff --git a/libstdc++-v3/testsuite/20_util/addressof/1.cc b/libstdc++-v3/testsuite/20_util/addressof/1.cc
index e874258b501..2b32855b72a 100644
--- a/libstdc++-v3/testsuite/20_util/addressof/1.cc
+++ b/libstdc++-v3/testsuite/20_util/addressof/1.cc
@@ -1,5 +1,4 @@
// { dg-options "-std=gnu++0x" }
-// { dg-do compile }
// 2010-05-20 Paolo Carlini <paolo.carlini@oracle.com>
diff --git a/libstdc++-v3/testsuite/20_util/allocator_traits/members/destroy.cc b/libstdc++-v3/testsuite/20_util/allocator_traits/members/destroy.cc
index a8e9d0ad44a..56d598a21d1 100644
--- a/libstdc++-v3/testsuite/20_util/allocator_traits/members/destroy.cc
+++ b/libstdc++-v3/testsuite/20_util/allocator_traits/members/destroy.cc
@@ -1,5 +1,4 @@
// { dg-options "-std=gnu++0x" }
-// { dg-do compile }
// Copyright (C) 2011-2013 Free Software Foundation, Inc.
//
diff --git a/libstdc++-v3/testsuite/20_util/allocator_traits/members/select.cc b/libstdc++-v3/testsuite/20_util/allocator_traits/members/select.cc
index 2ef2e347cc5..2671f18257d 100644
--- a/libstdc++-v3/testsuite/20_util/allocator_traits/members/select.cc
+++ b/libstdc++-v3/testsuite/20_util/allocator_traits/members/select.cc
@@ -1,5 +1,4 @@
// { dg-options "-std=gnu++0x" }
-// { dg-do compile }
// Copyright (C) 2011-2013 Free Software Foundation, Inc.
//
@@ -54,7 +53,7 @@ void test01()
void test02()
{
- typedef std::allocator_traits<alloc1<X>> traits_type;
+ typedef std::allocator_traits<alloc2<X>> traits_type;
traits_type::allocator_type a{1};
const traits_type::allocator_type& a2
= traits_type::select_on_container_copy_construction(a);
diff --git a/libstdc++-v3/testsuite/23_containers/forward_list/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/forward_list/allocator/noexcept.cc
index 0ee16705a91..635fb77bec5 100644
--- a/libstdc++-v3/testsuite/23_containers/forward_list/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/forward_list/allocator/noexcept.cc
@@ -15,23 +15,21 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=gnu++0x" }
#include <forward_list>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l, propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -64,13 +62,5 @@ void test03()
test_type v1(alloc_type(1));
test_type v2(alloc_type(2));
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
- // static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
-}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
+ static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
diff --git a/libstdc++-v3/testsuite/23_containers/map/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/map/allocator/noexcept.cc
index 832a28ab63a..4bd3f5755aa 100644
--- a/libstdc++-v3/testsuite/23_containers/map/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/map/allocator/noexcept.cc
@@ -15,10 +15,10 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=gnu++11" }
#include <map>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
@@ -31,14 +31,11 @@ struct U { };
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<std::pair<const T, U>, true>& l,
- propagating_allocator<std::pair<const T, U>, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<std::pair<const T, U>> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -73,11 +70,3 @@ void test03()
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
-}
diff --git a/libstdc++-v3/testsuite/23_containers/multimap/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/multimap/allocator/noexcept.cc
index aee4dc90029..9913acb8632 100644
--- a/libstdc++-v3/testsuite/23_containers/multimap/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/multimap/allocator/noexcept.cc
@@ -15,10 +15,10 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=gnu++11" }
#include <map>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
@@ -31,14 +31,11 @@ struct U { };
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<std::pair<const T, U>, true>& l,
- propagating_allocator<std::pair<const T, U>, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<std::pair<const T, U>> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -73,11 +70,3 @@ void test03()
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
-}
diff --git a/libstdc++-v3/testsuite/23_containers/multiset/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/multiset/allocator/noexcept.cc
index 89b0053d4c2..d429313d587 100644
--- a/libstdc++-v3/testsuite/23_containers/multiset/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/multiset/allocator/noexcept.cc
@@ -15,6 +15,7 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=gnu++11" }
#include <set>
@@ -29,14 +30,11 @@ using Cmp = std::less<T>;
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l,
- propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -71,11 +69,3 @@ void test03()
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
-}
diff --git a/libstdc++-v3/testsuite/23_containers/set/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/set/allocator/noexcept.cc
index 07adbc08013..f8389d6a42f 100644
--- a/libstdc++-v3/testsuite/23_containers/set/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/set/allocator/noexcept.cc
@@ -15,10 +15,10 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=gnu++11" }
#include <set>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
@@ -29,14 +29,11 @@ using Cmp = std::less<T>;
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l,
- propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -71,11 +68,3 @@ void test03()
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
-}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/noexcept.cc
index 47eb61d77fc..64c46f2e12b 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/noexcept.cc
@@ -15,10 +15,10 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=c++11" }
#include <unordered_map>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
@@ -37,13 +37,11 @@ struct equal_to
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l, propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -76,13 +74,5 @@ void test03()
test_type v1(alloc_type(1));
test_type v2(alloc_type(2));
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
- // static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
-}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
+ static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/noexcept.cc
index de16cbd25e8..fe6fc6e0c72 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/noexcept.cc
@@ -15,10 +15,10 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=c++11" }
#include <unordered_map>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
@@ -37,13 +37,11 @@ struct equal_to
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l, propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -76,13 +74,5 @@ void test03()
test_type v1(alloc_type(1));
test_type v2(alloc_type(2));
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
- // static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
-}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
+ static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/noexcept.cc
index 5d69e0768ce..2c7e853891f 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/noexcept.cc
@@ -15,10 +15,10 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=c++11" }
#include <unordered_set>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
@@ -37,13 +37,11 @@ struct equal_to
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l, propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -76,13 +74,5 @@ void test03()
test_type v1(alloc_type(1));
test_type v2(alloc_type(2));
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
- // static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
-}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
+ static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/noexcept.cc
index 0f73126ccd9..5ada755fcbf 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/noexcept.cc
@@ -15,10 +15,10 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=c++11" }
#include <unordered_set>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
@@ -37,13 +37,11 @@ struct equal_to
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l, propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -76,13 +74,5 @@ void test03()
test_type v1(alloc_type(1));
test_type v2(alloc_type(2));
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
- // static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
-}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
+ static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
diff --git a/libstdc++-v3/testsuite/23_containers/vector/allocator/noexcept.cc b/libstdc++-v3/testsuite/23_containers/vector/allocator/noexcept.cc
index a805a4f0a2d..31231436b03 100644
--- a/libstdc++-v3/testsuite/23_containers/vector/allocator/noexcept.cc
+++ b/libstdc++-v3/testsuite/23_containers/vector/allocator/noexcept.cc
@@ -15,23 +15,21 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
+// { dg-do compile }
// { dg-options "-std=gnu++0x" }
#include <vector>
-#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
struct T { int i; };
namespace __gnu_test
{
- inline void
- swap(propagating_allocator<T, true>& l, propagating_allocator<T, true>& r)
- noexcept(false)
- {
- typedef uneq_allocator<T> base_alloc;
- swap(static_cast<base_alloc&>(l), static_cast<base_alloc&>(r));
- }
+ template<typename U>
+ inline void
+ swap(propagating_allocator<U, true>& l, propagating_allocator<U, true>& r)
+ noexcept(false)
+ { }
}
using __gnu_test::propagating_allocator;
@@ -66,11 +64,3 @@ void test03()
static_assert( noexcept( v1 = std::move(v2) ), "Move assign cannot throw" );
static_assert( !noexcept( v1.swap(v2) ), "Swap can throw" );
}
-
-int main()
-{
- test01();
- test02();
- test03();
- return 0;
-}
diff --git a/libstdc++-v3/testsuite/23_containers/vector/allocator/swap.cc b/libstdc++-v3/testsuite/23_containers/vector/allocator/swap.cc
index ce44cf80f88..ba44267737f 100644
--- a/libstdc++-v3/testsuite/23_containers/vector/allocator/swap.cc
+++ b/libstdc++-v3/testsuite/23_containers/vector/allocator/swap.cc
@@ -64,7 +64,9 @@ void test02()
typedef propagating_allocator<T, true> alloc_type;
typedef std::vector<T, alloc_type> test_type;
test_type v1(alloc_type(1));
+ v1.push_back(T());
test_type v2(alloc_type(2));
+ v2.push_back(T());
std::swap(v1, v2);
VERIFY(2 == v1.get_allocator().get_personality());
VERIFY(1 == v2.get_allocator().get_personality());
diff --git a/libstdc++-v3/testsuite/28_regex/basic_regex/ctors/extended/cstring.cc b/libstdc++-v3/testsuite/28_regex/basic_regex/ctors/extended/cstring.cc
index fa2e009b2d2..03c20a4f6dd 100644
--- a/libstdc++-v3/testsuite/28_regex/basic_regex/ctors/extended/cstring.cc
+++ b/libstdc++-v3/testsuite/28_regex/basic_regex/ctors/extended/cstring.cc
@@ -1,4 +1,3 @@
-// { dg-do compile }
// { dg-options "-std=c++0x" }
// 2007-03-12 Stephen M. Webb <stephen.webb@bregmasoft.com>
@@ -33,7 +32,7 @@ test01()
std::regex re("(wee|week)(knights|night)", std::regex::extended);
VERIFY( re.flags() == std::regex::extended );
- VERIFY( re.mark_count() == 0 );
+ VERIFY( re.mark_count() == 2 );
}
int main()
diff --git a/libstdc++-v3/testsuite/28_regex/init-list.cc b/libstdc++-v3/testsuite/28_regex/init-list.cc
index ee71bb99be3..390e715cc34 100644
--- a/libstdc++-v3/testsuite/28_regex/init-list.cc
+++ b/libstdc++-v3/testsuite/28_regex/init-list.cc
@@ -1,5 +1,4 @@
// { dg-options "-std=gnu++0x" }
-// { dg-do compile }
// Copyright (C) 2008-2013 Free Software Foundation, Inc.
//
diff --git a/libstdc++-v3/testsuite/28_regex/regex_error/regex_error.cc b/libstdc++-v3/testsuite/28_regex/regex_error/regex_error.cc
index 9cb08f9cc81..253797329cf 100644
--- a/libstdc++-v3/testsuite/28_regex/regex_error/regex_error.cc
+++ b/libstdc++-v3/testsuite/28_regex/regex_error/regex_error.cc
@@ -1,5 +1,4 @@
// { dg-options "-std=c++0x" }
-// { dg-do compile }
//
// 2009-06-17 Stephen M. Webb <stephen.webb@xandros.com>
//
diff --git a/libstdc++-v3/testsuite/28_regex/sub_match/cast_char.cc b/libstdc++-v3/testsuite/28_regex/sub_match/cast_char.cc
index ac2f305ccd4..300757e9a27 100644
--- a/libstdc++-v3/testsuite/28_regex/sub_match/cast_char.cc
+++ b/libstdc++-v3/testsuite/28_regex/sub_match/cast_char.cc
@@ -1,4 +1,3 @@
-// { dg-do compile }
// { dg-options "-std=c++0x" }
//
@@ -27,9 +26,7 @@
#include <string>
#include <testsuite_hooks.h>
-
-void
-test01()
+int main()
{
bool test __attribute__((unused)) = true;
@@ -39,8 +36,8 @@ test01()
value_type test_data[] = "cabbage";
sub_match_type sm;
- sm.first = test_data + 0;
- sm.second = test_data + sizeof(test_data)/sizeof(value_type);
+ sm.first = std::begin(test_data);
+ sm.second = std::end(test_data) - 1;
sm.matched = true;
string_type sm_string = sm;
diff --git a/libstdc++-v3/testsuite/28_regex/sub_match/cast_wchar_t.cc b/libstdc++-v3/testsuite/28_regex/sub_match/cast_wchar_t.cc
index b4cbe1abdfe..2a5cd3fa048 100644
--- a/libstdc++-v3/testsuite/28_regex/sub_match/cast_wchar_t.cc
+++ b/libstdc++-v3/testsuite/28_regex/sub_match/cast_wchar_t.cc
@@ -1,4 +1,3 @@
-// { dg-do compile }
// { dg-options "-std=c++0x" }
//
@@ -27,9 +26,7 @@
#include <string>
#include <testsuite_hooks.h>
-
-void
-test01()
+int main()
{
bool test __attribute__((unused)) = true;
@@ -39,8 +36,8 @@ test01()
value_type test_data[] = L"cabbage";
sub_match_type sm;
- sm.first = test_data + 0;
- sm.second = test_data + sizeof(test_data)/sizeof(value_type);
+ sm.first = std::begin(test_data);
+ sm.second = std::end(test_data) - 1;
sm.matched = true;
string_type sm_string = sm;
diff --git a/libstdc++-v3/testsuite/28_regex/sub_match/length.cc b/libstdc++-v3/testsuite/28_regex/sub_match/length.cc
index e4acf711f96..7f073e4242b 100644
--- a/libstdc++-v3/testsuite/28_regex/sub_match/length.cc
+++ b/libstdc++-v3/testsuite/28_regex/sub_match/length.cc
@@ -1,4 +1,3 @@
-// { dg-do compile }
// { dg-options "-std=c++0x" }
//
@@ -47,3 +46,8 @@ test01()
VERIFY( sm1.length() == test_len );
VERIFY( sm2.length() == 0 );
}
+
+int main()
+{
+ test01();
+}
diff --git a/libstdc++-v3/testsuite/experimental/string_view/capacity/1.cc b/libstdc++-v3/testsuite/experimental/string_view/capacity/1.cc
index 8dcff72a2bb..0f09fe89dd6 100644
--- a/libstdc++-v3/testsuite/experimental/string_view/capacity/1.cc
+++ b/libstdc++-v3/testsuite/experimental/string_view/capacity/1.cc
@@ -155,13 +155,6 @@ test01()
VERIFY( sz03 >= sz04 );
}
-#if !__GXX_WEAK__
-// Explicitly instantiate for systems with no COMDAT or weak support.
-template
- const std::experimental::basic_string_view<A<B>>::size_type
- std::experimental::basic_string_view<A<B>>::_S_max_size;
-#endif
-
int
main()
{
diff --git a/libstdc++-v3/testsuite/experimental/string_view/inserters/pod/10081-out.cc b/libstdc++-v3/testsuite/experimental/string_view/inserters/pod/10081-out.cc
index a275ad216b6..b976d6f441b 100644
--- a/libstdc++-v3/testsuite/experimental/string_view/inserters/pod/10081-out.cc
+++ b/libstdc++-v3/testsuite/experimental/string_view/inserters/pod/10081-out.cc
@@ -68,13 +68,6 @@ test01()
}
}
-#if !__GXX_WEAK__
-// Explicitly instantiate for systems with no COMDAT or weak support.
-template
- const std::experimental::basic_string_view<__gnu_test::pod_ushort>::size_type
- std::experimental::basic_string_view_view<__gnu_test::pod_ushort>::_S_max_size;
-#endif
-
int
main()
{
diff --git a/libstdc++-v3/testsuite/tr1/7_regular_expressions/regex/cons/char/c_string_extended.cc b/libstdc++-v3/testsuite/tr1/7_regular_expressions/regex/cons/char/c_string_extended.cc
deleted file mode 100644
index 24fa717d840..00000000000
--- a/libstdc++-v3/testsuite/tr1/7_regular_expressions/regex/cons/char/c_string_extended.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// { dg-do compile }
-
-// 2007-03-12 Stephen M. Webb <stephen.webb@bregmasoft.com>
-//
-// Copyright (C) 2007-2013 Free Software Foundation, Inc.
-//
-// This file is part of the GNU ISO C++ Library. This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-//
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this library; see the file COPYING3. If not see
-// <http://www.gnu.org/licenses/>.
-
-// 7.8.2 basic_regex constructors
-
-#include <tr1/regex>
-#include <testsuite_hooks.h>
-
-void
-test01()
-{
- bool test __attribute__((unused)) = true;
-
- std::tr1::regex re("(wee|week)(knights|night)", std::tr1::regex::extended);
-
- VERIFY( re.flags() == std::tr1::regex::extended );
- VERIFY( re.mark_count() == 0 );
-}
-
-int main()
-{
- test01();
- return 0;
-}